From cdd6938b25514a8807182a1a6b79f60bc45a5fe4 Mon Sep 17 00:00:00 2001 From: Rafael Matias Date: Thu, 18 Jul 2024 20:13:28 +0200 Subject: [PATCH] init: caplin support --- src/cl/caplin/caplin_launcher.star | 405 +++++++++++++++++++++++++++++ src/cl/cl_launcher.star | 9 + src/package_io/constants.star | 19 ++ src/package_io/input_parser.star | 10 + src/vc/vc_launcher.star | 2 + 5 files changed, 445 insertions(+) create mode 100644 src/cl/caplin/caplin_launcher.star diff --git a/src/cl/caplin/caplin_launcher.star b/src/cl/caplin/caplin_launcher.star new file mode 100644 index 000000000..ec848f019 --- /dev/null +++ b/src/cl/caplin/caplin_launcher.star @@ -0,0 +1,405 @@ +shared_utils = import_module("../../shared_utils/shared_utils.star") +input_parser = import_module("../../package_io/input_parser.star") +cl_context = import_module("../../cl/cl_context.star") +cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star") +cl_shared = import_module("../cl_shared.star") +node_metrics = import_module("../../node_metrics_info.star") +constants = import_module("../../package_io/constants.star") + +blobber_launcher = import_module("../../blobber/blobber_launcher.star") + +caplin_BINARY_COMMAND = "caplin" + +RUST_BACKTRACE_ENVVAR_NAME = "RUST_BACKTRACE" +RUST_FULL_BACKTRACE_KEYWORD = "full" + +# ---------------------------------- Beacon client ------------------------------------- +BEACON_DATA_DIRPATH_ON_BEACON_SERVICE_CONTAINER = "/home/erigon/caplin/beacon-data" +# TODO: What's the approach on this - Permission Denided creating /data dir? BEACON_DATA_DIRPATH_ON_BEACON_SERVICE_CONTAINER = "/data/caplin/beacon-data" + + +# Port nums +BEACON_DISCOVERY_PORT_NUM = 9000 +BEACON_HTTP_PORT_NUM = 4000 +BEACON_METRICS_PORT_NUM = 5054 + +# The min/max CPU/memory that the beacon node can use +BEACON_MIN_CPU = 50 +BEACON_MIN_MEMORY = 256 + +METRICS_PATH = "/metrics" + +VERBOSITY_LEVELS = { + constants.GLOBAL_LOG_LEVEL.error: "1", + constants.GLOBAL_LOG_LEVEL.warn: "2", + constants.GLOBAL_LOG_LEVEL.info: "3", + constants.GLOBAL_LOG_LEVEL.debug: "4", + constants.GLOBAL_LOG_LEVEL.trace: "5", +} + + +def launch( + plan, + launcher, + service_name, + image, + participant_log_level, + global_log_level, + bootnode_contexts, + el_context, + full_name, + node_keystore_files, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, + snooper_enabled, + snooper_engine_context, + blobber_enabled, + blobber_extra_params, + extra_params, + extra_env_vars, + extra_labels, + persistent, + cl_volume_size, + cl_tolerations, + participant_tolerations, + global_tolerations, + node_selectors, + use_separate_vc, + keymanager_enabled, + checkpoint_sync_enabled, + checkpoint_sync_url, + port_publisher, + participant_index, +): + beacon_service_name = "{0}".format(service_name) + + log_level = input_parser.get_client_log_level_or_default( + participant_log_level, global_log_level, VERBOSITY_LEVELS + ) + + tolerations = input_parser.get_client_tolerations( + cl_tolerations, participant_tolerations, global_tolerations + ) + + network_name = shared_utils.get_network_name(launcher.network) + + cl_min_cpu = int(cl_min_cpu) if int(cl_min_cpu) > 0 else BEACON_MIN_CPU + cl_max_cpu = ( + int(cl_max_cpu) + if int(cl_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["caplin_max_cpu"] + ) + cl_min_mem = int(cl_min_mem) if int(cl_min_mem) > 0 else BEACON_MIN_MEMORY + cl_max_mem = ( + int(cl_max_mem) + if int(cl_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["caplin_max_mem"] + ) + + cl_volume_size = ( 
+ int(cl_volume_size) + if int(cl_volume_size) > 0 + else constants.VOLUME_SIZE[network_name]["caplin_volume_size"] + ) + + # Launch Beacon node + beacon_config = get_beacon_config( + plan, + launcher.el_cl_genesis_data, + launcher.jwt_file, + launcher.network, + image, + beacon_service_name, + bootnode_contexts, + el_context, + log_level, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, + snooper_enabled, + snooper_engine_context, + extra_params, + extra_env_vars, + extra_labels, + persistent, + cl_volume_size, + tolerations, + node_selectors, + checkpoint_sync_enabled, + checkpoint_sync_url, + port_publisher, + participant_index, + ) + + beacon_service = plan.add_service(beacon_service_name, beacon_config) + beacon_http_port = beacon_service.ports[constants.HTTP_PORT_ID] + beacon_http_url = "http://{0}:{1}".format( + beacon_service.ip_address, beacon_http_port.number + ) + + # Blobber config + if blobber_enabled: + blobber_service_name = "{0}-{1}".format("blobber", beacon_service_name) + blobber_config = blobber_launcher.get_config( + blobber_service_name, + node_keystore_files, + beacon_http_url, + blobber_extra_params, + node_selectors, + ) + + blobber_service = plan.add_service(blobber_service_name, blobber_config) + blobber_http_port = blobber_service.ports[ + blobber_launcher.BLOBBER_VALIDATOR_PROXY_PORT_ID + ] + blobber_http_url = "http://{0}:{1}".format( + blobber_service.ip_address, blobber_http_port.number + ) + beacon_http_url = blobber_http_url + + # TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module + beacon_node_identity_recipe = GetHttpRequestRecipe( + endpoint="/eth/v1/node/identity", + port_id=constants.HTTP_PORT_ID, + extract={ + "enr": ".data.enr", + "multiaddr": ".data.p2p_addresses[0]", + "peer_id": ".data.peer_id", + }, + ) + response = plan.request( + recipe=beacon_node_identity_recipe, service_name=beacon_service_name + ) + beacon_node_enr = response["extract.enr"] + beacon_multiaddr = response["extract.multiaddr"] + beacon_peer_id = response["extract.peer_id"] + + beacon_metrics_port = beacon_service.ports[constants.METRICS_PORT_ID] + beacon_metrics_url = "{0}:{1}".format( + beacon_service.ip_address, beacon_metrics_port.number + ) + beacon_node_metrics_info = node_metrics.new_node_metrics_info( + beacon_service_name, METRICS_PATH, beacon_metrics_url + ) + nodes_metrics_info = [beacon_node_metrics_info] + return cl_context.new_cl_context( + "caplin", + beacon_node_enr, + beacon_service.ip_address, + beacon_http_port.number, + beacon_http_url, + nodes_metrics_info, + beacon_service_name, + beacon_multiaddr, + beacon_peer_id, + snooper_enabled, + snooper_engine_context, + validator_keystore_files_artifact_uuid=node_keystore_files.files_artifact_uuid + if node_keystore_files + else "", + ) + + +def get_beacon_config( + plan, + el_cl_genesis_data, + jwt_file, + network, + image, + service_name, + boot_cl_client_ctxs, + el_context, + log_level, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, + snooper_enabled, + snooper_engine_context, + extra_params, + extra_env_vars, + extra_labels, + persistent, + cl_volume_size, + tolerations, + node_selectors, + checkpoint_sync_enabled, + checkpoint_sync_url, + port_publisher, + participant_index, +): + # If snooper is enabled use the snooper engine context, otherwise use the execution client context + if snooper_enabled: + EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format( + 
            snooper_engine_context.ip_addr,
            snooper_engine_context.engine_rpc_port_num,
        )
        EXECUTION_ENGINE_HOST = snooper_engine_context.ip_addr
        EXECUTION_ENGINE_PORT = snooper_engine_context.engine_rpc_port_num
    else:
        EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format(
            el_context.ip_addr,
            el_context.engine_rpc_port_num,
        )
        EXECUTION_ENGINE_HOST = el_context.ip_addr
        EXECUTION_ENGINE_PORT = el_context.engine_rpc_port_num

    public_ports = {}
    discovery_port = BEACON_DISCOVERY_PORT_NUM
    if port_publisher.cl_enabled:
        public_ports_for_component = shared_utils.get_public_ports_for_component(
            "cl", port_publisher, participant_index
        )
        public_ports, discovery_port = cl_shared.get_general_cl_public_port_specs(
            public_ports_for_component
        )

    used_port_assignments = {
        constants.TCP_DISCOVERY_PORT_ID: discovery_port,
        constants.UDP_DISCOVERY_PORT_ID: discovery_port,
        constants.HTTP_PORT_ID: BEACON_HTTP_PORT_NUM,
        constants.METRICS_PORT_ID: BEACON_METRICS_PORT_NUM,
    }
    used_ports = shared_utils.get_port_specs(used_port_assignments)

    cmd = [
        "--log.console.verbosity=" + log_level,
        "--datadir=" + BEACON_DATA_DIRPATH_ON_BEACON_SERVICE_CONTAINER,
        "--discovery.addr=0.0.0.0",
        "--discovery.port={0}".format(
            discovery_port
        ),
        "--beacon.api=beacon,config,debug,events,node,validator,lighthouse",
        "--beacon.api.addr=0.0.0.0",
        "--beacon.api.port={0}".format(BEACON_HTTP_PORT_NUM),
        "--beacon.api.cors.allow-origins=*",
        "--engine.api",
        "--engine.api.host=" + EXECUTION_ENGINE_HOST,
        "--engine.api.port={0}".format(EXECUTION_ENGINE_PORT),
        "--engine.api.jwtsecret=" + constants.JWT_MOUNT_PATH_ON_CONTAINER,
        # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv
        "--metrics",
        "--metrics.addr=0.0.0.0",
        "--metrics.port={0}".format(BEACON_METRICS_PORT_NUM),
        # ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^
        # vvvvvvvvv PROFILING CONFIG vvvvvvvvvvvvvvvvvvvvv
        #"--pprof",
        #"--pprof.addr=0.0.0.0",
        #"--pprof.port={0}".format(PROFILING_PORT_NUM),
    ]

    # If checkpoint sync is enabled, add the checkpoint sync url
    if checkpoint_sync_enabled:
        if checkpoint_sync_url:
            cmd.append("--caplin.checkpoint-sync-url=" + checkpoint_sync_url)
        else:
            if network in ["mainnet", "ephemery"]:
                cmd.append(
                    "--caplin.checkpoint-sync-url=" + constants.CHECKPOINT_SYNC_URL[network]
                )
            else:
                cmd.append(
                    "--caplin.checkpoint-sync-url=https://checkpoint-sync.{0}.ethpandaops.io".format(
                        network
                    )
                )

    if network not in constants.PUBLIC_NETWORKS:
        cmd.append(
            "--beacon-config="
            + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER
            + "/config.yaml"
        )
        cmd.append(
            "--genesis-ssz="
            + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER
            + "/genesis.ssz",
        )
        #cmd.append("--chaindata" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER)
        if (
            network == constants.NETWORK_NAME.kurtosis
            or constants.NETWORK_NAME.shadowfork in network
        ):
            if boot_cl_client_ctxs != None:
                cmd.append(
                    "--sentinel.bootnodes="
                    + ",".join(
                        [
                            ctx.enr
                            for ctx in boot_cl_client_ctxs[: constants.MAX_ENR_ENTRIES]
                        ]
                    )
                )
        elif network == constants.NETWORK_NAME.ephemery:
            cmd.append(
                "--sentinel.bootnodes="
                + shared_utils.get_devnet_enrs_list(
                    plan, el_cl_genesis_data.files_artifact_uuid
                )
            )
        else: # Devnets
            cmd.append(
                "--sentinel.bootnodes="
                + shared_utils.get_devnet_enrs_list(
                    plan, el_cl_genesis_data.files_artifact_uuid
                )
            )
    else: # Public networks
        cmd.append("--chain=" + network)

    if len(extra_params) > 0:
        # this
is a repeated, we convert it into Starlark + cmd.extend([param for param in extra_params]) + + recipe = GetHttpRequestRecipe( + endpoint="/eth/v1/node/identity", port_id=constants.HTTP_PORT_ID + ) + files = { + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, + constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, + } + + if persistent: + files[BEACON_DATA_DIRPATH_ON_BEACON_SERVICE_CONTAINER] = Directory( + persistent_key="data-{0}".format(service_name), + size=cl_volume_size, + ) + env = {RUST_BACKTRACE_ENVVAR_NAME: RUST_FULL_BACKTRACE_KEYWORD} + env.update(extra_env_vars) + return ServiceConfig( + image=image, + ports=used_ports, + public_ports=public_ports, + entrypoint=[caplin_BINARY_COMMAND], + cmd=cmd, + files=files, + env_vars=env, + private_ip_address_placeholder=constants.PRIVATE_IP_ADDRESS_PLACEHOLDER, + ready_conditions=cl_node_ready_conditions.get_ready_conditions( + constants.HTTP_PORT_ID + ), + min_cpu=cl_min_cpu, + max_cpu=cl_max_cpu, + min_memory=cl_min_mem, + max_memory=cl_max_mem, + labels=shared_utils.label_maker( + constants.CL_TYPE.caplin, + constants.CLIENT_TYPES.cl, + image, + el_context.client_name, + extra_labels, + ), + tolerations=tolerations, + node_selectors=node_selectors, + ) + + +def new_caplin_launcher(el_cl_genesis_data, jwt_file, network_params): + return struct( + el_cl_genesis_data=el_cl_genesis_data, + jwt_file=jwt_file, + network=network_params.network, + ) diff --git a/src/cl/cl_launcher.star b/src/cl/cl_launcher.star index ef16e2ba0..70fa00881 100644 --- a/src/cl/cl_launcher.star +++ b/src/cl/cl_launcher.star @@ -4,6 +4,7 @@ nimbus = import_module("./nimbus/nimbus_launcher.star") prysm = import_module("./prysm/prysm_launcher.star") teku = import_module("./teku/teku_launcher.star") grandine = import_module("./grandine/grandine_launcher.star") +caplin = import_module("./caplin/caplin_launcher.star") constants = import_module("../package_io/constants.star") input_parser = import_module("../package_io/input_parser.star") @@ -85,6 +86,14 @@ def launch( ), "launch_method": grandine.launch, }, + constants.CL_TYPE.caplin: { + "launcher": caplin.new_caplin_launcher( + el_cl_data, + jwt_file, + network_params, + ), + "launch_method": caplin.launch, + }, } all_snooper_engine_contexts = [] diff --git a/src/package_io/constants.star b/src/package_io/constants.star index dd7fa27f6..9aabfc63b 100644 --- a/src/package_io/constants.star +++ b/src/package_io/constants.star @@ -17,6 +17,7 @@ CL_TYPE = struct( prysm="prysm", lodestar="lodestar", grandine="grandine", + caplin="caplin", ) VC_TYPE = struct( @@ -171,6 +172,7 @@ VOLUME_SIZE = { "nimbus_volume_size": 500000, # 500GB "lodestar_volume_size": 500000, # 500GB "grandine_volume_size": 500000, # 500GB + "caplin_volume_size": 500000, # 500GB }, "sepolia": { "geth_volume_size": 300000, # 300GB @@ -186,6 +188,7 @@ VOLUME_SIZE = { "nimbus_volume_size": 150000, # 150GB "lodestar_volume_size": 150000, # 150GB "grandine_volume_size": 150000, # 150GB + "caplin_volume_size": 150000, # 150GB }, "holesky": { "geth_volume_size": 100000, # 100GB @@ -201,6 +204,7 @@ VOLUME_SIZE = { "nimbus_volume_size": 100000, # 100GB "lodestar_volume_size": 100000, # 100GB "grandine_volume_size": 100000, # 100GB + "caplin_volume_size": 100000, # 100GB }, "devnets": { "geth_volume_size": 100000, # 100GB @@ -216,6 +220,7 @@ VOLUME_SIZE = { "nimbus_volume_size": 100000, # 100GB "lodestar_volume_size": 100000, # 100GB "grandine_volume_size": 100000, # 100GB + "caplin_volume_size": 100000, # 100GB }, "ephemery": { 
"geth_volume_size": 5000, # 5GB @@ -231,6 +236,7 @@ VOLUME_SIZE = { "nimbus_volume_size": 1000, # 1GB "lodestar_volume_size": 1000, # 1GB "grandine_volume_size": 1000, # 1GB + "caplin_volume_size": 1000, # 1GB }, "kurtosis": { "geth_volume_size": 5000, # 5GB @@ -246,6 +252,7 @@ VOLUME_SIZE = { "nimbus_volume_size": 1000, # 1GB "lodestar_volume_size": 1000, # 1GB "grandine_volume_size": 1000, # 1GB + "caplin_volume_size": 1000, # 1GB }, } @@ -277,6 +284,8 @@ RAM_CPU_OVERRIDES = { "lodestar_max_cpu": 4000, # 4 cores "grandine_max_mem": 16384, # 16GB "grandine_max_cpu": 4000, # 4 cores + "caplin_max_mem": 16384, # 16GB + "caplin_max_cpu": 4000, # 4 cores }, "sepolia": { "geth_max_mem": 4096, # 4GB @@ -305,6 +314,8 @@ RAM_CPU_OVERRIDES = { "lodestar_max_cpu": 1000, # 1 core "grandine_max_mem": 4096, # 4GB "grandine_max_cpu": 1000, # 1 core + "caplin_max_mem": 4096, # 4GB + "caplin_max_cpu": 1000, # 1 core }, "holesky": { "geth_max_mem": 8192, # 8GB @@ -333,6 +344,8 @@ RAM_CPU_OVERRIDES = { "lodestar_max_cpu": 2000, # 2 cores "grandine_max_mem": 8192, # 8GB "grandine_max_cpu": 2000, # 2 cores + "caplin_max_mem": 8192, # 8GB + "caplin_max_cpu": 2000, # 2 cores }, "devnets": { "geth_max_mem": 4096, # 4GB @@ -361,6 +374,8 @@ RAM_CPU_OVERRIDES = { "lodestar_max_cpu": 1000, # 1 core "grandine_max_mem": 4096, # 4GB "grandine_max_cpu": 1000, # 1 core + "caplin_max_mem": 4096, # 4GB + "caplin_max_cpu": 1000, # 1 core }, "ephemery": { "geth_max_mem": 1024, # 1GB @@ -389,6 +404,8 @@ RAM_CPU_OVERRIDES = { "lodestar_max_cpu": 1000, # 1 core "grandine_max_mem": 1024, # 1GB "grandine_max_cpu": 1000, # 1 core + "caplin_max_mem": 1024, # 1GB + "caplin_max_cpu": 1000, # 1 core }, "kurtosis": { "geth_max_mem": 1024, # 1GB @@ -417,5 +434,7 @@ RAM_CPU_OVERRIDES = { "lodestar_max_cpu": 1000, # 1 core "grandine_max_mem": 2048, # 2GB "grandine_max_cpu": 1000, # 1 core + "caplin_max_mem": 2048, # 2GB + "caplin_max_cpu": 1000, # 1 core }, } diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index 7a761a90a..759447f9d 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -23,6 +23,7 @@ DEFAULT_CL_IMAGES = { "prysm": "gcr.io/prysmaticlabs/prysm/beacon-chain:latest", "lodestar": "chainsafe/lodestar:latest", "grandine": "ethpandaops/grandine:master", + "caplin": "ethpandaops/erigon:main", } DEFAULT_CL_IMAGES_MINIMAL = { @@ -32,6 +33,7 @@ DEFAULT_CL_IMAGES_MINIMAL = { "prysm": "ethpandaops/prysm-beacon-chain:develop-minimal", "lodestar": "chainsafe/lodestar:latest", "grandine": "ethpandaops/grandine:master-minimal", + "caplin": "ethpandaops/erigon:main", } DEFAULT_VC_IMAGES = { @@ -41,6 +43,7 @@ DEFAULT_VC_IMAGES = { "prysm": "gcr.io/prysmaticlabs/prysm/validator:latest", "teku": "consensys/teku:latest", "grandine": "ethpandaops/grandine:master", + "caplin": "ethpandaops/erigon:main", } DEFAULT_VC_IMAGES_MINIMAL = { @@ -50,6 +53,7 @@ DEFAULT_VC_IMAGES_MINIMAL = { "prysm": "ethpandaops/prysm-validator:develop-minimal", "teku": "consensys/teku:latest", "grandine": "ethpandaops/grandine:master-minimal", + "caplin": "ethpandaops/erigon:main", } # Placeholder value for the deneb fork epoch if electra is being run @@ -507,6 +511,7 @@ def parse_network_params(plan, input_args): constants.CL_TYPE.nimbus, constants.CL_TYPE.teku, constants.CL_TYPE.grandine, + constants.CL_TYPE.caplin, ) and vc_type == "" ): @@ -1036,6 +1041,8 @@ def enrich_disable_peer_scoring(parsed_arguments_dict): participant["cl_extra_params"].append("--disablePeerScoring") if participant["cl_type"] 
== "grandine": participant["cl_extra_params"].append("--disable-peer-scoring") + #if participant["cl_type"] == "caplin": + # TODO: caplin doesn't have a flag for peer scoring return parsed_arguments_dict @@ -1084,6 +1091,9 @@ def enrich_mev_extra_params(parsed_arguments_dict, mev_prefix, mev_port, mev_typ if participant["cl_type"] == "grandine": participant["cl_extra_params"].append("--builder-url={0}".format(mev_url)) + if participant["cl_type"] == "caplin": + participant["cl_extra_params"].append("--mev-relay-url={0}".format(mev_url)) + num_participants = len(parsed_arguments_dict["participants"]) index_str = shared_utils.zfill_custom( num_participants + 1, len(str(num_participants + 1)) diff --git a/src/vc/vc_launcher.star b/src/vc/vc_launcher.star index 3512b3442..4d14bc891 100644 --- a/src/vc/vc_launcher.star +++ b/src/vc/vc_launcher.star @@ -198,6 +198,8 @@ def launch( ) elif vc_type == constants.VC_TYPE.grandine: fail("Grandine VC is not yet supported") + #elif vc_type == constants.VC_TYPE.caplin: + # fail("Caplin VC is not yet supported") else: fail("Unsupported vc_type: {0}".format(vc_type))