From 30e29c4c2d0fd04c9199f9158adb9d7b5b4a751e Mon Sep 17 00:00:00 2001 From: Rodrigo Quelhas <22591718+RomarQ@users.noreply.github.com> Date: Wed, 11 Sep 2024 11:52:53 +0100 Subject: [PATCH] Lazy loading (#2840) * feat: implement lazy loading backend and call executor experiments * code cleanup * add lazy loading chain spec * feat(lazy-loading): replace async-std with tokio and retry failed requests * remove local polkadot-sdk dependencies * fix Cargo.toml * chore: update Cargo.lock file * fix editor config errors * fix editor config errors * fix build * fix lazy-loading feature * cleanup * reset AsyncBacking > SlotInfo * test: add lazy loading tests * remove parent field * remove files * test: fix formatting * fix(ci): lazy loading test * fix test * fix format * test: move lazy loading state overrides to configs folder * fix build and add mandatory storage override * fix pipeline * add state overrides for dev accounts * fix build * remove redundant test * fix remark from review * improve lazy loading test --- .../workflow-templates/cargo-build/action.yml | 3 + .github/workflows/build.yml | 57 + Cargo.lock | 71 +- Cargo.toml | 6 +- node/Cargo.toml | 2 + node/cli-opt/src/lib.rs | 10 + node/cli/Cargo.toml | 2 + node/cli/src/cli.rs | 22 + node/cli/src/command.rs | 125 +- node/service/Cargo.toml | 15 +- node/service/src/chain_spec/test_spec.rs | 63 + node/service/src/externalities.rs | 872 +++++++++ node/service/src/lazy_loading/backend.rs | 1626 +++++++++++++++++ .../service/src/lazy_loading/call_executor.rs | 346 ++++ node/service/src/lazy_loading/client.rs | 80 + node/service/src/lazy_loading/helpers.rs | 107 ++ node/service/src/lazy_loading/mod.rs | 820 +++++++++ .../src/lazy_loading/state_overrides.rs | 206 +++ .../service/src/lazy_loading/wasm_override.rs | 381 ++++ .../src/lazy_loading/wasm_substitutes.rs | 174 ++ node/service/src/lib.rs | 55 +- test/configs/lazyLoadingStateOverrides.json | 1 + test/moonwall.config.json | 45 + 
.../scripts/prepare-lazy-loading-overrides.ts | 73 + .../lazy-loading/test-runtime-upgrade.ts | 92 + 25 files changed, 5176 insertions(+), 78 deletions(-) create mode 100644 node/service/src/externalities.rs create mode 100644 node/service/src/lazy_loading/backend.rs create mode 100644 node/service/src/lazy_loading/call_executor.rs create mode 100644 node/service/src/lazy_loading/client.rs create mode 100644 node/service/src/lazy_loading/helpers.rs create mode 100644 node/service/src/lazy_loading/mod.rs create mode 100644 node/service/src/lazy_loading/state_overrides.rs create mode 100644 node/service/src/lazy_loading/wasm_override.rs create mode 100644 node/service/src/lazy_loading/wasm_substitutes.rs create mode 100644 test/configs/lazyLoadingStateOverrides.json create mode 100644 test/scripts/prepare-lazy-loading-overrides.ts create mode 100644 test/suites/lazy-loading/test-runtime-upgrade.ts diff --git a/.github/workflow-templates/cargo-build/action.yml b/.github/workflow-templates/cargo-build/action.yml index dd9cf28ca0..a83d56b7db 100644 --- a/.github/workflow-templates/cargo-build/action.yml +++ b/.github/workflow-templates/cargo-build/action.yml @@ -48,6 +48,8 @@ runs: params="$params --features ${{ inputs.features }}" fi echo "cargo build $params" + cargo build $params --features lazy-loading + cp target/release/moonbeam target/release/lazy-loading cargo build $params - name: Display binary comments shell: bash @@ -74,3 +76,4 @@ runs: run: | mkdir -p build cp target/release/moonbeam build/moonbeam; + cp target/release/lazy-loading build/lazy-loading; diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ce90f83fb2..234da2db29 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -700,6 +700,63 @@ jobs: org.opencontainers.image.revision=${{ github.sha }} org.opencontainers.image.licenses=${{ github.event.repository.license.spdx_id }} + lazy-loading-tests: + runs-on: + labels: bare-metal + needs: ["set-tags", 
"build"] + strategy: + fail-fast: false + matrix: + chain: ["moonbeam"] + env: + GH_WORKFLOW_MATRIX_CHAIN: ${{ matrix.chain }} + DEBUG_COLORS: 1 + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ needs.set-tags.outputs.git_ref }} + - uses: pnpm/action-setup@v4 + with: + version: 8 + - uses: actions/setup-node@v4 + with: + node-version: 20.10.0 + - name: Create local folders + run: | + mkdir -p target/release/wbuild/${{ matrix.chain }}-runtime/ + mkdir -p test/tmp + - name: "Download branch built runtime" + uses: actions/download-artifact@v4 + with: + name: runtimes + path: target/release/wbuild/${{ matrix.chain }}-runtime/ + - name: "Download branch built node" + uses: actions/download-artifact@v4 + with: + name: moonbeam + path: target/release + - name: "Run lazy loading tests" + run: | + cd test + pnpm install + chmod uog+x ../target/release/lazy-loading + pnpm moonwall test lazy_loading_${{ matrix.chain }} + - name: Zip and Upload Node Logs on Failure + if: failure() + run: | + TIMESTAMP=$(date +%Y%m%d%H%M%S) + export NODE_LOGS_ZIP="node_logs_$TIMESTAMP.zip" + MOST_RECENT_ZOMBIE_DIR=$(ls -td /tmp/zombie-* | head -n 1) + find $MOST_RECENT_ZOMBIE_DIR -maxdepth 1 -type f -name '*.log' -exec zip -r $NODE_LOGS_ZIP {} \; + echo "NODE_LOGS_ZIP=${NODE_LOGS_ZIP}" >> $GITHUB_ENV + - uses: actions/upload-artifact@v4 + if: failure() + with: + name: failed-node-logs + path: ${{ env.NODE_LOGS_ZIP }} + chopsticks-upgrade-test: runs-on: labels: bare-metal diff --git a/Cargo.lock b/Cargo.lock index a123c86280..60f774d72d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1587,7 +1587,7 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#b273d33f9981e2bb3375ab45faeb537f7ee35224" +source = "git+https://github.com/w3f/ring-proof#665f5f51af5734c7b6d90b985dd6861d4c5b4752" dependencies = [ "ark-ec", "ark-ff", @@ -5054,6 +5054,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad" dependencies = [ "jsonrpsee-core", + "jsonrpsee-http-client", "jsonrpsee-proc-macros", "jsonrpsee-server", "jsonrpsee-types", @@ -5108,6 +5109,26 @@ dependencies = [ "tracing", ] +[[package]] +name = "jsonrpsee-http-client" +version = "0.22.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5" +dependencies = [ + "async-trait", + "hyper", + "hyper-rustls", + "jsonrpsee-core", + "jsonrpsee-types", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "tracing", + "url", +] + [[package]] name = "jsonrpsee-proc-macros" version = "0.22.5" @@ -7058,8 +7079,10 @@ dependencies = [ "fp-storage", "frame-benchmarking", "frame-benchmarking-cli", + "frame-system", "frame-system-rpc-runtime-api", "futures 0.3.30", + "hex", "hex-literal 0.3.4", "jsonrpsee", "libsecp256k1", @@ -7085,6 +7108,7 @@ dependencies = [ "nimbus-primitives", "nix 0.23.2", "pallet-author-inherent", + "pallet-balances", "pallet-ethereum", "pallet-parachain-staking", "pallet-sudo", @@ -7133,26 +7157,34 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-externalities", "sp-inherents", "sp-io", "sp-keystore", "sp-offchain", + "sp-rpc", "sp-runtime", "sp-session", + "sp-state-machine", "sp-storage", "sp-timestamp", "sp-transaction-pool", "sp-trie", + "sp-version", "staging-xcm", "substrate-build-script-utils", "substrate-frame-rpc-system", "substrate-prometheus-endpoint", + "substrate-rpc-client", "substrate-test-client", "substrate-test-runtime", "substrate-test-runtime-client", "tempfile", + "thiserror", "tiny-bip39", "tokio", + "tokio-retry", + "tracing", "trie-root 0.15.2", "xcm-fee-payment-runtime-api", ] @@ -12469,7 +12501,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.12.1", "proc-macro2", "quote", "syn 2.0.66", @@ -12901,13 +12933,14 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#b273d33f9981e2bb3375ab45faeb537f7ee35224" +source = "git+https://github.com/w3f/ring-proof#665f5f51af5734c7b6d90b985dd6861d4c5b4752" dependencies = [ "ark-ec", "ark-ff", "ark-poly", "ark-serialize", "ark-std", + "arrayvec 0.7.4", "blake2 0.10.6", "common", "fflonk", @@ -16541,6 +16574,19 @@ dependencies = [ "tokio", ] +[[package]] +name = "substrate-rpc-client" +version = "0.33.0" +source = "git+https://github.com/moonbeam-foundation/polkadot-sdk?branch=moonbeam-polkadot-v1.11.0#125e709e299d83556c21d668660fe37e2e3962cb" +dependencies = [ + "async-trait", + "jsonrpsee", + "log", + "sc-rpc-api", + "serde", + "sp-runtime", +] + [[package]] name = "substrate-state-trie-migration-rpc" version = "27.0.0" @@ -16821,9 +16867,9 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] @@ -16850,9 +16896,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", @@ -17022,6 +17068,17 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "tokio-retry" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" +dependencies = [ + "pin-project", + "rand 0.8.5", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.24.1" diff --git a/Cargo.toml b/Cargo.toml index 350512607c..088d6e6319 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -224,6 +224,7 @@ sp-consensus = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", br sp-storage = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } sp-timestamp = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } sp-wasm-interface = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } +sp-rpc = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } substrate-build-script-utils = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } substrate-frame-rpc-system = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } substrate-prometheus-endpoint = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } @@ -231,6 +232,7 @@ substrate-test-client = { git = "https://github.com/moonbeam-foundation/polkadot substrate-test-runtime = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } substrate-test-runtime-client = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } substrate-wasm-builder = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } +substrate-rpc-client = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } # Frontier (wasm) @@ -404,10 +406,12 @@ schnorrkel = { 
version = "0.11.4", default-features = false, features = [ "preaudit_deprecated", ] } tokio = { version = "1.36" } +tokio-retry = { version = "0.3.0" } tracing = "0.1.34" tracing-core = "0.1.29" trie-root = "0.15.2" url = "2.2.2" +thiserror = "1.0.63" # The list of dependencies below (which can be both direct and indirect dependencies) are crates # that are suspected to be CPU-intensive, and that are unlikely to require debugging (as some of @@ -493,4 +497,4 @@ inherits = "release" overflow-checks = true [patch."https://github.com/paritytech/polkadot-sdk"] -sp-crypto-ec-utils = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } +sp-crypto-ec-utils = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" } \ No newline at end of file diff --git a/node/Cargo.toml b/node/Cargo.toml index bb5a483867..eb291b1e07 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -42,6 +42,8 @@ moonriver-native = [ "moonbeam-cli/moonriver-native", "moonbeam-service/moonrive metadata-hash = ["moonbeam-service/metadata-hash"] +lazy-loading = ["moonbeam-service/lazy-loading", "moonbeam-cli/lazy-loading"] + test-spec = [] runtime-benchmarks = [ diff --git a/node/cli-opt/src/lib.rs b/node/cli-opt/src/lib.rs index 93259730cb..b18676c645 100644 --- a/node/cli-opt/src/lib.rs +++ b/node/cli-opt/src/lib.rs @@ -11,8 +11,10 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. +use std::path::PathBuf; // You should have received a copy of the GNU General Public License // along with Moonbeam. If not, see . 
+use primitive_types::H256; use std::str::FromStr; pub mod account_key; @@ -110,3 +112,11 @@ pub struct RpcConfig { pub frontier_backend_config: FrontierBackendConfig, pub no_prometheus_prefix: bool, } + +#[derive(Clone)] +pub struct LazyLoadingConfig { + pub state_rpc: String, + pub from_block: Option, + pub state_overrides_path: Option, + pub runtime_override: Option, +} diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml index 805bd19b35..2abb7aa2e5 100644 --- a/node/cli/Cargo.toml +++ b/node/cli/Cargo.toml @@ -54,6 +54,8 @@ default = [ "westend-native", ] +lazy-loading = ["sc-service/test-helpers", "moonbeam-service/lazy-loading"] + westend-native = ["polkadot-service/westend-native"] moonbase-native = ["moonbeam-service/moonbase-native", "westend-native"] diff --git a/node/cli/src/cli.rs b/node/cli/src/cli.rs index 7758aab380..1e48e3f570 100644 --- a/node/cli/src/cli.rs +++ b/node/cli/src/cli.rs @@ -26,6 +26,12 @@ use sc_cli::{Error as CliError, SubstrateCli}; use std::path::PathBuf; use std::time::Duration; +#[cfg(feature = "lazy-loading")] +fn parse_block_hash(s: &str) -> Result { + use std::str::FromStr; + sp_core::H256::from_str(s).map_err(|err| err.to_string()) +} + /// Sub-commands supported by the collator. #[derive(Debug, clap::Subcommand)] pub enum Subcommand { @@ -136,6 +142,22 @@ pub struct RunCmd { #[clap(long)] pub dev_service: bool, + #[cfg(feature = "lazy-loading")] + #[clap(long)] + pub fork_chain_from_rpc: Option, + + #[cfg(feature = "lazy-loading")] + #[arg(long, value_name = "BLOCK", value_parser = parse_block_hash)] + pub block: Option, + + #[cfg(feature = "lazy-loading")] + #[clap(long, value_name = "PATH", value_parser)] + pub fork_state_overrides: Option, + + #[cfg(feature = "lazy-loading")] + #[clap(long, value_name = "PATH", value_parser)] + pub runtime_override: Option, + /// When blocks should be sealed in the dev service. 
/// /// Options are "instant", "manual", or timer interval in milliseconds diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 937a243716..22f6adb37e 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -693,7 +693,10 @@ pub fn run() -> Result<()> { None => { let runner = cli.create_runner(&(*cli.run).normalize())?; let collator_options = cli.run.collator_options(); - runner.run_node_until_exit(|config| async move { + + // It is used when feature "lazy-loading" is enabled + #[allow(unused_mut)] + runner.run_node_until_exit(|mut config| async move { let hwbench = if !cli.run.no_hardware_benchmarks { config.database.path().map(|database_path| { let _ = std::fs::create_dir_all(&database_path); @@ -704,23 +707,11 @@ pub fn run() -> Result<()> { }; let extension = chain_spec::Extensions::try_get(&*config.chain_spec); - let para_id = extension.map(|e| e.para_id); - let id = ParaId::from(cli.run.parachain_id.clone().or(para_id).unwrap_or(1000)); let rpc_config = cli.run.new_rpc_config(); - // If dev service was requested, start up manual or instant seal. - // Otherwise continue with the normal parachain node. - // Dev service can be requested in two ways. - // 1. by providing the --dev-service flag to the CLI - // 2. by specifying "dev-service" in the chain spec's "relay-chain" field. - // NOTE: the --dev flag triggers the dev service by way of number 2 - let relay_chain_id = extension.map(|e| e.relay_chain.as_str()); - let dev_service = cli.run.dev_service - || config.chain_spec.is_dev() - || relay_chain_id == Some("dev-service"); - - if dev_service { + #[cfg(feature = "lazy-loading")] + if let Some(fork_chain_from_rpc) = cli.run.fork_chain_from_rpc { // When running the dev service, just use Alice's author inherent //TODO maybe make the --alice etc flags work here, and consider bringing back // the author-id flag. For now, this will work. 
@@ -728,34 +719,81 @@ pub fn run() -> Result<()> { "Alice", )); - return match &config.chain_spec { - #[cfg(feature = "moonriver-native")] - spec if spec.is_moonriver() => moonbeam_service::new_dev::< - moonbeam_service::moonriver_runtime::RuntimeApi, - moonbeam_service::MoonriverCustomizations, - sc_network::NetworkWorker<_, _>, - >(config, author_id, cli.run.sealing, rpc_config, hwbench) - .await - .map_err(Into::into), - #[cfg(feature = "moonbeam-native")] - spec if spec.is_moonbeam() => moonbeam_service::new_dev::< - moonbeam_service::moonbeam_runtime::RuntimeApi, - moonbeam_service::MoonbeamCustomizations, - sc_network::NetworkWorker<_, _>, - >(config, author_id, cli.run.sealing, rpc_config, hwbench) - .await - .map_err(Into::into), - #[cfg(feature = "moonbase-native")] - _ => moonbeam_service::new_dev::< - moonbeam_service::moonbase_runtime::RuntimeApi, - moonbeam_service::MoonbaseCustomizations, - sc_network::NetworkWorker<_, _>, - >(config, author_id, cli.run.sealing, rpc_config, hwbench) - .await - .map_err(Into::into), - #[cfg(not(feature = "moonbase-native"))] - _ => panic!("invalid chain spec"), + let lazy_loading_config = moonbeam_cli_opt::LazyLoadingConfig { + state_rpc: fork_chain_from_rpc, + from_block: cli.run.block, + state_overrides_path: cli.run.fork_state_overrides, + runtime_override: cli.run.runtime_override, }; + + let spec_builder = + chain_spec::test_spec::lazy_loading_spec_builder(Default::default()); + config.chain_spec = Box::new(spec_builder.build()); + + return moonbeam_service::lazy_loading::new_lazy_loading_service::< + moonbeam_runtime::RuntimeApi, + moonbeam_service::MoonbeamCustomizations, + sc_network::NetworkWorker<_, _>, + >( + config, + author_id, + cli.run.sealing, + rpc_config, + lazy_loading_config, + hwbench, + ) + .await + .map_err(Into::into); + } + #[cfg(not(feature = "lazy-loading"))] + { + // If dev service was requested, start up manual or instant seal. + // Otherwise continue with the normal parachain node. 
+ // Dev service can be requested in two ways. + // 1. by providing the --dev-service flag to the CLI + // 2. by specifying "dev-service" in the chain spec's "relay-chain" field. + // NOTE: the --dev flag triggers the dev service by way of number 2 + let relay_chain_id = extension.map(|e| e.relay_chain.as_str()); + let dev_service = cli.run.dev_service + || config.chain_spec.is_dev() + || relay_chain_id == Some("dev-service"); + if dev_service { + // When running the dev service, just use Alice's author inherent + //TODO maybe make the --alice etc flags work here, and consider bringing back + // the author-id flag. For now, this will work. + let author_id = Some(chain_spec::get_from_seed::< + nimbus_primitives::NimbusId, + >("Alice")); + + return match &config.chain_spec { + #[cfg(feature = "moonriver-native")] + spec if spec.is_moonriver() => moonbeam_service::new_dev::< + moonbeam_service::moonriver_runtime::RuntimeApi, + moonbeam_service::MoonriverCustomizations, + sc_network::NetworkWorker<_, _>, + >(config, author_id, cli.run.sealing, rpc_config, hwbench) + .await + .map_err(Into::into), + #[cfg(feature = "moonbeam-native")] + spec if spec.is_moonbeam() => moonbeam_service::new_dev::< + moonbeam_service::moonbeam_runtime::RuntimeApi, + moonbeam_service::MoonbeamCustomizations, + sc_network::NetworkWorker<_, _>, + >(config, author_id, cli.run.sealing, rpc_config, hwbench) + .await + .map_err(Into::into), + #[cfg(feature = "moonbase-native")] + _ => moonbeam_service::new_dev::< + moonbeam_service::moonbase_runtime::RuntimeApi, + moonbeam_service::MoonbaseCustomizations, + sc_network::NetworkWorker<_, _>, + >(config, author_id, cli.run.sealing, rpc_config, hwbench) + .await + .map_err(Into::into), + #[cfg(not(feature = "moonbase-native"))] + _ => panic!("invalid chain spec"), + }; + } } let polkadot_cli = RelayChainCli::new( @@ -765,6 +803,9 @@ pub fn run() -> Result<()> { .chain(cli.relaychain_args.iter()), ); + let para_id = extension.map(|e| e.para_id); + let 
id = ParaId::from(cli.run.parachain_id.clone().or(para_id).unwrap_or(1000)); + let parachain_account = AccountIdConversion::::into_account_truncating(&id); diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index 61bce2a190..5fbf6d898b 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -15,7 +15,7 @@ exit-future = { workspace = true } flume = { workspace = true } futures = { workspace = true, features = ["compat"] } hex-literal = { workspace = true } -jsonrpsee = { workspace = true, features = ["macros", "server"] } +jsonrpsee = { workspace = true, features = ["macros", "server", "http-client"] } libsecp256k1 = { workspace = true, features = ["hmac"] } log = { workspace = true } maplit = { workspace = true } @@ -26,6 +26,11 @@ sha3 = { workspace = true } tiny-bip39 = { workspace = true } tokio = { workspace = true, features = ["macros", "sync"] } trie-root = { workspace = true } +tokio-retry = { workspace = true } +substrate-rpc-client = { workspace = true } +hex = { workspace = true, features = ["std"] } +thiserror = { workspace = true } +tracing = { workspace = true } # Moonbeam moonbeam-dev-rpc = { workspace = true } @@ -52,6 +57,8 @@ moonriver-runtime = { workspace = true, optional = true } # Substrate frame-system-rpc-runtime-api = { workspace = true, features = ["std"] } +frame-system = { workspace = true, features = ["std"] } +pallet-balances = { workspace = true, features = ["std"] } pallet-transaction-payment = { workspace = true, features = ["std"] } pallet-transaction-payment-rpc = { workspace = true } pallet-transaction-payment-rpc-runtime-api = { workspace = true, features = [ @@ -95,6 +102,10 @@ sp-storage = { workspace = true, features = ["std"] } sp-timestamp = { workspace = true, features = ["std"] } sp-transaction-pool = { workspace = true, features = ["std"] } sp-trie = { workspace = true, features = ["std"] } +sp-state-machine = { workspace = true } +sp-rpc = { workspace = true } +sp-externalities = { workspace = 
true } +sp-version = { workspace = true } substrate-frame-rpc-system = { workspace = true } substrate-prometheus-endpoint = { workspace = true } @@ -178,6 +189,8 @@ default = [ "westend-native", ] +lazy-loading = ["sc-service/test-helpers"] + rococo-native = ["polkadot-cli/rococo-native", "polkadot-service/rococo-native"] westend-native = [ "polkadot-cli/westend-native", diff --git a/node/service/src/chain_spec/test_spec.rs b/node/service/src/chain_spec/test_spec.rs index 19bb7c0706..48715de217 100644 --- a/node/service/src/chain_spec/test_spec.rs +++ b/node/service/src/chain_spec/test_spec.rs @@ -87,3 +87,66 @@ pub fn staking_spec(para_id: ParaId) -> ChainSpec { )) .build() } + +#[cfg(feature = "lazy-loading")] +pub fn lazy_loading_spec_builder( + para_id: ParaId, +) -> sc_chain_spec::ChainSpecBuilder { + crate::chain_spec::moonbeam::ChainSpec::builder( + moonbeam_runtime::WASM_BINARY.expect("WASM binary was not build, please build it!"), + Default::default(), + ) + .with_name("Lazy Loading") + .with_id("lazy_loading") + .with_chain_type(ChainType::Development) + .with_properties( + serde_json::from_str( + "{\"tokenDecimals\": 18, \"tokenSymbol\": \"GLMR\", \"SS58Prefix\": 1284}", + ) + .expect("Provided valid json map"), + ) + .with_genesis_config(crate::chain_spec::moonbeam::testnet_genesis( + // Treasury Council members: Baltathar, Charleth and Dorothy + vec![ + AccountId::from(hex!("3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0")), + AccountId::from(hex!("798d4Ba9baf0064Ec19eB4F0a1a45785ae9D6DFc")), + AccountId::from(hex!("773539d4Ac0e786233D90A233654ccEE26a613D9")), + ], + // Open Tech Committee members: Alith and Baltathar + vec![ + AccountId::from(hex!("6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b")), + AccountId::from(hex!("3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0")), + ], + // Collators + vec![ + ( + AccountId::from(hex!("6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b")), + get_from_seed::("Alice"), + 1_000 * moonbeam_runtime::currency::GLMR, + ), + ( + 
AccountId::from(hex!("C0F0f4ab324C46e55D02D0033343B4Be8A55532d")), + get_from_seed::("Faith"), + 1_000 * moonbeam_runtime::currency::GLMR, + ), + ], + // Delegations + vec![], + // Endowed accounts (each minted 1 << 80 balance) + vec![ + // Alith, Baltathar, Charleth, Dorothy and Faith + AccountId::from(hex!("6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b")), + AccountId::from(hex!("3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0")), + AccountId::from(hex!("798d4Ba9baf0064Ec19eB4F0a1a45785ae9D6DFc")), + AccountId::from(hex!("773539d4Ac0e786233D90A233654ccEE26a613D9")), + AccountId::from(hex!("C0F0f4ab324C46e55D02D0033343B4Be8A55532d")), + // Additional accounts + AccountId::from(hex!("Ff64d3F6efE2317EE2807d223a0Bdc4c0c49dfDB")), + AccountId::from(hex!("f24FF3a9CF04c71Dbc94D0b566f7A27B94566cac")), + ], + 3_000_000 * moonbeam_runtime::currency::GLMR, + para_id, + // Chain ID + 1280, + )) +} diff --git a/node/service/src/externalities.rs b/node/service/src/externalities.rs new file mode 100644 index 0000000000..f8db22959d --- /dev/null +++ b/node/service/src/externalities.rs @@ -0,0 +1,872 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Concrete externalities implementation. 
+ +extern crate alloc; + +#[cfg(feature = "std")] +use crate::overlayed_changes::OverlayedExtensions; +use hash_db::Hasher; +use parity_scale_codec::{Encode, EncodeAppend}; +#[cfg(feature = "std")] +use sp_core::hexdisplay::HexDisplay; +use sp_core::storage::{ + well_known_keys::is_child_storage_key, ChildInfo, StateVersion, TrackedStorageKey, +}; +use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults}; +use sp_state_machine::{ + backend::Backend, IndexOperation, IterArgs, OverlayedChanges, StorageKey, StorageValue, +}; + +use alloc::{boxed::Box, vec, vec::Vec}; +use core::{ + any::{Any, TypeId}, + cmp::Ordering, +}; +use sp_core::hexdisplay::HexDisplay; +use sp_state_machine::{log_error, trace, warn}; +#[cfg(feature = "std")] +use std::error; + +const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within runtime"; +const BENCHMARKING_FN: &str = "\ + This is a special fn only for benchmarking where a database commit happens from the runtime. + For that reason client started transactions before calling into runtime are not allowed. + Without client transactions the loop condition guarantees the success of the tx close."; + +#[cfg(feature = "std")] +fn guard() -> sp_panic_handler::AbortGuard { + sp_panic_handler::AbortGuard::force_abort() +} + +#[cfg(not(feature = "std"))] +fn guard() -> () { + () +} + +/// Errors that can occur when interacting with the externalities. +#[cfg(feature = "std")] +#[derive(Debug, Copy, Clone)] +pub enum Error { + /// Failure to load state data from the backend. + #[allow(unused)] + Backend(B), + /// Failure to execute a function. 
+ #[allow(unused)] + Executor(E), +} + +#[cfg(feature = "std")] +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match *self { + Error::Backend(ref e) => write!(f, "Storage backend error: {}", e), + Error::Executor(ref e) => write!(f, "Sub-call execution error: {}", e), + } + } +} + +#[cfg(feature = "std")] +impl error::Error for Error { + fn description(&self) -> &str { + match *self { + Error::Backend(..) => "backend error", + Error::Executor(..) => "executor error", + } + } +} + +/// Wraps a read-only backend, call executor, and current overlayed changes. +pub struct LazyLoadingExt<'a, H, B> +where + H: Hasher, + B: 'a + Backend, +{ + /// The overlayed changes to write to. + overlay: &'a mut OverlayedChanges, + /// The storage backend to read from. + backend: &'a B, + /// Pseudo-unique id used for tracing. + pub id: u16, + /// Extensions registered with this instance. + #[cfg(feature = "std")] + extensions: Option>, +} + +impl<'a, H, B> LazyLoadingExt<'a, H, B> +where + H: Hasher, + B: Backend, +{ + /// Create a new `Ext`. 
+ #[cfg(not(feature = "std"))] + pub fn new(overlay: &'a mut OverlayedChanges, backend: &'a B) -> Self { + LazyLoadingExt { + overlay, + backend, + id: 0, + } + } + + /// Create a new `Ext` from overlayed changes and read-only backend + #[cfg(feature = "std")] + pub fn new( + overlay: &'a mut OverlayedChanges, + backend: &'a B, + extensions: Option<&'a mut sp_externalities::Extensions>, + ) -> Self { + Self { + overlay, + backend, + id: rand::random(), + extensions: extensions.map(OverlayedExtensions::new), + } + } +} + +#[cfg(test)] +impl<'a, H, B> LazyLoadingExt<'a, H, B> +where + H: Hasher, + H::Out: Ord + 'static, + B: 'a + Backend, +{ + pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> { + use std::collections::HashMap; + + self.backend + .pairs(Default::default()) + .expect("never fails in tests; qed.") + .map(|key_value| key_value.expect("never fails in tests; qed.")) + .map(|(k, v)| (k, Some(v))) + .chain( + self.overlay + .changes() + .map(|(k, v)| (k.clone(), v.value().cloned())), + ) + .collect::>() + .into_iter() + .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) + .collect() + } +} + +impl<'a, H, B> Externalities for LazyLoadingExt<'a, H, B> +where + H: Hasher, + H::Out: Ord + 'static + parity_scale_codec::Codec, + B: Backend, +{ + fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>) { + self.overlay.set_offchain_storage(key, value) + } + + fn storage(&self, key: &[u8]) -> Option { + let _guard = guard(); + + let result = self + .overlay + .storage(key) + .map(|x| x.map(|x| x.to_vec())) + .unwrap_or_else(|| self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); + + // NOTE: be careful about touching the key names – used outside substrate! 
+ trace!( + target: "state", + method = "Get", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + key = %HexDisplay::from(&key), + result = ?result.as_ref().map(HexDisplay::from), + result_encoded = %HexDisplay::from( + &result + .as_ref() + .map(|v| EncodeOpaqueValue(v.clone())) + .encode() + ), + ); + + result + } + + fn storage_hash(&self, key: &[u8]) -> Option> { + let _guard = guard(); + let result = self + .overlay + .storage(key) + .map(|x| x.map(|x| H::hash(x))) + .unwrap_or_else(|| { + self.backend + .storage_hash(key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) + }); + + trace!( + target: "state", + method = "Hash", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + key = %HexDisplay::from(&key), + ?result, + ); + result.map(|r| r.encode()) + } + + fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + let _guard = guard(); + let result = self + .overlay + .child_storage(child_info, key) + .map(|x| x.map(|x| x.to_vec())) + .unwrap_or_else(|| { + self.backend + .child_storage(child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) + }); + + trace!( + target: "state", + method = "ChildGet", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + key = %HexDisplay::from(&key), + result = ?result.as_ref().map(HexDisplay::from) + ); + + result + } + + fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + let _guard = guard(); + let result = self + .overlay + .child_storage(child_info, key) + .map(|x| x.map(|x| H::hash(x))) + .unwrap_or_else(|| { + self.backend + .child_storage_hash(child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) + }); + + trace!( + target: "state", + method = "ChildHash", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + key = %HexDisplay::from(&key), + ?result, + ); + + result.map(|r| r.encode()) + } + + fn exists_storage(&self, key: &[u8]) -> bool { + let _guard = 
guard(); + let result = match self.overlay.storage(key) { + Some(x) => x.is_some(), + _ => self + .backend + .exists_storage(key) + .expect(EXT_NOT_ALLOWED_TO_FAIL), + }; + + trace!( + target: "state", + method = "Exists", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + key = %HexDisplay::from(&key), + %result, + ); + + result + } + + fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool { + let _guard = guard(); + + let result = match self.overlay.child_storage(child_info, key) { + Some(x) => x.is_some(), + _ => self + .backend + .exists_child_storage(child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL), + }; + + trace!( + target: "state", + method = "ChildExists", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + key = %HexDisplay::from(&key), + %result, + ); + result + } + + fn next_storage_key(&self, key: &[u8]) -> Option { + let mut next_backend_key = self + .backend + .next_storage_key(key) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + let mut overlay_changes = self.overlay.iter_after(key).peekable(); + + match (&next_backend_key, overlay_changes.peek()) { + (_, None) => next_backend_key, + (Some(_), Some(_)) => { + for overlay_key in overlay_changes { + let cmp = next_backend_key.as_deref().map(|v| v.cmp(overlay_key.0)); + + // If `backend_key` is less than the `overlay_key`, we found out next key. + if cmp == Some(Ordering::Less) { + return next_backend_key; + } else if overlay_key.1.value().is_some() { + // If there exists a value for the `overlay_key` in the overlay + // (aka the key is still valid), it means we have found our next key. + return Some(overlay_key.0.to_vec()); + } else if cmp == Some(Ordering::Equal) { + // If the `backend_key` and `overlay_key` are equal, it means that we need + // to search for the next backend key, because the overlay has overwritten + // this key. 
+ next_backend_key = self + .backend + .next_storage_key(overlay_key.0) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + } + } + + next_backend_key + } + (None, Some(_)) => { + // Find the next overlay key that has a value attached. + overlay_changes.find_map(|k| k.1.value().as_ref().map(|_| k.0.to_vec())) + } + } + } + + fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + let mut next_backend_key = self + .backend + .next_child_storage_key(child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + let mut overlay_changes = self + .overlay + .child_iter_after(child_info.storage_key(), key) + .peekable(); + + match (&next_backend_key, overlay_changes.peek()) { + (_, None) => next_backend_key, + (Some(_), Some(_)) => { + for overlay_key in overlay_changes { + let cmp = next_backend_key.as_deref().map(|v| v.cmp(overlay_key.0)); + + // If `backend_key` is less than the `overlay_key`, we found out next key. + if cmp == Some(Ordering::Less) { + return next_backend_key; + } else if overlay_key.1.value().is_some() { + // If there exists a value for the `overlay_key` in the overlay + // (aka the key is still valid), it means we have found our next key. + return Some(overlay_key.0.to_vec()); + } else if cmp == Some(Ordering::Equal) { + // If the `backend_key` and `overlay_key` are equal, it means that we need + // to search for the next backend key, because the overlay has overwritten + // this key. + next_backend_key = self + .backend + .next_child_storage_key(child_info, overlay_key.0) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + } + } + + next_backend_key + } + (None, Some(_)) => { + // Find the next overlay key that has a value attached. 
+ overlay_changes.find_map(|k| k.1.value().as_ref().map(|_| k.0.to_vec())) + } + } + } + + fn place_storage(&mut self, key: StorageKey, value: Option) { + let _guard = guard(); + if is_child_storage_key(&key) { + warn!(target: "trie", "Refuse to directly set child storage key"); + return; + } + + // NOTE: be careful about touching the key names – used outside substrate! + trace!( + target: "state", + method = "Put", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + key = %HexDisplay::from(&key), + value = ?value.as_ref().map(HexDisplay::from), + value_encoded = %HexDisplay::from( + &value + .as_ref() + .map(|v| EncodeOpaqueValue(v.clone())) + .encode() + ), + ); + + self.overlay.set_storage(key, value); + } + + fn place_child_storage( + &mut self, + child_info: &ChildInfo, + key: StorageKey, + value: Option, + ) { + trace!( + target: "state", + method = "ChildPut", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + key = %HexDisplay::from(&key), + value = ?value.as_ref().map(HexDisplay::from), + ); + let _guard = guard(); + + self.overlay.set_child_storage(child_info, key, value); + } + + fn kill_child_storage( + &mut self, + child_info: &ChildInfo, + maybe_limit: Option, + maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { + trace!( + target: "state", + method = "ChildKill", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + ); + let _guard = guard(); + let overlay = self.overlay.clear_child_storage(child_info); + let (maybe_cursor, backend, loops) = + self.limit_remove_from_backend(Some(child_info), None, maybe_limit, maybe_cursor); + MultiRemovalResults { + maybe_cursor, + backend, + unique: overlay + backend, + loops, + } + } + + fn clear_prefix( + &mut self, + prefix: &[u8], + maybe_limit: Option, + maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { + trace!( + target: "state", + method = "ClearPrefix", + 
ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + prefix = %HexDisplay::from(&prefix), + ); + let _guard = guard(); + + if sp_core::storage::well_known_keys::starts_with_child_storage_key(prefix) { + warn!( + target: "trie", + "Refuse to directly clear prefix that is part or contains of child storage key", + ); + return MultiRemovalResults { + maybe_cursor: None, + backend: 0, + unique: 0, + loops: 0, + }; + } + + let overlay = self.overlay.clear_prefix(prefix); + let (maybe_cursor, backend, loops) = + self.limit_remove_from_backend(None, Some(prefix), maybe_limit, maybe_cursor); + MultiRemovalResults { + maybe_cursor, + backend, + unique: overlay + backend, + loops, + } + } + + fn clear_child_prefix( + &mut self, + child_info: &ChildInfo, + prefix: &[u8], + maybe_limit: Option, + maybe_cursor: Option<&[u8]>, + ) -> MultiRemovalResults { + trace!( + target: "state", + method = "ChildClearPrefix", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + prefix = %HexDisplay::from(&prefix), + ); + let _guard = guard(); + + let overlay = self.overlay.clear_child_prefix(child_info, prefix); + let (maybe_cursor, backend, loops) = self.limit_remove_from_backend( + Some(child_info), + Some(prefix), + maybe_limit, + maybe_cursor, + ); + MultiRemovalResults { + maybe_cursor, + backend, + unique: overlay + backend, + loops, + } + } + + fn storage_append(&mut self, key: Vec, value: Vec) { + trace!( + target: "state", + method = "Append", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + key = %HexDisplay::from(&key), + value = %HexDisplay::from(&value), + ); + + let _guard = guard(); + + let backend = &mut self.backend; + let current_value = self.overlay.value_mut_or_insert_with(&key, || { + backend + .storage(&key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) + .unwrap_or_default() + }); + StorageAppend::new(current_value).append(value); + } + + fn storage_root(&mut self, state_version: StateVersion) -> Vec { + let 
_guard = guard(); + + let (root, _cached) = self.overlay.storage_root(self.backend, state_version); + + trace!( + target: "state", + method = "StorageRoot", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + storage_root = %HexDisplay::from(&root.as_ref()), + cached = %_cached, + ); + + root.encode() + } + + fn child_storage_root( + &mut self, + child_info: &ChildInfo, + state_version: StateVersion, + ) -> Vec { + let _guard = guard(); + + let (root, _cached) = self + .overlay + .child_storage_root(child_info, self.backend, state_version) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + + trace!( + target: "state", + method = "ChildStorageRoot", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + child_info = %HexDisplay::from(&child_info.storage_key()), + storage_root = %HexDisplay::from(&root.as_ref()), + cached = %_cached, + ); + + root.encode() + } + + fn storage_index_transaction(&mut self, index: u32, hash: &[u8], size: u32) { + trace!( + target: "state", + method = "IndexTransaction", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + %index, + tx_hash = %HexDisplay::from(&hash), + %size, + ); + + self.overlay.add_transaction_index(IndexOperation::Insert { + extrinsic: index, + hash: hash.to_vec(), + size, + }); + } + + /// Renew existing piece of data storage. 
+ fn storage_renew_transaction_index(&mut self, index: u32, hash: &[u8]) { + trace!( + target: "state", + method = "RenewTransactionIndex", + ext_id = %HexDisplay::from(&self.id.to_le_bytes()), + %index, + tx_hash = %HexDisplay::from(&hash), + ); + + self.overlay.add_transaction_index(IndexOperation::Renew { + extrinsic: index, + hash: hash.to_vec(), + }); + } + + fn storage_start_transaction(&mut self) { + self.overlay.start_transaction() + } + + fn storage_rollback_transaction(&mut self) -> Result<(), ()> { + self.overlay.rollback_transaction().map_err(|_| ()) + } + + fn storage_commit_transaction(&mut self) -> Result<(), ()> { + self.overlay.commit_transaction().map_err(|_| ()) + } + + fn wipe(&mut self) { + for _ in 0..self.overlay.transaction_depth() { + self.overlay.rollback_transaction().expect(BENCHMARKING_FN); + } + self.overlay + .drain_storage_changes(self.backend, Default::default()) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay + .enter_runtime() + .expect("We have reset the overlay above, so we can not be in the runtime; qed"); + } + + fn commit(&mut self) { + // Bench always use latest state. 
+ let state_version = StateVersion::default(); + for _ in 0..self.overlay.transaction_depth() { + self.overlay.commit_transaction().expect(BENCHMARKING_FN); + } + let changes = self + .overlay + .drain_storage_changes(self.backend, state_version) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + self.backend + .commit( + changes.transaction_storage_root, + changes.transaction, + changes.main_storage_changes, + changes.child_storage_changes, + ) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay + .enter_runtime() + .expect("We have reset the overlay above, so we can not be in the runtime; qed"); + } + + fn read_write_count(&self) -> (u32, u32, u32, u32) { + self.backend.read_write_count() + } + + fn reset_read_write_count(&mut self) { + self.backend.reset_read_write_count() + } + + fn get_whitelist(&self) -> Vec { + self.backend.get_whitelist() + } + + fn set_whitelist(&mut self, new: Vec) { + self.backend.set_whitelist(new) + } + + fn proof_size(&self) -> Option { + self.backend.proof_size() + } + + fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { + self.backend.get_read_and_written_keys() + } +} + +impl<'a, H, B> LazyLoadingExt<'a, H, B> +where + H: Hasher, + H::Out: Ord + 'static + parity_scale_codec::Codec, + B: Backend, +{ + fn limit_remove_from_backend( + &mut self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + maybe_limit: Option, + start_at: Option<&[u8]>, + ) -> (Option>, u32, u32) { + let mut args = IterArgs::default(); + args.prefix = prefix; + args.start_at = start_at; + args.child_info = child_info.cloned(); + + let iter = match self.backend.keys(args) { + Ok(iter) => iter, + Err(error) => { + log::debug!(target: "trie", "Error while iterating the storage: {}", error); + return (None, 0, 0); + } + }; + + let mut delete_count: u32 = 0; + let mut loop_count: u32 = 0; + let mut maybe_next_key = None; + for key in iter { + let key = match key { + Ok(key) => key, + Err(error) => { + log::debug!(target: "trie", "Error while iterating 
the storage: {}", error); + break; + } + }; + + if maybe_limit.map_or(false, |limit| loop_count == limit) { + maybe_next_key = Some(key); + break; + } + let overlay = match child_info { + Some(child_info) => self.overlay.child_storage(child_info, &key), + None => self.overlay.storage(&key), + }; + if !matches!(overlay, Some(None)) { + // not pending deletion from the backend - delete it. + if let Some(child_info) = child_info { + self.overlay.set_child_storage(child_info, key, None); + } else { + self.overlay.set_storage(key, None); + } + delete_count = delete_count.saturating_add(1); + } + loop_count = loop_count.saturating_add(1); + } + + (maybe_next_key, delete_count, loop_count) + } +} + +/// Implement `Encode` by forwarding the stored raw vec. +struct EncodeOpaqueValue(Vec); + +impl Encode for EncodeOpaqueValue { + fn using_encoded R>(&self, f: F) -> R { + f(&self.0) + } +} + +/// Auxiliary structure for appending a value to a storage item. +pub(crate) struct StorageAppend<'a>(&'a mut Vec); + +impl<'a> StorageAppend<'a> { + /// Create a new instance using the given `storage` reference. + pub fn new(storage: &'a mut Vec) -> Self { + Self(storage) + } + + /// Append the given `value` to the storage item. + /// + /// If appending fails, `[value]` is stored in the storage item. 
+ pub fn append(&mut self, value: Vec) { + let value = vec![EncodeOpaqueValue(value)]; + + let item = core::mem::take(self.0); + + *self.0 = match Vec::::append_or_new(item, &value) { + Ok(item) => item, + Err(_) => { + log_error!( + target: "runtime", + "Failed to append value, resetting storage item to `[value]`.", + ); + value.encode() + } + }; + } +} + +#[cfg(not(feature = "std"))] +impl<'a, H, B> ExtensionStore for LazyLoadingExt<'a, H, B> +where + H: Hasher, + H::Out: Ord + 'static + parity_scale_codec::Codec, + B: Backend, +{ + fn extension_by_type_id(&mut self, _type_id: TypeId) -> Option<&mut dyn Any> { + None + } + + fn register_extension_with_type_id( + &mut self, + _type_id: TypeId, + _extension: Box, + ) -> Result<(), sp_externalities::Error> { + Err(sp_externalities::Error::ExtensionsAreNotSupported) + } + + fn deregister_extension_by_type_id( + &mut self, + _type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { + Err(sp_externalities::Error::ExtensionsAreNotSupported) + } +} + +#[cfg(feature = "std")] +impl<'a, H, B> ExtensionStore for Ext<'a, H, B> +where + H: Hasher, + B: 'a + Backend, +{ + fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { + self.extensions + .as_mut() + .and_then(|exts| exts.get_mut(type_id)) + } + + fn register_extension_with_type_id( + &mut self, + type_id: TypeId, + extension: Box, + ) -> Result<(), sp_externalities::Error> { + if let Some(ref mut extensions) = self.extensions { + extensions.register(type_id, extension) + } else { + Err(sp_externalities::Error::ExtensionsAreNotSupported) + } + } + + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { + if let Some(ref mut extensions) = self.extensions { + if extensions.deregister(type_id) { + Ok(()) + } else { + Err(sp_externalities::Error::ExtensionIsNotRegistered(type_id)) + } + } else { + Err(sp_externalities::Error::ExtensionsAreNotSupported) + } + } +} diff --git 
a/node/service/src/lazy_loading/backend.rs b/node/service/src/lazy_loading/backend.rs new file mode 100644 index 0000000000..e2f2e1a3aa --- /dev/null +++ b/node/service/src/lazy_loading/backend.rs @@ -0,0 +1,1626 @@ +// Copyright 2024 Moonbeam foundation +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +use parking_lot::RwLock; +use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; +use sp_core::storage::well_known_keys; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashingFor, Header as HeaderT, NumberFor, Zero}, + Justification, Justifications, StateVersion, Storage, +}; +use sp_state_machine::{ + BackendTransaction, ChildStorageCollection, IndexOperation, StorageCollection, TrieBackend, +}; +use std::future::Future; +use std::marker::PhantomData; +use std::ops::AddAssign; +use std::time::Duration; +use std::{ + collections::{HashMap, HashSet}, + ptr, + sync::Arc, +}; + +use sc_client_api::{ + backend::{self, NewBlockState}, + blockchain::{self, BlockStatus, HeaderBackend}, + leaves::LeafSet, + UsageInfo, +}; + +use jsonrpsee::http_client::HttpClient; +use sp_runtime::generic::SignedBlock; + +use crate::chain_spec; +use crate::lazy_loading::state_overrides::StateEntry; +use crate::lazy_loading::{helpers, state_overrides}; +use moonbeam_cli_opt::LazyLoadingConfig; +use moonbeam_core_primitives::BlockNumber; +use sc_client_api::StorageKey; +use 
sc_service::{Configuration, Error}; +use serde::de::DeserializeOwned; +use sp_core::offchain::storage::InMemOffchainStorage; +use sp_core::{twox_128, H256}; +use sp_rpc::list::ListOrValue; +use sp_rpc::number::NumberOrHex; +use sp_storage::{ChildInfo, StorageData}; +use sp_trie::PrefixedMemoryDB; +use tokio_retry::strategy::FixedInterval; +use tokio_retry::Retry; + +struct PendingBlock { + block: StoredBlock, + state: NewBlockState, +} + +#[derive(PartialEq, Eq, Clone)] +enum StoredBlock { + Header(B::Header, Option), + Full(B, Option), +} + +impl StoredBlock { + fn new( + header: B::Header, + body: Option>, + just: Option, + ) -> Self { + match body { + Some(body) => StoredBlock::Full(B::new(header, body), just), + None => StoredBlock::Header(header, just), + } + } + + fn header(&self) -> &B::Header { + match *self { + StoredBlock::Header(ref h, _) => h, + StoredBlock::Full(ref b, _) => b.header(), + } + } + + fn justifications(&self) -> Option<&Justifications> { + match *self { + StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref(), + } + } + + fn extrinsics(&self) -> Option<&[B::Extrinsic]> { + match *self { + StoredBlock::Header(_, _) => None, + StoredBlock::Full(ref b, _) => Some(b.extrinsics()), + } + } + + fn into_inner(self) -> (B::Header, Option>, Option) { + match self { + StoredBlock::Header(header, just) => (header, None, just), + StoredBlock::Full(block, just) => { + let (header, body) = block.deconstruct(); + (header, Some(body), just) + } + } + } +} + +#[derive(Clone)] +struct BlockchainStorage { + blocks: HashMap>, + hashes: HashMap, Block::Hash>, + best_hash: Block::Hash, + best_number: NumberFor, + finalized_hash: Block::Hash, + finalized_number: NumberFor, + genesis_hash: Block::Hash, + header_cht_roots: HashMap, Block::Hash>, + leaves: LeafSet>, + aux: HashMap, Vec>, +} + +/// In-memory blockchain. Supports concurrent reads. 
+#[derive(Clone)] +pub struct Blockchain { + rpc_client: Arc, + storage: Arc>>, +} + +impl Blockchain { + /// Get header hash of given block. + pub fn id(&self, id: BlockId) -> Option { + match id { + BlockId::Hash(h) => Some(h), + BlockId::Number(n) => self.storage.read().hashes.get(&n).cloned(), + } + } + + /// Create new in-memory blockchain storage. + fn new(rpc_client: Arc) -> Blockchain { + let storage = Arc::new(RwLock::new(BlockchainStorage { + blocks: HashMap::new(), + hashes: HashMap::new(), + best_hash: Default::default(), + best_number: Zero::zero(), + finalized_hash: Default::default(), + finalized_number: Zero::zero(), + genesis_hash: Default::default(), + header_cht_roots: HashMap::new(), + leaves: LeafSet::new(), + aux: HashMap::new(), + })); + Blockchain { + rpc_client, + storage, + } + } + + /// Insert a block header and associated data. + pub fn insert( + &self, + hash: Block::Hash, + header: ::Header, + justifications: Option, + body: Option::Extrinsic>>, + new_state: NewBlockState, + ) -> sp_blockchain::Result<()> { + let number = *header.number(); + if new_state.is_best() { + self.apply_head(&header)?; + } + + { + let mut storage = self.storage.write(); + storage.leaves.import(hash, number, *header.parent_hash()); + storage + .blocks + .insert(hash, StoredBlock::new(header, body, justifications)); + + if let NewBlockState::Final = new_state { + storage.finalized_hash = hash; + storage.finalized_number = number; + } + + if number == Zero::zero() { + storage.genesis_hash = hash; + } + } + + Ok(()) + } + + /// Get total number of blocks. + pub fn blocks_count(&self) -> usize { + let count = self.storage.read().blocks.len(); + log::error!("Total number of blocks: {:?}", count); + + count + } + + /// Compare this blockchain with another in-mem blockchain + pub fn equals_to(&self, other: &Self) -> bool { + // Check ptr equality first to avoid double read locks. 
+ if ptr::eq(self, other) { + return true; + } + self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks + } + + /// Compare canonical chain to other canonical chain. + pub fn canon_equals_to(&self, other: &Self) -> bool { + // Check ptr equality first to avoid double read locks. + if ptr::eq(self, other) { + return true; + } + let this = self.storage.read(); + let other = other.storage.read(); + this.hashes == other.hashes + && this.best_hash == other.best_hash + && this.best_number == other.best_number + && this.genesis_hash == other.genesis_hash + } + + /// Insert header CHT root. + pub fn insert_cht_root(&self, block: NumberFor, cht_root: Block::Hash) { + self.storage + .write() + .header_cht_roots + .insert(block, cht_root); + } + + /// Set an existing block as head. + pub fn set_head(&self, hash: Block::Hash) -> sp_blockchain::Result<()> { + let header = self + .header(hash)? + .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash)))?; + + self.apply_head(&header) + } + + fn apply_head(&self, header: &::Header) -> sp_blockchain::Result<()> { + let hash = header.hash(); + let number = header.number(); + /* + // Note: this may lock storage, so it must happen before obtaining storage + // write lock. 
+ let best_tree_route = { + let best_hash = self.storage.read().best_hash; + if &best_hash == header.parent_hash() { + None + } else { + let route = sp_blockchain::tree_route(self, best_hash, *header.parent_hash())?; + Some(route) + } + }; + */ + + let mut storage = self.storage.write(); + /* + if let Some(tree_route) = best_tree_route { + // apply retraction and enaction when reorganizing up to parent hash + let enacted = tree_route.enacted(); + + for entry in enacted { + storage.hashes.insert(entry.number, entry.hash); + } + + for entry in tree_route.retracted().iter().skip(enacted.len()) { + storage.hashes.remove(&entry.number); + } + } + */ + storage.best_hash = hash; + storage.best_number = *number; + storage.hashes.insert(*number, hash); + + Ok(()) + } + + fn finalize_header( + &self, + block: Block::Hash, + justification: Option, + ) -> sp_blockchain::Result<()> { + let mut storage = self.storage.write(); + storage.finalized_hash = block; + + if justification.is_some() { + let block = storage + .blocks + .get_mut(&block) + .expect("hash was fetched from a block in the db; qed"); + + let block_justifications = match block { + StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j, + }; + + *block_justifications = justification.map(Justifications::from); + } + + Ok(()) + } + + fn append_justification( + &self, + hash: Block::Hash, + justification: Justification, + ) -> sp_blockchain::Result<()> { + let mut storage = self.storage.write(); + + let block = storage + .blocks + .get_mut(&hash) + .expect("hash was fetched from a block in the db; qed"); + + let block_justifications = match block { + StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j, + }; + + if let Some(stored_justifications) = block_justifications { + if !stored_justifications.append(justification) { + return Err(sp_blockchain::Error::BadJustification( + "Duplicate consensus engine ID".into(), + )); + } + } else { + *block_justifications = 
Some(Justifications::from(justification)); + }; + + Ok(()) + } + + fn write_aux(&self, ops: Vec<(Vec, Option>)>) { + let mut storage = self.storage.write(); + for (k, v) in ops { + match v { + Some(v) => storage.aux.insert(k, v), + None => storage.aux.remove(&k), + }; + } + } +} + +impl HeaderBackend for Blockchain { + fn header( + &self, + hash: Block::Hash, + ) -> sp_blockchain::Result::Header>> { + // First, try to get the header from local storage + if let Some(header) = self + .storage + .read() + .blocks + .get(&hash) + .map(|b| b.header().clone()) + { + return Ok(Some(header)); + } + + // If not found in local storage, fetch from RPC client + let header = self + .rpc_client + .block::(Some(hash)) + .ok() + .flatten() + .map(|full_block| { + // Cache block header + let block = full_block.block.clone(); + self.storage.write().blocks.insert( + hash, + StoredBlock::Full(block.clone(), full_block.justifications), + ); + + block.header().clone() + }); + + if header.is_none() { + log::warn!("Expected block {:x?} to exist.", &hash); + } + + Ok(header) + } + + fn info(&self) -> blockchain::Info { + let storage = self.storage.read(); + blockchain::Info { + best_hash: storage.best_hash, + best_number: storage.best_number, + genesis_hash: storage.genesis_hash, + finalized_hash: storage.finalized_hash, + finalized_number: storage.finalized_number, + finalized_state: Some((storage.finalized_hash, storage.finalized_number)), + number_leaves: storage.leaves.count(), + block_gap: None, + } + } + + fn status(&self, hash: Block::Hash) -> sp_blockchain::Result { + match self.storage.read().blocks.contains_key(&hash) { + true => Ok(BlockStatus::InChain), + false => Ok(BlockStatus::Unknown), + } + } + + fn number(&self, hash: Block::Hash) -> sp_blockchain::Result>> { + let number = match self.storage.read().blocks.get(&hash) { + Some(block) => *block.header().number(), + _ => match self.rpc_client.block::(Some(hash)) { + Ok(Some(block)) => *block.block.header().number(), + err => 
{ + log::error!("Failed to fetch block number from RPC: {:?}", err); + return Err(sp_blockchain::Error::UnknownBlock( + "Failed to fetch block number from RPC".into(), + )); + } + }, + }; + + Ok(Some(number)) + } + + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> sp_blockchain::Result> { + Ok(self.id(BlockId::Number(number))) + } +} + +impl HeaderMetadata for Blockchain { + type Error = sp_blockchain::Error; + + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.header(hash)? + .map(|header| CachedHeaderMetadata::from(&header)) + .ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!("header not found: {}", hash)) + }) + } + + fn insert_header_metadata(&self, _hash: Block::Hash, _metadata: CachedHeaderMetadata) { + // No need to implement. + unimplemented!("insert_header_metadata") + } + fn remove_header_metadata(&self, _hash: Block::Hash) { + // No need to implement. + unimplemented!("remove_header_metadata") + } +} + +impl blockchain::Backend for Blockchain { + fn body( + &self, + hash: Block::Hash, + ) -> sp_blockchain::Result::Extrinsic>>> { + // First, try to get the header from local storage + if let Some(extrinsics) = self + .storage + .read() + .blocks + .get(&hash) + .and_then(|b| b.extrinsics().map(|x| x.to_vec())) + { + return Ok(Some(extrinsics)); + } + let extrinsics = self + .rpc_client + .block::(Some(hash)) + .ok() + .flatten() + .map(|b| b.block.extrinsics().to_vec()); + + Ok(extrinsics) + } + + fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result> { + Ok(self + .storage + .read() + .blocks + .get(&hash) + .and_then(|b| b.justifications().cloned())) + } + + fn last_finalized(&self) -> sp_blockchain::Result { + let last_finalized = self.storage.read().finalized_hash; + + Ok(last_finalized) + } + + fn leaves(&self) -> sp_blockchain::Result> { + Ok(self.storage.read().leaves.hashes()) + } + + fn displaced_leaves_after_finalizing( + &self, + block_number: NumberFor, + ) -> 
sp_blockchain::Result> { + Ok(self + .storage + .read() + .leaves + .displaced_by_finalize_height(block_number) + .leaves() + .cloned() + .collect::>()) + } + + fn children(&self, _parent_hash: Block::Hash) -> sp_blockchain::Result> { + unimplemented!("Not supported by the `lazy-loading` backend.") + } + + fn indexed_transaction(&self, _hash: Block::Hash) -> sp_blockchain::Result>> { + unimplemented!("Not supported by the `lazy-loading` backend.") + } + + fn block_indexed_body( + &self, + _hash: Block::Hash, + ) -> sp_blockchain::Result>>> { + unimplemented!("Not supported by the `lazy-loading` backend.") + } +} + +impl backend::AuxStore for Blockchain { + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { + let mut storage = self.storage.write(); + for (k, v) in insert { + storage.aux.insert(k.to_vec(), v.to_vec()); + } + for k in delete { + storage.aux.remove(*k); + } + Ok(()) + } + + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + Ok(self.storage.read().aux.get(key).cloned()) + } +} + +pub struct BlockImportOperation { + pending_block: Option>, + old_state: ForkedLazyBackend, + new_state: Option>>, + aux: Vec<(Vec, Option>)>, + finalized_blocks: Vec<(Block::Hash, Option)>, + set_head: Option, + pub(crate) before_fork: bool, +} + +impl BlockImportOperation { + fn apply_storage( + &mut self, + storage: Storage, + commit: bool, + state_version: StateVersion, + ) -> sp_blockchain::Result { + use sp_state_machine::Backend; + check_genesis_storage(&storage)?; + + let child_delta = storage.children_default.values().map(|child_content| { + ( + &child_content.child_info, + child_content + .data + .iter() + .map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), + ) + }); + + let (root, transaction) = self.old_state.full_storage_root( + storage + .top + .iter() + .map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), + child_delta, + state_version, + ); + + if commit { + 
self.new_state = Some(transaction); + } + Ok(root) + } +} + +impl backend::BlockImportOperation + for BlockImportOperation +{ + type State = ForkedLazyBackend; + + fn state(&self) -> sp_blockchain::Result> { + Ok(Some(&self.old_state)) + } + + fn set_block_data( + &mut self, + header: ::Header, + body: Option::Extrinsic>>, + _indexed_body: Option>>, + justifications: Option, + state: NewBlockState, + ) -> sp_blockchain::Result<()> { + assert!( + self.pending_block.is_none(), + "Only one block per operation is allowed" + ); + self.pending_block = Some(PendingBlock { + block: StoredBlock::new(header, body, justifications), + state, + }); + Ok(()) + } + + fn update_db_storage( + &mut self, + update: BackendTransaction>, + ) -> sp_blockchain::Result<()> { + self.new_state = Some(update); + Ok(()) + } + + fn set_genesis_state( + &mut self, + storage: Storage, + commit: bool, + state_version: StateVersion, + ) -> sp_blockchain::Result { + self.apply_storage(storage, commit, state_version) + } + + fn reset_storage( + &mut self, + storage: Storage, + state_version: StateVersion, + ) -> sp_blockchain::Result { + self.apply_storage(storage, true, state_version) + } + + fn insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> + where + I: IntoIterator, Option>)>, + { + self.aux.append(&mut ops.into_iter().collect()); + Ok(()) + } + + fn update_storage( + &mut self, + _update: StorageCollection, + _child_update: ChildStorageCollection, + ) -> sp_blockchain::Result<()> { + Ok(()) + } + + fn mark_finalized( + &mut self, + hash: Block::Hash, + justification: Option, + ) -> sp_blockchain::Result<()> { + self.finalized_blocks.push((hash, justification)); + Ok(()) + } + + fn mark_head(&mut self, hash: Block::Hash) -> sp_blockchain::Result<()> { + assert!( + self.pending_block.is_none(), + "Only one set block per operation is allowed" + ); + self.set_head = Some(hash); + Ok(()) + } + + fn update_transaction_index( + &mut self, + _index: Vec, + ) -> sp_blockchain::Result<()> { + 
Ok(()) + } +} + +/// DB-backed patricia trie state, transaction type is an overlay of changes to commit. +pub type DbState = TrieBackend>>, HashingFor>; + +/// A struct containing arguments for iterating over the storage. +#[derive(Default)] +pub struct RawIterArgs { + /// The prefix of the keys over which to iterate. + pub prefix: Option>, + + /// The prefix from which to start the iteration from. + /// + /// This is inclusive and the iteration will include the key which is specified here. + pub start_at: Option>, + + /// If this is `true` then the iteration will *not* include + /// the key specified in `start_at`, if there is such a key. + pub start_at_exclusive: bool, + + /// The info of the child trie over which to iterate over. + pub child_info: Option, +} + +/// A raw iterator over the `BenchmarkingState`. +pub struct RawIter { + pub(crate) args: RawIterArgs, + complete: bool, + _phantom: PhantomData, +} + +impl sp_state_machine::StorageIterator> + for RawIter +{ + type Backend = ForkedLazyBackend; + type Error = String; + + fn next_key( + &mut self, + backend: &Self::Backend, + ) -> Option> { + use sp_state_machine::Backend; + + let result = if let Some(start_at) = self.args.start_at.clone() { + let maybe_key = (*backend) + .next_storage_key(start_at.as_slice()) + .ok() + .flatten(); + self.args.start_at = maybe_key.clone(); + maybe_key.map(|v| Ok::(v)) + } else { + None + }; + + if result.is_none() { + self.complete = true; + } + + result + } + + fn next_pair( + &mut self, + backend: &Self::Backend, + ) -> Option> + { + use sp_state_machine::Backend; + + let result = if let Some(start_at) = self.args.start_at.clone() { + let maybe_key = (*backend) + .next_storage_key(start_at.as_slice()) + .ok() + .flatten(); + self.args.start_at = maybe_key.clone(); + + let maybe_value = maybe_key + .clone() + .map(|key| (*backend).storage(key.as_slice()).ok()) + .flatten() + .flatten(); + + match (maybe_key, maybe_value) { + (Some(key), Some(value)) => Some(Ok((key, 
value))), + _ => None, + } + } else { + None + }; + + if result.is_none() { + self.complete = true; + } + + result + } + + fn was_complete(&self) -> bool { + self.complete + } +} + +#[derive(Debug, Clone)] +pub struct ForkedLazyBackend { + rpc_client: Arc, + block_hash: Option, + fork_block: Block::Hash, + pub(crate) db: Arc>>>, + before_fork: bool, +} + +impl sp_state_machine::Backend> + for ForkedLazyBackend +{ + type Error = as sp_state_machine::Backend>>::Error; + type TrieBackendStorage = PrefixedMemoryDB>; + type RawIter = RawIter; + + fn storage(&self, key: &[u8]) -> Result, Self::Error> { + let remote_fetch = |block: Option| { + let result = self.rpc_client.storage(StorageKey(key.to_vec()), block); + + match result { + Ok(data) => Ok(data.map(|v| v.0)), + Err(err) => Err(format!("Failed to fetch storage from RPC: {:?}", err).into()), + } + }; + + if self.before_fork { + return remote_fetch(self.block_hash); + } + + let maybe_storage = self.db.read().storage(key); + let value = match maybe_storage { + Ok(Some(data)) => Ok(Some(data)), + _ => remote_fetch(Some(self.fork_block)), + }; + + if let Ok(Some(ref val)) = value { + let mut entries: HashMap, StorageCollection> = Default::default(); + entries.insert(None, vec![(key.to_vec(), Some(val.clone()))]); + + self.db.write().insert(entries, StateVersion::V1); + } + + value + } + + fn storage_hash( + &self, + key: &[u8], + ) -> Result as sp_core::Hasher>::Out>, Self::Error> { + let remote_fetch = |block: Option| { + let result = self + .rpc_client + .storage_hash(StorageKey(key.to_vec()), block); + + match result { + Ok(hash) => Ok(hash), + Err(err) => Err(format!("Failed to fetch storage hash from RPC: {:?}", err).into()), + } + }; + + if self.before_fork { + return remote_fetch(self.block_hash); + } + + let storage_hash = self.db.read().storage_hash(key); + match storage_hash { + Ok(Some(hash)) => Ok(Some(hash)), + _ => remote_fetch(Some(self.fork_block)), + } + } + + fn closest_merkle_value( + &self, + _key: 
&[u8], + ) -> Result< + Option as sp_core::Hasher>::Out>>, + Self::Error, + > { + panic!("closest_merkle_value: unsupported feature for lazy loading") + } + + fn child_closest_merkle_value( + &self, + _child_info: &sp_storage::ChildInfo, + _key: &[u8], + ) -> Result< + Option as sp_core::Hasher>::Out>>, + Self::Error, + > { + panic!("child_closest_merkle_value: unsupported feature for lazy loading") + } + + fn child_storage( + &self, + _child_info: &sp_storage::ChildInfo, + _key: &[u8], + ) -> Result, Self::Error> { + panic!("child_storage: unsupported feature for lazy loading"); + } + + fn child_storage_hash( + &self, + _child_info: &sp_storage::ChildInfo, + _key: &[u8], + ) -> Result as sp_core::Hasher>::Out>, Self::Error> { + panic!("child_storage_hash: unsupported feature for lazy loading"); + } + + fn next_storage_key( + &self, + key: &[u8], + ) -> Result, Self::Error> { + let remote_fetch = |block: Option| { + let result = self + .rpc_client + .storage_keys_paged(StorageKey(key.to_vec()), block); + + match result { + Ok(keys) => { + let mut entries: HashMap, StorageCollection> = + Default::default(); + let _ = self + .rpc_client + .query_storage_at( + keys.iter().map(|item| StorageKey(item.clone())).collect(), + self.block_hash, + ) + .map(|keys| { + for (key, value) in &keys { + entries.insert( + None, + vec![(key.0.to_vec(), value.clone().map(|v| v.0))], + ); + } + }); + + self.db.write().insert(entries, StateVersion::V0); + Ok(keys.get(1).cloned()) + } + Err(err) => { + Err(format!("Failed to fetch `next storage key` from RPC: {:?}", err).into()) + } + } + }; + + if self.before_fork { + return remote_fetch(self.block_hash); + } + + let next_storage_key = self.db.read().next_storage_key(key); + match next_storage_key { + Ok(Some(key)) => Ok(Some(key)), + _ => remote_fetch(Some(self.fork_block)), + } + } + + fn next_child_storage_key( + &self, + _child_info: &sp_storage::ChildInfo, + _key: &[u8], + ) -> Result, Self::Error> { + 
panic!("next_child_storage_key: unsupported feature for lazy loading"); + } + + fn storage_root<'a>( + &self, + delta: impl Iterator)>, + state_version: StateVersion, + ) -> ( + as sp_core::Hasher>::Out, + BackendTransaction>, + ) + where + as sp_core::Hasher>::Out: Ord, + { + self.db.read().storage_root(delta, state_version) + } + + fn child_storage_root<'a>( + &self, + child_info: &sp_storage::ChildInfo, + delta: impl Iterator)>, + state_version: StateVersion, + ) -> ( + as sp_core::Hasher>::Out, + bool, + BackendTransaction>, + ) + where + as sp_core::Hasher>::Out: Ord, + { + self.db + .read() + .child_storage_root(child_info, delta, state_version) + } + + fn raw_iter(&self, args: sp_state_machine::IterArgs) -> Result { + let mut clone: RawIterArgs = Default::default(); + clone.start_at_exclusive = args.start_at_exclusive.clone(); + clone.child_info = args.child_info.clone(); + clone.prefix = args.prefix.map(|v| v.to_vec()); + clone.start_at = args.start_at.map(|v| v.to_vec()); + + Ok(RawIter:: { + args: clone, + complete: false, + _phantom: Default::default(), + }) + } + + fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { + self.db.read().register_overlay_stats(stats) + } + + fn usage_info(&self) -> sp_state_machine::UsageInfo { + self.db.read().usage_info() + } +} + +impl sp_state_machine::backend::AsTrieBackend> for ForkedLazyBackend { + type TrieBackendStorage = PrefixedMemoryDB>; + + fn as_trie_backend( + &self, + ) -> &sp_state_machine::TrieBackend> { + unimplemented!("`as_trie_backend` is not supported in lazy loading mode.") + } +} + +/// In-memory backend. Keeps all states and blocks in memory. +/// +/// > **Warning**: Doesn't support all the features necessary for a proper database. Only use this +/// > struct for testing purposes. Do **NOT** use in production. 
+pub struct Backend { + pub(crate) rpc_client: Arc, + states: RwLock>>, + pub(crate) blockchain: Blockchain, + import_lock: RwLock<()>, + pinned_blocks: RwLock>, + fork_checkpoint: Block::Header, +} + +impl Backend { + fn new(rpc_client: Arc, fork_checkpoint: Block::Header) -> Self { + Backend { + rpc_client: rpc_client.clone(), + states: RwLock::new(HashMap::new()), + blockchain: Blockchain::new(rpc_client), + import_lock: Default::default(), + pinned_blocks: Default::default(), + fork_checkpoint, + } + } +} + +impl backend::AuxStore for Backend { + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + _insert: I, + _delete: D, + ) -> sp_blockchain::Result<()> { + unimplemented!("`insert_aux` is not supported in lazy loading mode.") + } + + fn get_aux(&self, _key: &[u8]) -> sp_blockchain::Result>> { + unimplemented!("`get_aux` is not supported in lazy loading mode.") + } +} + +impl backend::Backend for Backend { + type BlockImportOperation = BlockImportOperation; + type Blockchain = Blockchain; + type State = ForkedLazyBackend; + type OffchainStorage = InMemOffchainStorage; + + fn begin_operation(&self) -> sp_blockchain::Result { + let old_state = self.state_at(Default::default())?; + Ok(BlockImportOperation { + pending_block: None, + old_state, + new_state: None, + aux: Default::default(), + finalized_blocks: Default::default(), + set_head: None, + before_fork: false, + }) + } + + fn begin_state_operation( + &self, + operation: &mut Self::BlockImportOperation, + block: Block::Hash, + ) -> sp_blockchain::Result<()> { + operation.old_state = self.state_at(block)?; + Ok(()) + } + + fn commit_operation(&self, operation: Self::BlockImportOperation) -> sp_blockchain::Result<()> { + if !operation.finalized_blocks.is_empty() { + for (block, justification) in operation.finalized_blocks { + self.blockchain.finalize_header(block, justification)?; + } + } + + if let Some(pending_block) = operation.pending_block { + let old_state = 
&operation.old_state; + let (header, body, justification) = pending_block.block.into_inner(); + + let hash = header.hash(); + + let new_state = match operation.new_state.clone() { + Some(state) => Arc::new(RwLock::new( + old_state + .db + .read() + .update_backend(*header.state_root(), state), + )), + None => old_state.db.clone(), + }; + + let new_state = ForkedLazyBackend { + rpc_client: self.rpc_client.clone(), + block_hash: Some(hash.clone()), + fork_block: self.fork_checkpoint.hash(), + db: new_state, + before_fork: operation.before_fork, + }; + self.states.write().insert(hash, new_state); + + self.blockchain + .insert(hash, header, justification, body, pending_block.state)?; + } + + if !operation.aux.is_empty() { + self.blockchain.write_aux(operation.aux); + } + + if let Some(set_head) = operation.set_head { + self.blockchain.set_head(set_head)?; + } + + Ok(()) + } + + fn finalize_block( + &self, + hash: Block::Hash, + justification: Option, + ) -> sp_blockchain::Result<()> { + self.blockchain.finalize_header(hash, justification) + } + + fn append_justification( + &self, + hash: Block::Hash, + justification: Justification, + ) -> sp_blockchain::Result<()> { + self.blockchain.append_justification(hash, justification) + } + + fn blockchain(&self) -> &Self::Blockchain { + &self.blockchain + } + + fn usage_info(&self) -> Option { + None + } + + fn offchain_storage(&self) -> Option { + None + } + + fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result { + if hash == Default::default() { + return Ok(ForkedLazyBackend:: { + rpc_client: self.rpc_client.clone(), + block_hash: Some(hash), + fork_block: self.fork_checkpoint.hash(), + db: Default::default(), + before_fork: true, + }); + } + + let (backend, should_write) = self + .states + .read() + .get(&hash) + .cloned() + .map(|state| (state, false)) + .unwrap_or_else(|| { + let header: Block::Header = self + .rpc_client + .header::(Some(hash)) + .ok() + .flatten() + .expect("block header"); + + let checkpoint 
= self.fork_checkpoint.clone(); + let state = if header.number().gt(checkpoint.number()) { + let parent = self.state_at(*header.parent_hash()).ok(); + + ForkedLazyBackend:: { + rpc_client: self.rpc_client.clone(), + block_hash: Some(hash), + fork_block: checkpoint.hash(), + db: parent.map_or(Default::default(), |p| p.db), + before_fork: false, + } + } else { + ForkedLazyBackend:: { + rpc_client: self.rpc_client.clone(), + block_hash: Some(hash), + fork_block: checkpoint.hash(), + db: Default::default(), + before_fork: true, + } + }; + + (state, true) + }); + + if should_write { + self.states.write().insert(hash, backend.clone()); + } + + Ok(backend) + } + + fn revert( + &self, + _n: NumberFor, + _revert_finalized: bool, + ) -> sp_blockchain::Result<(NumberFor, HashSet)> { + Ok((Zero::zero(), HashSet::new())) + } + + fn remove_leaf_block(&self, _hash: Block::Hash) -> sp_blockchain::Result<()> { + Ok(()) + } + + fn get_import_lock(&self) -> &RwLock<()> { + &self.import_lock + } + + fn requires_full_sync(&self) -> bool { + false + } + + fn pin_block(&self, hash: ::Hash) -> blockchain::Result<()> { + let mut blocks = self.pinned_blocks.write(); + *blocks.entry(hash).or_default() += 1; + Ok(()) + } + + fn unpin_block(&self, hash: ::Hash) { + let mut blocks = self.pinned_blocks.write(); + blocks + .entry(hash) + .and_modify(|counter| *counter -= 1) + .or_insert(-1); + } +} + +impl backend::LocalBackend for Backend {} + +/// Check that genesis storage is valid. 
+pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { + if storage + .top + .iter() + .any(|(k, _)| well_known_keys::is_child_storage_key(k)) + { + return Err(sp_blockchain::Error::InvalidState); + } + + if storage + .children_default + .keys() + .any(|child_key| !well_known_keys::is_child_storage_key(child_key)) + { + return Err(sp_blockchain::Error::InvalidState); + } + + Ok(()) +} + +#[derive(Debug, Clone)] +pub struct RPC { + http_client: HttpClient, + delay_between_requests_ms: u64, + max_retries_per_request: usize, + counter: Arc>, +} + +impl RPC { + pub fn new( + http_client: HttpClient, + delay_between_requests_ms: u64, + max_retries_per_request: usize, + ) -> Self { + Self { + http_client, + delay_between_requests_ms, + max_retries_per_request, + counter: Default::default(), + } + } + pub fn system_chain(&self) -> Result { + let request = &|| { + substrate_rpc_client::SystemApi::::system_chain(&self.http_client) + }; + + self.block_on(request) + } + + pub fn system_properties( + &self, + ) -> Result { + let request = &|| { + substrate_rpc_client::SystemApi::::system_properties( + &self.http_client, + ) + }; + + self.block_on(request) + } + + pub fn system_name(&self) -> Result { + let request = &|| { + substrate_rpc_client::SystemApi::::system_name(&self.http_client) + }; + + self.block_on(request) + } + + pub fn block( + &self, + hash: Option, + ) -> Result>, jsonrpsee::core::ClientError> + where + Block: BlockT + DeserializeOwned, + Hash: 'static + Send + Sync + sp_runtime::Serialize + DeserializeOwned, + { + let request = &|| { + substrate_rpc_client::ChainApi::< + BlockNumber, + Hash, + Block::Header, + SignedBlock, + >::block(&self.http_client, hash.clone()) + }; + + self.block_on(request) + } + + pub fn block_hash( + &self, + block_number: Option, + ) -> Result, jsonrpsee::core::ClientError> { + let request = &|| { + substrate_rpc_client::ChainApi::< + BlockNumber, + Block::Hash, + Block::Header, + SignedBlock, + 
>::block_hash( + &self.http_client, + block_number.map(|n| ListOrValue::Value(NumberOrHex::Number(n.into()))), + ) + }; + + self.block_on(request).map(|ok| match ok { + ListOrValue::List(v) => v.get(0).map_or(None, |some| *some), + ListOrValue::Value(v) => v, + }) + } + + pub fn header( + &self, + hash: Option, + ) -> Result, jsonrpsee::core::ClientError> { + let request = &|| { + substrate_rpc_client::ChainApi::< + BlockNumber, + Block::Hash, + Block::Header, + SignedBlock, + >::header(&self.http_client, hash) + }; + + self.block_on(request) + } + + pub fn storage_hash< + Hash: 'static + Clone + Sync + Send + DeserializeOwned + sp_runtime::Serialize, + >( + &self, + key: StorageKey, + at: Option, + ) -> Result, jsonrpsee::core::ClientError> { + let request = &|| { + substrate_rpc_client::StateApi::::storage_hash( + &self.http_client, + key.clone(), + at.clone(), + ) + }; + + self.block_on(request) + } + + pub fn storage< + Hash: 'static + Clone + Sync + Send + DeserializeOwned + sp_runtime::Serialize + core::fmt::Debug, + >( + &self, + key: StorageKey, + at: Option, + ) -> Result, jsonrpsee::core::ClientError> { + let request = &|| { + substrate_rpc_client::StateApi::::storage( + &self.http_client, + key.clone(), + at.clone(), + ) + }; + + self.block_on(request) + } + + pub fn storage_keys_paged< + Hash: 'static + Clone + Sync + Send + DeserializeOwned + sp_runtime::Serialize, + >( + &self, + key: StorageKey, + at: Option, + ) -> Result, jsonrpsee::core::ClientError> { + let request = &|| { + substrate_rpc_client::StateApi::::storage_keys_paged( + &self.http_client, + Some(key.clone()), + 2, + None, + at.clone(), + ) + }; + let result = self.block_on(request); + + let keys = match result { + Ok(result) => result.iter().map(|item| item.0.clone()).collect(), + Err(err) => panic!("failed in `storage_keys_paged`: {:?}", err), + }; + + Ok(keys) + } + + pub fn query_storage_at< + Hash: 'static + Clone + Sync + Send + DeserializeOwned + sp_runtime::Serialize, + >( + 
&self, + keys: Vec, + from_block: Option, + ) -> Result)>, jsonrpsee::core::ClientError> { + let request = &|| { + substrate_rpc_client::StateApi::::query_storage_at( + &self.http_client, + keys.clone(), + from_block.clone(), + ) + }; + let result = self.block_on(request); + + match result { + Ok(result) => Ok(result + .iter() + .flat_map(|item| item.changes.clone()) + .collect()), + Err(err) => Err(err), + } + } + + fn block_on(&self, f: &dyn Fn() -> F) -> Result + where + F: Future>, + { + use tokio::runtime::Handle; + + tokio::task::block_in_place(move || { + Handle::current().block_on(async move { + let delay_between_requests = Duration::from_millis(self.delay_between_requests_ms); + + // TODO: Remove debug information + let start = std::time::Instant::now(); + self.counter.write().add_assign(1); + log::debug!("sending request: {}", self.counter.read()); + + // Explicit request delay, to avoid getting 429 errors + let _ = tokio::time::sleep(delay_between_requests).await; + + // Retry request in case of failure + // The maximum number of retries is specified by `self.max_retries_per_request` + let retry_strategy = + FixedInterval::new(delay_between_requests).take(self.max_retries_per_request); + let result = Retry::spawn(retry_strategy, f).await; + + log::debug!( + "Completed request (id: {}, successful: {}, elapsed_time: {:?})", + self.counter.read(), + result.is_ok(), + start.elapsed() + ); + + result + }) + }) + } +} + +/// Create an instance of a lazy loading memory backend. 
+pub fn new_lazy_loading_backend( + config: &mut Configuration, + lazy_loading_config: &LazyLoadingConfig, +) -> Result>, Error> +where + Block: BlockT + DeserializeOwned, + Block::Hash: From, +{ + use sc_client_api::Backend as _; + use sc_client_api::BlockImportOperation as _; + let uri: String = lazy_loading_config.state_rpc.clone().into(); + + let http_client = jsonrpsee::http_client::HttpClientBuilder::default() + .max_request_size(u32::MAX) + .max_response_size(u32::MAX) + .request_timeout(Duration::from_secs(10)) + .build(uri) + .map_err(|e| { + sp_blockchain::Error::Backend( + format!("failed to build http client: {:?}", e).to_string(), + ) + })?; + + let rpc = RPC::new(http_client, 100, 10); + let block_hash = lazy_loading_config + .from_block + .map(|block| Into::::into(block)); + let checkpoint: Block = rpc + .block::(block_hash) + .ok() + .flatten() + .expect("Fetching fork checkpoint") + .block; + + let backend = Arc::new(Backend::new(Arc::new(rpc), checkpoint.header().clone())); + + let chain_name = backend + .rpc_client + .system_chain() + .expect("Should fetch chain id"); + let chain_properties = backend + .rpc_client + .system_properties() + .expect("Should fetch chain properties"); + + let spec_builder = chain_spec::test_spec::lazy_loading_spec_builder(Default::default()) + .with_name(chain_name.as_str()) + .with_properties(chain_properties); + config.chain_spec = Box::new(spec_builder.build()); + + let base_overrides = + state_overrides::base_state_overrides(lazy_loading_config.runtime_override.clone()); + let custom_overrides = if let Some(path) = lazy_loading_config.state_overrides_path.clone() { + state_overrides::read(path)? 
+ } else { + Default::default() + }; + let state_overrides: Vec<(Vec, Vec)> = [base_overrides, custom_overrides] + .concat() + .iter() + .map(|entry| match entry { + StateEntry::Concrete(v) => { + let key = [ + &twox_128(v.pallet.as_bytes()), + &twox_128(v.storage.as_bytes()), + v.key.clone().unwrap_or(Vec::new()).as_slice(), + ] + .concat(); + + (key, v.value.clone()) + } + StateEntry::Raw(raw) => (raw.key.clone(), raw.value.clone()), + }) + .collect(); + + let _ = helpers::produce_genesis_block(backend.clone()); + + let mut op = backend.begin_operation().unwrap(); + op.before_fork = true; + + let extrinsics: Vec = checkpoint.extrinsics().to_vec(); + + op.set_block_data( + checkpoint.header().clone(), + Some(extrinsics.clone()), + None, + None, + NewBlockState::Final, + )?; + + backend.commit_operation(op)?; + + // Produce first block after the fork + let _ = helpers::produce_first_block(backend.clone(), checkpoint, state_overrides)?; + + Ok(backend) +} diff --git a/node/service/src/lazy_loading/call_executor.rs b/node/service/src/lazy_loading/call_executor.rs new file mode 100644 index 0000000000..ae3af5a793 --- /dev/null +++ b/node/service/src/lazy_loading/call_executor.rs @@ -0,0 +1,346 @@ +// Copyright 2024 Moonbeam foundation +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +use crate::lazy_loading::wasm_override::WasmOverride; +use crate::lazy_loading::wasm_substitutes::WasmSubstitutes; +use moonbeam_cli_opt::LazyLoadingConfig; +use sc_client_api::{ + backend, call_executor::CallExecutor, execution_extensions::ExecutionExtensions, HeaderBackend, +}; +use sc_executor::{NativeVersion, RuntimeVersion, RuntimeVersionOf}; +use sc_service::ClientConfig; +use sp_api::ProofRecorder; +use sp_core::traits::{CallContext, CodeExecutor, Externalities, RuntimeCode}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashingFor}, +}; +use sp_state_machine::{backend::AsTrieBackend, Ext, OverlayedChanges, StateMachine, StorageProof}; +use sp_version::{GetNativeVersion, GetRuntimeVersionAt}; +use std::{cell::RefCell, path::PathBuf, sync::Arc}; + +/// Call executor that executes methods locally, querying all required +/// data from local backend. +pub struct LazyLoadingCallExecutor { + backend: Arc, + lazy_loading_config: LazyLoadingConfig, + executor: E, + wasm_override: Arc>, + wasm_substitutes: WasmSubstitutes, + wasmtime_precompiled_path: Option, + execution_extensions: Arc>, +} + +impl LazyLoadingCallExecutor +where + E: CodeExecutor + RuntimeVersionOf + Clone + 'static, + B: backend::Backend, + Block::Hash: From, +{ + /// Creates new instance of local call executor. 
+ pub fn new( + backend: Arc, + lazy_loading_config: &LazyLoadingConfig, + executor: E, + client_config: ClientConfig, + execution_extensions: ExecutionExtensions, + ) -> sp_blockchain::Result { + let wasm_override = client_config + .wasm_runtime_overrides + .as_ref() + .map(|p| WasmOverride::new(p.clone(), &executor)) + .transpose()?; + + let wasm_substitutes = WasmSubstitutes::new( + client_config.wasm_runtime_substitutes, + executor.clone(), + backend.clone(), + )?; + + Ok(LazyLoadingCallExecutor { + backend, + lazy_loading_config: (*lazy_loading_config).clone(), + executor, + wasm_override: Arc::new(wasm_override), + wasm_substitutes, + wasmtime_precompiled_path: client_config.wasmtime_precompiled, + execution_extensions: Arc::new(execution_extensions), + }) + } + + /// Check if local runtime code overrides are enabled and one is available + /// for the given `BlockId`. If yes, return it; otherwise return the same + /// `RuntimeCode` instance that was passed. + fn check_override<'a>( + &'a self, + onchain_code: RuntimeCode<'a>, + state: &B::State, + hash: Block::Hash, + ) -> sp_blockchain::Result<(RuntimeCode<'a>, RuntimeVersion)> + where + Block: BlockT, + B: backend::Backend, + { + let on_chain_version = self.on_chain_runtime_version(&onchain_code, state)?; + let code_and_version = if let Some(d) = self.wasm_override.as_ref().as_ref().and_then(|o| { + o.get( + &on_chain_version.spec_version, + onchain_code.heap_pages, + &on_chain_version.spec_name, + ) + }) { + log::debug!(target: "wasm_overrides", "using WASM override for block {}", hash); + d + } else if let Some(s) = + self.wasm_substitutes + .get(on_chain_version.spec_version, onchain_code.heap_pages, hash) + { + log::debug!(target: "wasm_substitutes", "Using WASM substitute for block {:?}", hash); + s + } else { + log::debug!( + target: "wasm_overrides", + "Neither WASM override nor substitute available for block {hash}, using onchain code", + ); + (onchain_code, on_chain_version) + }; + + 
Ok(code_and_version) + } + + /// Returns the on chain runtime version. + fn on_chain_runtime_version( + &self, + code: &RuntimeCode, + state: &B::State, + ) -> sp_blockchain::Result { + let mut overlay = OverlayedChanges::default(); + + let mut ext = Ext::new(&mut overlay, state, None); + + self.executor + .runtime_version(&mut ext, code) + .map_err(|e| sp_blockchain::Error::VersionInvalid(e.to_string())) + } +} + +impl Clone + for LazyLoadingCallExecutor +where + E: Clone, +{ + fn clone(&self) -> Self { + LazyLoadingCallExecutor { + backend: self.backend.clone(), + lazy_loading_config: self.lazy_loading_config.clone(), + executor: self.executor.clone(), + wasm_override: self.wasm_override.clone(), + wasm_substitutes: self.wasm_substitutes.clone(), + wasmtime_precompiled_path: self.wasmtime_precompiled_path.clone(), + execution_extensions: self.execution_extensions.clone(), + } + } +} + +impl CallExecutor for LazyLoadingCallExecutor +where + B: backend::Backend, + E: CodeExecutor + RuntimeVersionOf + Clone + 'static, + Block: BlockT + sp_runtime::DeserializeOwned, + Block::Hash: From, +{ + type Error = E::Error; + + type Backend = B; + + fn execution_extensions(&self) -> &ExecutionExtensions { + &self.execution_extensions + } + + fn call( + &self, + at_hash: Block::Hash, + method: &str, + call_data: &[u8], + context: CallContext, + ) -> sp_blockchain::Result> { + let mut changes = OverlayedChanges::default(); + let at_number = self + .backend + .blockchain() + .expect_block_number_from_id(&BlockId::Hash(at_hash))?; + let state = self.backend.state_at(at_hash)?; + + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + let runtime_code = state_runtime_code + .runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + + let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + + let mut extensions = self.execution_extensions.extensions(at_hash, at_number); + + let mut sm = StateMachine::new( + &state, + &mut 
changes, + &self.executor, + method, + call_data, + &mut extensions, + &runtime_code, + context, + ) + .set_parent_hash(at_hash); + + sm.execute().map_err(Into::into) + } + + fn contextual_call( + &self, + at_hash: Block::Hash, + method: &str, + call_data: &[u8], + changes: &RefCell>>, + // TODO: Confirm that `recorder` is not needed. + _recorder: &Option>, + call_context: CallContext, + extensions: &RefCell, + ) -> Result, sp_blockchain::Error> { + let state = self.backend.state_at(at_hash)?; + + let changes = &mut *changes.borrow_mut(); + + // It is important to extract the runtime code here before we create the proof + // recorder to not record it. We also need to fetch the runtime code from `state` to + // make sure we use the caching layers. + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + + let runtime_code = state_runtime_code + .runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + let mut extensions = extensions.borrow_mut(); + + let mut state_machine = StateMachine::new( + &state, + changes, + &self.executor, + method, + call_data, + &mut extensions, + &runtime_code, + call_context, + ) + .set_parent_hash(at_hash); + state_machine.execute().map_err(Into::into) + } + + fn runtime_version(&self, at_hash: Block::Hash) -> sp_blockchain::Result { + let state = self.backend.state_at(at_hash)?; + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + + let runtime_code = state_runtime_code + .runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + self.check_override(runtime_code, &state, at_hash) + .map(|(_, v)| v) + } + + fn prove_execution( + &self, + at_hash: Block::Hash, + method: &str, + call_data: &[u8], + ) -> sp_blockchain::Result<(Vec, StorageProof)> { + let at_number = self + .backend + .blockchain() + .expect_block_number_from_id(&BlockId::Hash(at_hash))?; + let state = 
self.backend.state_at(at_hash)?; + + let trie_backend = state.as_trie_backend(); + + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); + let runtime_code = state_runtime_code + .runtime_code() + .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, &state, at_hash)?.0; + + sp_state_machine::prove_execution_on_trie_backend( + trie_backend, + &mut Default::default(), + &self.executor, + method, + call_data, + &runtime_code, + &mut self.execution_extensions.extensions(at_hash, at_number), + ) + .map_err(Into::into) + } +} + +impl RuntimeVersionOf for LazyLoadingCallExecutor +where + E: RuntimeVersionOf, + Block: BlockT, +{ + fn runtime_version( + &self, + ext: &mut dyn Externalities, + runtime_code: &sp_core::traits::RuntimeCode, + ) -> Result { + RuntimeVersionOf::runtime_version(&self.executor, ext, runtime_code) + } +} + +impl GetRuntimeVersionAt for LazyLoadingCallExecutor +where + B: backend::Backend, + E: CodeExecutor + RuntimeVersionOf + Clone + 'static, + Block: BlockT + sp_runtime::DeserializeOwned, + Block::Hash: From, +{ + fn runtime_version(&self, at: Block::Hash) -> Result { + CallExecutor::runtime_version(self, at).map_err(|e| e.to_string()) + } +} + +impl GetNativeVersion for LazyLoadingCallExecutor +where + B: backend::Backend, + E: CodeExecutor + sp_version::GetNativeVersion + Clone + 'static, + Block: BlockT + sp_runtime::DeserializeOwned, +{ + fn native_version(&self) -> &NativeVersion { + self.executor.native_version() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sc_executor::{NativeElseWasmExecutor, WasmExecutor}; + use substrate_test_runtime_client::LocalExecutorDispatch; + + fn executor() -> NativeElseWasmExecutor { + NativeElseWasmExecutor::new_with_wasm_executor( + WasmExecutor::builder() + .with_max_runtime_instances(1) + .with_runtime_cache_size(2) + .build(), + ) + } +} diff --git a/node/service/src/lazy_loading/client.rs 
b/node/service/src/lazy_loading/client.rs new file mode 100644 index 0000000000..0588fdcb67 --- /dev/null +++ b/node/service/src/lazy_loading/client.rs @@ -0,0 +1,80 @@ +// Copyright 2024 Moonbeam foundation +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +use crate::lazy_loading; +use cumulus_primitives_core::BlockT; +use moonbeam_cli_opt::LazyLoadingConfig; +use sc_chain_spec::BuildGenesisBlock; +use sc_client_api::execution_extensions::ExecutionExtensions; +use sc_client_api::{BadBlocks, ForkBlocks}; +use sc_executor::RuntimeVersionOf; +use sc_service::client::Client; +use sc_service::ClientConfig; +use sc_telemetry::TelemetryHandle; +use sp_core::traits::{CodeExecutor, SpawnNamed}; +use std::sync::Arc; + +pub fn new_client( + backend: Arc, + executor: E, + genesis_block_builder: G, + fork_blocks: ForkBlocks, + bad_blocks: BadBlocks, + execution_extensions: ExecutionExtensions, + spawn_handle: Box, + prometheus_registry: Option, + telemetry: Option, + config: ClientConfig, + lazy_loading_config: &LazyLoadingConfig, +) -> Result< + Client< + Backend, + lazy_loading::call_executor::LazyLoadingCallExecutor, + Block, + RA, + >, + sp_blockchain::Error, +> + where + Block: BlockT + sp_runtime::DeserializeOwned, + Block::Hash: From, + E: CodeExecutor + RuntimeVersionOf, + Backend: sc_client_api::Backend + 'static, + G: BuildGenesisBlock< + Block, + BlockImportOperation = 
>::BlockImportOperation + > +{ + let executor = lazy_loading::call_executor::LazyLoadingCallExecutor::new( + backend.clone(), + lazy_loading_config, + executor, + config.clone(), + execution_extensions, + )?; + + Client::new( + backend, + executor, + spawn_handle, + genesis_block_builder, + fork_blocks, + bad_blocks, + prometheus_registry, + telemetry, + config, + ) +} diff --git a/node/service/src/lazy_loading/helpers.rs b/node/service/src/lazy_loading/helpers.rs new file mode 100644 index 0000000000..085b9f1f88 --- /dev/null +++ b/node/service/src/lazy_loading/helpers.rs @@ -0,0 +1,107 @@ +// Copyright 2024 Moonbeam foundation +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +use crate::lazy_loading; +use crate::lazy_loading::backend::RPC; +use cumulus_primitives_core::BlockT; +use sc_client_api::{Backend, BlockImportOperation, NewBlockState}; +use sp_core::{twox_128, H256}; +use sp_runtime::traits::{Header, One}; +use sp_runtime::Saturating; +use sp_storage::{StateVersion, Storage, StorageKey}; +use std::sync::Arc; + +pub fn produce_genesis_block( + backend: Arc>, +) -> sp_blockchain::Result<()> { + let mut op = backend.begin_operation()?; + op.before_fork = true; + + let genesis_block_hash: TBl::Hash = backend + .rpc_client + .block_hash::(Some(0)) + .unwrap() + .expect("Not able to obtain genesis block hash"); + + let genesis_block = backend + .rpc_client + .block::(Some(genesis_block_hash)) + .unwrap() + .unwrap() + .block; + + let _ = op.set_block_data( + genesis_block.header().clone(), + Some(genesis_block.extrinsics().to_vec()), + None, + None, + NewBlockState::Final, + ); + + backend.commit_operation(op) +} + +pub fn produce_first_block( + backend: Arc>, + last_block: Block, + state_overrides: Vec<(Vec, Vec)>, +) -> sp_blockchain::Result<()> { + use sc_client_api::HeaderBackend; + let mut op = backend.begin_operation()?; + + let state_root = op.reset_storage( + Storage { + top: state_overrides.into_iter().collect(), + children_default: Default::default(), + }, + StateVersion::V1, + )?; + + let head_info = backend.blockchain.info(); + let next_block_number = head_info.finalized_number.saturating_add(One::one()); + + let header: Block::Header = Block::Header::new( + next_block_number, + last_block.header().extrinsics_root().clone(), + state_root, + head_info.finalized_hash, + Default::default(), + ); + + let _ = op.set_block_data( + header.clone(), + Some(last_block.extrinsics().to_vec()), + None, + None, + NewBlockState::Final, + ); + + backend.commit_operation(op) +} + +pub fn get_parachain_id(rpc_client: Arc) -> Option { + let key = [twox_128(b"ParachainInfo"), twox_128(b"ParachainId")].concat(); + let result = 
rpc_client.storage::(StorageKey(key), None); + + result + .map(|o| { + o.and_then(|data| { + ::decode(&mut data.0.as_slice()).ok() + }) + }) + .ok() + .flatten() +} diff --git a/node/service/src/lazy_loading/mod.rs b/node/service/src/lazy_loading/mod.rs new file mode 100644 index 0000000000..2f1e95988f --- /dev/null +++ b/node/service/src/lazy_loading/mod.rs @@ -0,0 +1,820 @@ +// Copyright 2024 Moonbeam foundation +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +use crate::{ + lazy_loading, open_frontier_backend, rpc, set_prometheus_registry, BlockImportPipeline, + ClientCustomizations, FrontierBlockImport, HostFunctions, PartialComponentsResult, + PendingConsensusDataProvider, RuntimeApiCollection, SOFT_DEADLINE_PERCENT, +}; +use cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig}; +use cumulus_primitives_core::{relay_chain, BlockT, ParaId}; +use cumulus_primitives_parachain_inherent::ParachainInherentData; +use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; +use fc_rpc::StorageOverrideHandler; +use fc_rpc_core::types::{FeeHistoryCache, FilterPool}; +use futures::{FutureExt, StreamExt}; +use moonbeam_cli_opt::{EthApi as EthApiCmd, LazyLoadingConfig, RpcConfig}; +use moonbeam_core_primitives::{Block, Hash}; +use nimbus_consensus::NimbusManualSealConsensusDataProvider; +use nimbus_primitives::NimbusId; +use parity_scale_codec::Encode; +use polkadot_primitives::{ + AbridgedHostConfiguration, AsyncBackingParams, PersistedValidationData, UpgradeGoAhead, +}; +use sc_chain_spec::{get_extension, BuildGenesisBlock, GenesisBlockBuilder}; +use sc_client_api::{Backend, BadBlocks, ExecutorProvider, ForkBlocks, StorageProvider}; +use sc_consensus_manual_seal::rpc::{ManualSeal, ManualSealApiServer}; +use sc_executor::{HeapAllocStrategy, RuntimeVersionOf, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; +use sc_network::config::FullNetworkConfiguration; +use sc_network::NetworkBackend; +use sc_network_common::sync::SyncMode; +use sc_service::{ + error::Error as ServiceError, ClientConfig, Configuration, Error, KeystoreContainer, + PartialComponents, TaskManager, +}; +use sc_telemetry::{TelemetryHandle, TelemetryWorker}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use sp_api::ConstructRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_core::traits::CodeExecutor; +use sp_core::{twox_128, H256}; +use sp_runtime::traits::NumberFor; +use sp_storage::StorageKey; +use 
std::collections::BTreeMap; +use std::str::FromStr; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +pub mod backend; +pub mod call_executor; +mod client; +mod helpers; +mod state_overrides; +mod wasm_override; +mod wasm_substitutes; + +/// Lazy loading client type. +pub type TLazyLoadingClient = sc_service::client::Client< + TLazyLoadingBackend, + TLazyLoadingCallExecutor, + TBl, + TRtApi, +>; + +/// Lazy loading client backend type. +pub type TLazyLoadingBackend = backend::Backend; + +/// Lazy loading client call executor type. +pub type TLazyLoadingCallExecutor = + call_executor::LazyLoadingCallExecutor, TExec>; + +/// Lazy loading parts type. +pub type TLazyLoadingParts = ( + TLazyLoadingClient, + Arc>, + KeystoreContainer, + TaskManager, +); + +type LazyLoadingClient = + TLazyLoadingClient>; +type LazyLoadingBackend = TLazyLoadingBackend; + +/// Create the initial parts of a lazy loading node. +pub fn new_lazy_loading_parts( + config: &mut Configuration, + lazy_loading_config: &LazyLoadingConfig, + telemetry: Option, + executor: TExec, +) -> Result, Error> +where + TBl: BlockT + sp_runtime::DeserializeOwned, + TBl::Hash: From, + TExec: CodeExecutor + RuntimeVersionOf + Clone, +{ + let backend = backend::new_lazy_loading_backend(config, &lazy_loading_config)?; + + let genesis_block_builder = GenesisBlockBuilder::new( + config.chain_spec.as_storage_builder(), + !config.no_genesis(), + backend.clone(), + executor.clone(), + )?; + + new_lazy_loading_parts_with_genesis_builder( + config, + lazy_loading_config, + telemetry, + executor, + backend, + genesis_block_builder, + ) +} + +/// Create the initial parts of a lazy loading node. 
+pub fn new_lazy_loading_parts_with_genesis_builder( + config: &Configuration, + lazy_loading_config: &LazyLoadingConfig, + telemetry: Option, + executor: TExec, + backend: Arc>, + genesis_block_builder: TBuildGenesisBlock, +) -> Result, Error> +where + TBl: BlockT + sp_runtime::DeserializeOwned, + TBl::Hash: From, + TExec: CodeExecutor + RuntimeVersionOf + Clone, + TBuildGenesisBlock: + BuildGenesisBlock< + TBl, + BlockImportOperation = as sc_client_api::backend::Backend< + TBl, + >>::BlockImportOperation, + >, +{ + let keystore_container = KeystoreContainer::new(&config.keystore)?; + + let task_manager = { + let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); + TaskManager::new(config.tokio_handle.clone(), registry)? + }; + + let chain_spec = &config.chain_spec; + let fork_blocks = get_extension::>(chain_spec.extensions()) + .cloned() + .unwrap_or_default(); + + let bad_blocks = get_extension::>(chain_spec.extensions()) + .cloned() + .unwrap_or_default(); + + let client = { + let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( + None, + Arc::new(executor.clone()), + ); + + let wasm_runtime_substitutes = config + .chain_spec + .code_substitutes() + .into_iter() + .map(|(n, c)| { + let number = NumberFor::::from_str(&n).map_err(|_| { + Error::Application(Box::from(format!( + "Failed to parse `{}` as block number for code substitutes. \ + In an old version the key for code substitute was a block hash. 
\ + Please update the chain spec to a version that is compatible with your node.", + n + ))) + })?; + Ok((number, c)) + }) + .collect::, Error>>()?; + + let client = client::new_client( + backend.clone(), + executor, + genesis_block_builder, + fork_blocks, + bad_blocks, + extensions, + Box::new(task_manager.spawn_handle()), + config + .prometheus_config + .as_ref() + .map(|config| config.registry.clone()), + telemetry, + ClientConfig { + offchain_worker_enabled: config.offchain_worker.enabled, + offchain_indexing_api: config.offchain_worker.indexing_enabled, + wasmtime_precompiled: config.wasmtime_precompiled.clone(), + wasm_runtime_overrides: config.wasm_runtime_overrides.clone(), + no_genesis: matches!( + config.network.sync_mode, + SyncMode::LightState { .. } | SyncMode::Warp { .. } + ), + wasm_runtime_substitutes, + enable_import_proof_recording: false, + }, + lazy_loading_config, + )?; + + client + }; + + Ok((client, backend, keystore_container, task_manager)) +} + +/// Builds the PartialComponents for a lazy loading node. 
+#[allow(clippy::type_complexity)] +pub fn new_lazy_loading_partial( + config: &mut Configuration, + rpc_config: &RpcConfig, + lazy_loading_config: &LazyLoadingConfig, +) -> PartialComponentsResult, LazyLoadingBackend> +where + RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: RuntimeApiCollection, + Customizations: ClientCustomizations + 'static, +{ + set_prometheus_registry(config, rpc_config.no_prometheus_prefix)?; + + // Use ethereum style for subscription ids + config.rpc_id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider)); + + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let heap_pages = config + .default_heap_pages + .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { + extra_pages: h as _, + }); + let mut wasm_builder = WasmExecutor::builder() + .with_execution_method(config.wasm_method) + .with_onchain_heap_alloc_strategy(heap_pages) + .with_offchain_heap_alloc_strategy(heap_pages) + .with_ignore_onchain_heap_pages(true) + .with_max_runtime_instances(config.max_runtime_instances) + .with_runtime_cache_size(config.runtime_cache_size); + + if let Some(ref wasmtime_precompiled_path) = config.wasmtime_precompiled { + wasm_builder = wasm_builder.with_wasmtime_precompiled_path(wasmtime_precompiled_path); + } + + let executor = wasm_builder.build(); + + let (client, backend, keystore_container, task_manager) = + new_lazy_loading_parts::( + config, + lazy_loading_config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; + + if let Some(block_number) = Customizations::first_block_number_compatible_with_ed25519_zebra() { + client + .execution_extensions() + 
.set_extensions_factory(sc_client_api::execution_extensions::ExtensionBeforeBlock::< + Block, + sp_io::UseDalekExt, + >::new(block_number)); + } + + let client = Arc::new(client); + + let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager + .spawn_handle() + .spawn("telemetry", None, worker.run()); + telemetry + }); + + let maybe_select_chain = Some(sc_consensus::LongestChain::new(backend.clone())); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); + + let filter_pool: Option = Some(Arc::new(Mutex::new(BTreeMap::new()))); + let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new())); + + let frontier_backend = Arc::new(open_frontier_backend(client.clone(), config, rpc_config)?); + let frontier_block_import = FrontierBlockImport::new(client.clone(), client.clone()); + + let create_inherent_data_providers = move |_, _| async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); + // Create a dummy parachain inherent data provider which is required to pass + // the checks by the para chain system. We use dummy values because in the 'pending context' + // neither do we have access to the real values nor do we need them. + let (relay_parent_storage_root, relay_chain_state) = + RelayStateSproofBuilder::default().into_state_root_and_proof(); + let vfp = PersistedValidationData { + // This is a hack to make `cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases` + // happy. Relay parent number can't be bigger than u32::MAX. 
+ relay_parent_number: u32::MAX, + relay_parent_storage_root, + ..Default::default() + }; + let parachain_inherent_data = ParachainInherentData { + validation_data: vfp, + relay_chain_state, + downward_messages: Default::default(), + horizontal_messages: Default::default(), + }; + Ok((time, parachain_inherent_data)) + }; + + let import_queue = nimbus_consensus::import_queue( + client.clone(), + frontier_block_import.clone(), + create_inherent_data_providers, + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + false, + )?; + let block_import = BlockImportPipeline::Dev(frontier_block_import); + + Ok(PartialComponents { + backend, + client, + import_queue, + keystore_container, + task_manager, + transaction_pool, + select_chain: maybe_select_chain, + other: ( + block_import, + filter_pool, + telemetry, + telemetry_worker_handle, + frontier_backend, + fee_history_cache, + ), + }) +} + +/// Builds a new lazy loading service. This service uses manual seal, and mocks +/// the parachain inherent. 
+#[sc_tracing::logging::prefix_logs_with("Lazy loading 🌗")] +pub async fn new_lazy_loading_service( + mut config: Configuration, + _author_id: Option, + sealing: moonbeam_cli_opt::Sealing, + rpc_config: RpcConfig, + lazy_loading_config: LazyLoadingConfig, + hwbench: Option, +) -> Result +where + RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, + RuntimeApi::RuntimeApi: RuntimeApiCollection, + Customizations: ClientCustomizations + 'static, + Net: NetworkBackend, +{ + use async_io::Timer; + use futures::Stream; + use sc_consensus_manual_seal::{run_manual_seal, EngineCommand, ManualSealParams}; + + let sc_service::PartialComponents { + client, + backend, + mut task_manager, + import_queue, + keystore_container, + select_chain: maybe_select_chain, + transaction_pool, + other: + ( + block_import_pipeline, + filter_pool, + mut telemetry, + _telemetry_worker_handle, + frontier_backend, + fee_history_cache, + ), + } = lazy_loading::new_lazy_loading_partial::( + &mut config, + &rpc_config, + &lazy_loading_config, + )?; + + let block_import = if let BlockImportPipeline::Dev(block_import) = block_import_pipeline { + block_import + } else { + return Err(ServiceError::Other( + "Block import pipeline is not dev".to_string(), + )); + }; + + let net_config = FullNetworkConfiguration::<_, _, Net>::new(&config.network); + + let metrics = Net::register_notification_metrics( + config.prometheus_config.as_ref().map(|cfg| &cfg.registry), + ); + + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + block_announce_validator_builder: None, + warp_sync_params: None, + net_config, + block_relay: None, + metrics, + })?; + + if config.offchain_worker.enabled { + task_manager.spawn_handle().spawn( + "offchain-workers-runner", + 
"offchain-work", + sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions { + runtime_api_provider: client.clone(), + keystore: Some(keystore_container.keystore()), + offchain_db: backend.offchain_storage(), + transaction_pool: Some(OffchainTransactionPoolFactory::new( + transaction_pool.clone(), + )), + network_provider: Arc::new(network.clone()), + is_validator: config.role.is_authority(), + enable_http_requests: true, + custom_extensions: move |_| vec![], + }) + .run(client.clone(), task_manager.spawn_handle()) + .boxed(), + ); + } + + let prometheus_registry = config.prometheus_registry().cloned(); + let overrides = Arc::new(StorageOverrideHandler::new(client.clone())); + let fee_history_limit = rpc_config.fee_history_limit; + let mut command_sink = None; + let mut dev_rpc_data = None; + let collator = config.role.is_authority(); + + if collator { + let mut env = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), + ); + env.set_soft_deadline(SOFT_DEADLINE_PERCENT); + + let commands_stream: Box> + Send + Sync + Unpin> = + match sealing { + moonbeam_cli_opt::Sealing::Instant => { + Box::new( + // This bit cribbed from the implementation of instant seal. + transaction_pool + .pool() + .validated_pool() + .import_notification_stream() + .map(|_| EngineCommand::SealNewBlock { + create_empty: false, + finalize: false, + parent_hash: None, + sender: None, + }), + ) + } + moonbeam_cli_opt::Sealing::Manual => { + let (sink, stream) = futures::channel::mpsc::channel(1000); + // Keep a reference to the other end of the channel. It goes to the RPC. 
+ command_sink = Some(sink); + Box::new(stream) + } + moonbeam_cli_opt::Sealing::Interval(millis) => Box::new(StreamExt::map( + Timer::interval(Duration::from_millis(millis)), + |_| EngineCommand::SealNewBlock { + create_empty: true, + finalize: false, + parent_hash: None, + sender: None, + }, + )), + }; + + let select_chain = maybe_select_chain.expect( + "`new_partial` builds a `LongestChainRule` when building dev service.\ + We specified the dev service when calling `new_partial`.\ + Therefore, a `LongestChainRule` is present. qed.", + ); + + let client_set_aside_for_cidp = client.clone(); + + // Create channels for mocked XCM messages. + let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::>(100); + let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec)>(100); + let additional_relay_offset = Arc::new(std::sync::atomic::AtomicU32::new(0)); + dev_rpc_data = Some(( + downward_xcm_sender, + hrmp_xcm_sender, + additional_relay_offset, + )); + + let client_clone = client.clone(); + let keystore_clone = keystore_container.keystore().clone(); + let maybe_provide_vrf_digest = + move |nimbus_id: NimbusId, parent: Hash| -> Option { + moonbeam_vrf::vrf_pre_digest::>( + &client_clone, + &keystore_clone, + nimbus_id, + parent, + ) + }; + + let parachain_id = helpers::get_parachain_id(backend.rpc_client.clone()) + .unwrap_or_else(|| panic!("Could not get parachain identifier for lazy loading mode.")); + + task_manager.spawn_essential_handle().spawn_blocking( + "authorship_task", + Some("block-authoring"), + run_manual_seal(ManualSealParams { + block_import, + env, + client: client.clone(), + pool: transaction_pool.clone(), + commands_stream, + select_chain, + consensus_data_provider: Some(Box::new(NimbusManualSealConsensusDataProvider { + keystore: keystore_container.keystore(), + client: client.clone(), + additional_digests_provider: maybe_provide_vrf_digest, + _phantom: Default::default(), + })), + create_inherent_data_providers: move |block: 
H256, ()| { + let maybe_current_para_block = client_set_aside_for_cidp.number(block); + let maybe_current_para_head = client_set_aside_for_cidp.expect_header(block); + let downward_xcm_receiver = downward_xcm_receiver.clone(); + let hrmp_xcm_receiver = hrmp_xcm_receiver.clone(); + + let client_for_cidp = client_set_aside_for_cidp.clone(); + async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); + + let current_para_block = maybe_current_para_block? + .ok_or(sp_blockchain::Error::UnknownBlock(block.to_string()))?; + + let current_para_block_head = Some(polkadot_primitives::HeadData( + maybe_current_para_head?.encode(), + )); + + let mut additional_key_values = vec![ + ( + moonbeam_core_primitives::well_known_relay_keys::TIMESTAMP_NOW + .to_vec(), + sp_timestamp::Timestamp::current().encode(), + ), + ( + relay_chain::well_known_keys::ACTIVE_CONFIG.to_vec(), + AbridgedHostConfiguration { + max_code_size: 3_145_728, + max_head_data_size: 20_480, + max_upward_queue_count: 174_762, + max_upward_queue_size: 1_048_576, + max_upward_message_size: 65_531, + max_upward_message_num_per_candidate: 16, + hrmp_max_message_num_per_candidate: 10, + validation_upgrade_cooldown: 14_400, + validation_upgrade_delay: 600, + async_backing_params: AsyncBackingParams { + max_candidate_depth: 3, + allowed_ancestry_len: 2, + }, + } + .encode(), + ), + ]; + + // If there is a pending upgrade, lets mimic a GoAhead + // signal from the relay + + let storage_key = [ + twox_128(b"ParachainSystem"), + twox_128(b"PendingValidationCode"), + ] + .concat(); + let has_pending_upgrade = client_for_cidp + .storage(block, &StorageKey(storage_key)) + .map_or(false, |ok| ok.map_or(false, |some| !some.0.is_empty())); + if has_pending_upgrade { + additional_key_values.push(( + relay_chain::well_known_keys::upgrade_go_ahead_signal(ParaId::new( + parachain_id, + )), + Some(UpgradeGoAhead::GoAhead).encode(), + )); + } + + let mocked_parachain = MockValidationDataInherentDataProvider { 
+ current_para_block, + current_para_block_head, + relay_offset: 1000, + relay_blocks_per_para_block: 2, + // TODO: Recheck + para_blocks_per_relay_epoch: 10, + relay_randomness_config: (), + xcm_config: MockXcmConfig::new( + &*client_for_cidp, + block, + ParaId::new(parachain_id), + Default::default(), + ), + raw_downward_messages: downward_xcm_receiver.drain().collect(), + raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(), + additional_key_values: Some(additional_key_values), + }; + + let randomness = session_keys_primitives::InherentDataProvider; + + Ok((time, mocked_parachain, randomness)) + } + }, + }), + ); + } + + // Sinks for pubsub notifications. + // Everytime a new subscription is created, a new mpsc channel is added to the sink pool. + // The MappingSyncWorker sends through the channel on block import and the subscription emits a + // notification to the subscriber on receiving a message through this channel. + // This way we avoid race conditions when using native substrate block import notification + // stream. 
+ let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks< + fc_mapping_sync::EthereumBlockNotification, + > = Default::default(); + let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); + + rpc::spawn_essential_tasks( + rpc::SpawnTasksParams { + task_manager: &task_manager, + client: client.clone(), + substrate_backend: backend.clone(), + frontier_backend: frontier_backend.clone(), + filter_pool: filter_pool.clone(), + overrides: overrides.clone(), + fee_history_limit, + fee_history_cache: fee_history_cache.clone(), + }, + sync_service.clone(), + pubsub_notification_sinks.clone(), + ); + let ethapi_cmd = rpc_config.ethapi.clone(); + let tracing_requesters = + if ethapi_cmd.contains(&EthApiCmd::Debug) || ethapi_cmd.contains(&EthApiCmd::Trace) { + rpc::tracing::spawn_tracing_tasks( + &rpc_config, + prometheus_registry.clone(), + rpc::SpawnTasksParams { + task_manager: &task_manager, + client: client.clone(), + substrate_backend: backend.clone(), + frontier_backend: frontier_backend.clone(), + filter_pool: filter_pool.clone(), + overrides: overrides.clone(), + fee_history_limit, + fee_history_cache: fee_history_cache.clone(), + }, + ) + } else { + rpc::tracing::RpcRequesters { + debug: None, + trace: None, + } + }; + + let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new( + task_manager.spawn_handle(), + overrides.clone(), + rpc_config.eth_log_block_cache, + rpc_config.eth_statuses_cache, + prometheus_registry, + )); + + let rpc_builder = { + let client = client.clone(); + let pool = transaction_pool.clone(); + let backend = backend.clone(); + let network = network.clone(); + let sync = sync_service.clone(); + let ethapi_cmd = ethapi_cmd.clone(); + let max_past_logs = rpc_config.max_past_logs; + let overrides = overrides.clone(); + let fee_history_cache = fee_history_cache.clone(); + let block_data_cache = block_data_cache.clone(); + let pubsub_notification_sinks = pubsub_notification_sinks.clone(); + + let keystore 
= keystore_container.keystore(); + let command_sink_for_task = command_sink.clone(); + move |deny_unsafe, subscription_task_executor| { + let deps = rpc::FullDeps { + backend: backend.clone(), + client: client.clone(), + command_sink: command_sink_for_task.clone(), + deny_unsafe, + ethapi_cmd: ethapi_cmd.clone(), + filter_pool: filter_pool.clone(), + frontier_backend: match *frontier_backend { + fc_db::Backend::KeyValue(ref b) => b.clone(), + fc_db::Backend::Sql(ref b) => b.clone(), + }, + graph: pool.pool().clone(), + pool: pool.clone(), + is_authority: collator, + max_past_logs, + fee_history_limit, + fee_history_cache: fee_history_cache.clone(), + network: network.clone(), + sync: sync.clone(), + dev_rpc_data: dev_rpc_data.clone(), + overrides: overrides.clone(), + block_data_cache: block_data_cache.clone(), + forced_parent_hashes: None, + }; + + let pending_consensus_data_provider = Box::new(PendingConsensusDataProvider::new( + client.clone(), + keystore.clone(), + )); + if ethapi_cmd.contains(&EthApiCmd::Debug) || ethapi_cmd.contains(&EthApiCmd::Trace) { + rpc::create_full( + deps, + subscription_task_executor, + Some(crate::rpc::TracingConfig { + tracing_requesters: tracing_requesters.clone(), + trace_filter_max_count: rpc_config.ethapi_trace_max_count, + }), + pubsub_notification_sinks.clone(), + pending_consensus_data_provider, + ) + .map_err(Into::into) + } else { + rpc::create_full( + deps, + subscription_task_executor, + None, + pubsub_notification_sinks.clone(), + pending_consensus_data_provider, + ) + .map_err(Into::into) + } + } + }; + + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network, + client, + keystore: keystore_container.keystore(), + task_manager: &mut task_manager, + transaction_pool, + rpc_builder: Box::new(rpc_builder), + backend, + system_rpc_tx, + sync_service: sync_service.clone(), + config, + tx_handler_controller, + telemetry: None, + })?; + + if let Some(hwbench) = hwbench { + 
sc_sysinfo::print_hwbench(&hwbench); + + if let Some(ref mut telemetry) = telemetry { + let telemetry_handle = telemetry.handle(); + task_manager.spawn_handle().spawn( + "telemetry_hwbench", + None, + sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench), + ); + } + } + + network_starter.start_network(); + + // If a manual seal channel exists, create the first block + if let Some(sink) = command_sink { + let _ = as ManualSealApiServer<_>>::create_block( + &ManualSeal::new(sink), + true, + false, + None, + ) + .await; + } + + log::info!("Service Ready"); + + Ok(task_manager) +} diff --git a/node/service/src/lazy_loading/state_overrides.rs b/node/service/src/lazy_loading/state_overrides.rs new file mode 100644 index 0000000000..12cb28bced --- /dev/null +++ b/node/service/src/lazy_loading/state_overrides.rs @@ -0,0 +1,206 @@ +// Copyright 2024 Moonbeam foundation +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . 
+ +use crate::chain_spec::generate_accounts; +use moonbeam_core_primitives::Balance; +use parity_scale_codec::Encode; +use serde::Deserialize; +use sp_core::blake2_128; +use std::io::Read; +use std::path::PathBuf; + +#[derive(Deserialize, Debug, Clone)] +pub struct StateEntryConcrete { + pub(crate) pallet: String, + pub(crate) storage: String, + #[serde( + skip_serializing_if = "Option::is_none", + deserialize_with = "serde_hex::deserialize_as_option", + default + )] + pub(crate) key: Option>, + #[serde(deserialize_with = "serde_hex::deserialize")] + pub(crate) value: Vec, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct StateEntryRaw { + #[serde(deserialize_with = "serde_hex::deserialize")] + pub(crate) key: Vec, + #[serde(deserialize_with = "serde_hex::deserialize")] + pub(crate) value: Vec, +} + +#[derive(Deserialize, Debug, Clone)] +#[serde(untagged)] +pub enum StateEntry { + Concrete(StateEntryConcrete), + Raw(StateEntryRaw), +} + +/// Mandatory state overrides that most exist when starting a node in lazy loading mode. 
+pub fn base_state_overrides(runtime_code: Option) -> Vec { + let mut overrides = vec![ + StateEntry::Concrete( + StateEntryConcrete { + pallet: "AuthorMapping".to_string(), + storage: "NimbusLookup".to_string(), + key: Some(hex_literal::hex!("9dfefc73f89d24437a9c2dce5572808af24ff3a9cf04c71dbc94d0b566f7a27b94566cac").to_vec()), // editorconfig-checker-disable-line + value: hex_literal::hex!("d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d").to_vec() // editorconfig-checker-disable-line + } + ), + StateEntry::Concrete( + StateEntryConcrete { + pallet: "AuthorMapping".to_string(), + storage: "MappingWithDeposit".to_string(), + key: Some(hex_literal::hex!("de1e86a9a8c739864cf3cc5ec2bea59fd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d").to_vec()), // editorconfig-checker-disable-line + value: hex_literal::hex!("f24ff3a9cf04c71dbc94d0b566f7a27b94566cac000010632d5ec76b0500000000000000d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d").to_vec() // editorconfig-checker-disable-line + } + ), + // Set candidate pool + StateEntry::Concrete( + StateEntryConcrete { + pallet: "ParachainStaking".to_string(), + storage: "CandidatePool".to_string(), + key: None, + value: hex_literal::hex!("04f24ff3a9cf04c71dbc94d0b566f7a27b94566cac0000a0dec5adc9353600000000000000").to_vec() // editorconfig-checker-disable-line + } + ), + // Set Alith as selected candidate + StateEntry::Concrete( + StateEntryConcrete { + pallet: "ParachainStaking".to_string(), + storage: "SelectedCandidates".to_string(), + key: None, + value: hex_literal::hex!("04f24ff3a9cf04c71dbc94d0b566f7a27b94566cac").to_vec() + } + ), + // AtStake + StateEntry::Concrete( + StateEntryConcrete { + pallet: "ParachainStaking".to_string(), + storage: "AtStake".to_string(), + key: Some(hex_literal::hex!("5153cb1f00942ff4010000004a6bb7c01d316509f24ff3a9cf04c71dbc94d0b566f7a27b94566cac").to_vec()), // editorconfig-checker-disable-line + value: 
hex_literal::hex!("0000a0dec5adc9353600000000000000000000a0dec5adc9353600000000000000").to_vec() // editorconfig-checker-disable-line + } + ), + // Reset SlotInfo + StateEntry::Concrete( + StateEntryConcrete { + pallet: "AsyncBacking".to_string(), + storage: "SlotInfo".to_string(), + key: None, + value: (1u64, 1u32).encode() + } + ), + // Reset LastRelayChainBlockNumber + StateEntry::Concrete( + StateEntryConcrete { + pallet: "ParachainSystem".to_string(), + storage: "LastRelayChainBlockNumber".to_string(), + key: None, + value: 0u32.encode() + } + ), + ]; + + // Default mnemonic if none was provided + let test_mnemonic = + "bottom drive obey lake curtain smoke basket hold race lonely fit walk".to_string(); + // Prefund the standard dev accounts + for address in generate_accounts(test_mnemonic, 6) { + overrides.push(StateEntry::Concrete(StateEntryConcrete { + pallet: "System".to_string(), + storage: "Account".to_string(), + key: Some( + [blake2_128(&address.0).as_slice(), address.0.as_slice()] + .concat() + .to_vec(), + ), + value: frame_system::AccountInfo { + nonce: 0u32, + consumers: 0, + providers: 1, + sufficients: 0, + data: pallet_balances::AccountData:: { + free: Balance::MAX, + reserved: Default::default(), + frozen: Default::default(), + flags: Default::default(), + }, + } + .encode(), + })) + } + + if let Some(path) = runtime_code { + let mut reader = std::fs::File::open(path.clone()) + .expect(format!("Could not open file {:?}", path).as_str()); + let mut data = vec![]; + reader + .read_to_end(&mut data) + .expect("Runtime code override invalid."); + + overrides.push(StateEntry::Raw(StateEntryRaw { + key: sp_core::storage::well_known_keys::CODE.to_vec(), + value: data.to_vec(), + })); + } + + overrides +} + +pub fn read(path: PathBuf) -> Result, String> { + let reader = std::fs::File::open(path).expect("Can open file"); + let state = serde_json::from_reader(reader).expect("Can parse state overrides JSON"); + + Ok(state) +} + +mod serde_hex { + use 
hex::FromHex; + use serde::{Deserialize, Deserializer}; + + fn sanitize(data: &str) -> &str { + if data.starts_with("0x") { + &data[2..] + } else { + data + } + } + + pub fn deserialize_as_option<'de, D, T>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + T: FromHex, + ::Error: std::fmt::Display + std::fmt::Debug, + { + Option::::deserialize(deserializer).map(|value| { + value.map(|data| FromHex::from_hex(sanitize(data.as_str())).expect("Invalid option")) + }) + } + + pub fn deserialize<'de, D, T>(deserializer: D) -> Result + where + D: Deserializer<'de>, + T: FromHex, + ::Error: std::fmt::Display + std::fmt::Debug, + { + String::deserialize(deserializer).map(|data| { + FromHex::from_hex(sanitize(data.as_str())).expect("Invalid hex encoded string") + }) + } +} diff --git a/node/service/src/lazy_loading/wasm_override.rs b/node/service/src/lazy_loading/wasm_override.rs new file mode 100644 index 0000000000..89890ef234 --- /dev/null +++ b/node/service/src/lazy_loading/wasm_override.rs @@ -0,0 +1,381 @@ +// Copyright 2024 Moonbeam foundation +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +//! # WASM Local Blob-Override +//! +//! WASM Local blob override provides tools to replace on-chain WASM with custom WASM. +//! These customized WASM blobs may include functionality that is not included in the +//! on-chain WASM, such as tracing or debugging information. 
This extra information is especially +//! useful in external scenarios, like exchanges or archive nodes. +//! +//! ## Usage +//! +//! WASM overrides may be enabled with the `--wasm-runtime-overrides` argument. The argument +//! expects a path to a directory that holds custom WASM. +//! +//! Any file ending in '.wasm' will be scraped and instantiated as a WASM blob. WASM can be built by +//! compiling the required runtime with the changes needed. For example, compiling a runtime with +//! tracing enabled would produce a WASM blob that can be used. +//! +//! A custom WASM blob will override on-chain WASM if the spec version matches. If it is +//! required to override multiple runtimes, multiple WASM blobs matching each of the spec versions +//! needed must be provided in the given directory. + +use sc_executor::RuntimeVersionOf; +use sp_blockchain::Result; +use sp_core::traits::{FetchRuntimeCode, RuntimeCode, WrappedRuntimeCode}; +use sp_state_machine::BasicExternalities; +use sp_version::RuntimeVersion; +use std::{ + collections::{hash_map::DefaultHasher, HashMap}, + fs, + hash::Hasher as _, + path::{Path, PathBuf}, + time::{Duration, Instant}, +}; + +/// The interval at which we will print a warning when a wasm blob `spec_name` +/// doesn't match with the on-chain `spec_name`. +const WARN_INTERVAL: Duration = Duration::from_secs(30); + +/// Auxiliary structure that holds a wasm blob and its hash. +#[derive(Debug)] +struct WasmBlob { + /// The actual wasm blob, aka the code. + code: Vec, + /// The hash of [`Self::code`]. + hash: Vec, + /// The path where this blob was found. + path: PathBuf, + /// The runtime version of this blob. + version: RuntimeVersion, + /// When was the last time we have warned about the wasm blob having + /// a wrong `spec_name`? 
+ last_warn: parking_lot::Mutex>, +} + +impl WasmBlob { + fn new(code: Vec, hash: Vec, path: PathBuf, version: RuntimeVersion) -> Self { + Self { + code, + hash, + path, + version, + last_warn: Default::default(), + } + } + + fn runtime_code(&self, heap_pages: Option) -> RuntimeCode { + RuntimeCode { + code_fetcher: self, + hash: self.hash.clone(), + heap_pages, + } + } +} + +/// Make a hash out of a byte string using the default rust hasher +fn make_hash(val: &K) -> Vec { + let mut state = DefaultHasher::new(); + val.hash(&mut state); + state.finish().to_le_bytes().to_vec() +} + +impl FetchRuntimeCode for WasmBlob { + fn fetch_runtime_code(&self) -> Option> { + Some(self.code.as_slice().into()) + } +} + +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum WasmOverrideError { + #[error("Failed to get runtime version: {0}")] + VersionInvalid(String), + + #[error("WASM override IO error")] + Io(PathBuf, #[source] std::io::Error), + + #[error("Overwriting WASM requires a directory where local \ + WASM is stored. {} is not a directory", .0.display())] + NotADirectory(PathBuf), + + #[error("Duplicate WASM Runtimes found: \n{}\n", .0.join("\n") )] + DuplicateRuntime(Vec), +} + +impl From for sp_blockchain::Error { + fn from(err: WasmOverrideError) -> Self { + Self::Application(Box::new(err)) + } +} + +/// Scrapes WASM from a folder and returns WASM from that folder +/// if the runtime spec version matches. +#[derive(Debug)] +pub struct WasmOverride { + // Map of runtime spec version -> Wasm Blob + overrides: HashMap, +} + +impl WasmOverride { + pub fn new(path: P, executor: &E) -> Result + where + P: AsRef, + E: RuntimeVersionOf, + { + let overrides = Self::scrape_overrides(path.as_ref(), executor)?; + Ok(Self { overrides }) + } + + /// Gets an override by its runtime spec version. + /// + /// Returns `None` if an override for a spec version does not exist. 
+ pub fn get<'a, 'b: 'a>( + &'b self, + spec: &u32, + pages: Option, + spec_name: &str, + ) -> Option<(RuntimeCode<'a>, RuntimeVersion)> { + self.overrides.get(spec).and_then(|w| { + if spec_name == &*w.version.spec_name { + Some((w.runtime_code(pages), w.version.clone())) + } else { + let mut last_warn = w.last_warn.lock(); + let now = Instant::now(); + + if last_warn.map_or(true, |l| l + WARN_INTERVAL <= now) { + *last_warn = Some(now); + + tracing::warn!( + target = "wasm_overrides", + on_chain_spec_name = %spec_name, + override_spec_name = %w.version, + spec_version = %spec, + wasm_file = %w.path.display(), + "On chain and override `spec_name` do not match! Ignoring override.", + ); + } + + None + } + }) + } + + /// Scrapes a folder for WASM runtimes. + /// Returns a hashmap of the runtime version and wasm runtime code. + fn scrape_overrides(dir: &Path, executor: &E) -> Result> + where + E: RuntimeVersionOf, + { + let handle_err = |e: std::io::Error| -> sp_blockchain::Error { + WasmOverrideError::Io(dir.to_owned(), e).into() + }; + + if !dir.is_dir() { + return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()); + } + + let mut overrides = HashMap::new(); + let mut duplicates = Vec::new(); + for entry in fs::read_dir(dir).map_err(handle_err)? 
{ + let entry = entry.map_err(handle_err)?; + let path = entry.path(); + if let Some("wasm") = path.extension().and_then(|e| e.to_str()) { + let code = fs::read(&path).map_err(handle_err)?; + let code_hash = make_hash(&code); + let version = Self::runtime_version(executor, &code, &code_hash, Some(128))?; + tracing::info!( + target: "wasm_overrides", + version = %version, + file = %path.display(), + "Found wasm override.", + ); + + let wasm = WasmBlob::new(code, code_hash, path.clone(), version.clone()); + + if let Some(other) = overrides.insert(version.spec_version, wasm) { + tracing::info!( + target: "wasm_overrides", + first = %other.path.display(), + second = %path.display(), + %version, + "Found duplicate spec version for runtime.", + ); + duplicates.push(path.display().to_string()); + } + } + } + + if !duplicates.is_empty() { + return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()); + } + + Ok(overrides) + } + + fn runtime_version( + executor: &E, + code: &[u8], + code_hash: &[u8], + heap_pages: Option, + ) -> Result + where + E: RuntimeVersionOf, + { + let mut ext = BasicExternalities::default(); + executor + .runtime_version( + &mut ext, + &RuntimeCode { + code_fetcher: &WrappedRuntimeCode(code.into()), + heap_pages, + hash: code_hash.into(), + }, + ) + .map_err(|e| WasmOverrideError::VersionInvalid(e.to_string()).into()) + } +} + +/// Returns a WasmOverride struct filled with dummy data for testing. 
+#[cfg(test)] +pub fn dummy_overrides() -> WasmOverride { + let version = RuntimeVersion { + spec_name: "test".into(), + ..Default::default() + }; + let mut overrides = HashMap::new(); + overrides.insert( + 0, + WasmBlob::new( + vec![0, 0, 0, 0, 0, 0, 0, 0], + vec![0], + PathBuf::new(), + version.clone(), + ), + ); + overrides.insert( + 1, + WasmBlob::new( + vec![1, 1, 1, 1, 1, 1, 1, 1], + vec![1], + PathBuf::new(), + version.clone(), + ), + ); + overrides.insert( + 2, + WasmBlob::new( + vec![2, 2, 2, 2, 2, 2, 2, 2], + vec![2], + PathBuf::new(), + version, + ), + ); + + WasmOverride { overrides } +} + +#[cfg(test)] +mod tests { + use super::*; + use sc_executor::{HeapAllocStrategy, NativeElseWasmExecutor, WasmExecutor}; + use std::fs::{self, File}; + use substrate_test_runtime_client::LocalExecutorDispatch; + + fn executor() -> NativeElseWasmExecutor { + NativeElseWasmExecutor::::new_with_wasm_executor( + WasmExecutor::builder() + .with_onchain_heap_alloc_strategy(HeapAllocStrategy::Static { extra_pages: 128 }) + .with_offchain_heap_alloc_strategy(HeapAllocStrategy::Static { extra_pages: 128 }) + .with_max_runtime_instances(1) + .with_runtime_cache_size(2) + .build(), + ) + } + + fn wasm_test(fun: F) + where + F: Fn(&Path, &[u8], &NativeElseWasmExecutor), + { + let exec = executor(); + let bytes = substrate_test_runtime::wasm_binary_unwrap(); + let dir = tempfile::tempdir().expect("Create a temporary directory"); + fun(dir.path(), bytes, &exec); + dir.close().expect("Temporary Directory should close"); + } + + #[test] + fn should_get_runtime_version() { + let executor = executor(); + + let version = WasmOverride::runtime_version( + &executor, + substrate_test_runtime::wasm_binary_unwrap(), + &[1], + Some(128), + ) + .expect("should get the `RuntimeVersion` of the test-runtime wasm blob"); + assert_eq!(version.spec_version, 2); + } + + #[test] + fn should_scrape_wasm() { + wasm_test(|dir, wasm_bytes, exec| { + fs::write(dir.join("test.wasm"), 
wasm_bytes).expect("Create test file"); + let overrides = + WasmOverride::scrape_overrides(dir, exec).expect("HashMap of u32 and WasmBlob"); + let wasm = overrides.get(&2).expect("WASM binary"); + assert_eq!( + wasm.code, + substrate_test_runtime::wasm_binary_unwrap().to_vec() + ) + }); + } + + #[test] + fn should_check_for_duplicates() { + wasm_test(|dir, wasm_bytes, exec| { + fs::write(dir.join("test0.wasm"), wasm_bytes).expect("Create test file"); + fs::write(dir.join("test1.wasm"), wasm_bytes).expect("Create test file"); + let scraped = WasmOverride::scrape_overrides(dir, exec); + + match scraped { + Err(sp_blockchain::Error::Application(e)) => { + match e.downcast_ref::() { + Some(WasmOverrideError::DuplicateRuntime(duplicates)) => { + assert_eq!(duplicates.len(), 1); + } + _ => panic!("Test should end with Msg Error Variant"), + } + } + _ => panic!("Test should end in error"), + } + }); + } + + #[test] + fn should_ignore_non_wasm() { + wasm_test(|dir, wasm_bytes, exec| { + File::create(dir.join("README.md")).expect("Create test file"); + File::create(dir.join("LICENSE")).expect("Create a test file"); + fs::write(dir.join("test0.wasm"), wasm_bytes).expect("Create test file"); + let scraped = + WasmOverride::scrape_overrides(dir, exec).expect("HashMap of u32 and WasmBlob"); + assert_eq!(scraped.len(), 1); + }); + } +} diff --git a/node/service/src/lazy_loading/wasm_substitutes.rs b/node/service/src/lazy_loading/wasm_substitutes.rs new file mode 100644 index 0000000000..e2d5f36c81 --- /dev/null +++ b/node/service/src/lazy_loading/wasm_substitutes.rs @@ -0,0 +1,174 @@ +// Copyright 2024 Moonbeam foundation +// This file is part of Moonbeam. + +// Moonbeam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Moonbeam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Moonbeam. If not, see . + +//! # WASM substitutes + +use sc_client_api::backend; +use sc_executor::RuntimeVersionOf; +use sp_blockchain::{HeaderBackend, Result}; +use sp_core::traits::{FetchRuntimeCode, RuntimeCode, WrappedRuntimeCode}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_state_machine::BasicExternalities; +use sp_version::RuntimeVersion; +use std::{ + collections::{hash_map::DefaultHasher, HashMap}, + hash::Hasher as _, + sync::Arc, +}; + +/// A wasm substitute for the on chain wasm. +#[derive(Debug)] +struct WasmSubstitute { + code: Vec, + hash: Vec, + /// The block number on which we should start using the substitute. + block_number: NumberFor, + version: RuntimeVersion, +} + +impl WasmSubstitute { + fn new(code: Vec, block_number: NumberFor, version: RuntimeVersion) -> Self { + let hash = make_hash(&code); + Self { + code, + hash, + block_number, + version, + } + } + + fn runtime_code(&self, heap_pages: Option) -> RuntimeCode { + RuntimeCode { + code_fetcher: self, + hash: self.hash.clone(), + heap_pages, + } + } + + /// Returns `true` when the substitute matches for the given `hash`. 
+ fn matches( + &self, + hash: ::Hash, + backend: &impl backend::Backend, + ) -> bool { + let requested_block_number = backend.blockchain().number(hash).ok().flatten(); + + Some(self.block_number) <= requested_block_number + } +} + +/// Make a hash out of a byte string using the default rust hasher +fn make_hash(val: &K) -> Vec { + let mut state = DefaultHasher::new(); + val.hash(&mut state); + state.finish().to_le_bytes().to_vec() +} + +impl FetchRuntimeCode for WasmSubstitute { + fn fetch_runtime_code(&self) -> Option> { + Some(self.code.as_slice().into()) + } +} + +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum WasmSubstituteError { + #[error("Failed to get runtime version: {0}")] + VersionInvalid(String), +} + +impl From for sp_blockchain::Error { + fn from(err: WasmSubstituteError) -> Self { + Self::Application(Box::new(err)) + } +} + +/// Substitutes the on-chain wasm with some hard coded blobs. +#[derive(Debug)] +pub struct WasmSubstitutes { + /// spec_version -> WasmSubstitute + substitutes: Arc>>, + executor: Executor, + backend: Arc, +} + +impl Clone for WasmSubstitutes { + fn clone(&self) -> Self { + Self { + substitutes: self.substitutes.clone(), + executor: self.executor.clone(), + backend: self.backend.clone(), + } + } +} + +impl WasmSubstitutes +where + Executor: RuntimeVersionOf + Clone + 'static, + Backend: backend::Backend, + Block: BlockT, +{ + /// Create a new instance. 
+ pub fn new( + substitutes: HashMap, Vec>, + executor: Executor, + backend: Arc, + ) -> Result { + let substitutes = substitutes + .into_iter() + .map(|(block_number, code)| { + let runtime_code = RuntimeCode { + code_fetcher: &WrappedRuntimeCode((&code).into()), + heap_pages: None, + hash: make_hash(&code), + }; + let version = Self::runtime_version(&executor, &runtime_code)?; + let spec_version = version.spec_version; + + let substitute = WasmSubstitute::new(code, block_number, version); + + Ok((spec_version, substitute)) + }) + .collect::>>()?; + + Ok(Self { + executor, + substitutes: Arc::new(substitutes), + backend, + }) + } + + /// Get a substitute. + /// + /// Returns `None` if there isn't any substitute required. + pub fn get( + &self, + spec: u32, + pages: Option, + hash: Block::Hash, + ) -> Option<(RuntimeCode<'_>, RuntimeVersion)> { + let s = self.substitutes.get(&spec)?; + s.matches(hash, &*self.backend) + .then(|| (s.runtime_code(pages), s.version.clone())) + } + + fn runtime_version(executor: &Executor, code: &RuntimeCode) -> Result { + let mut ext = BasicExternalities::default(); + executor + .runtime_version(&mut ext, code) + .map_err(|e| WasmSubstituteError::VersionInvalid(e.to_string()).into()) + } +} diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index bb2a6f0fb8..98c257d96e 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -68,6 +68,7 @@ use sc_service::{ }; use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use session_keys_primitives::VrfApi; use sp_api::{ConstructRuntimeApi, ProvideRuntimeApi}; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_consensus::SyncOracle; @@ -81,28 +82,29 @@ use substrate_prometheus_endpoint::Registry; pub use client::*; pub mod chain_spec; mod client; +#[cfg(feature = "lazy-loading")] +pub mod lazy_loading; type FullClient = TFullClient>; type 
FullBackend = TFullBackend; -type MaybeSelectChain = Option>; -type FrontierBlockImport = - TFrontierBlockImport>, FullClient>; -type ParachainBlockImport = - TParachainBlockImport, FullBackend>; -type PartialComponentsResult = Result< +type MaybeSelectChain = Option>; +type FrontierBlockImport = TFrontierBlockImport, Client>; +type ParachainBlockImport = + TParachainBlockImport, Backend>; +type PartialComponentsResult = Result< PartialComponents< - FullClient, - FullBackend, - MaybeSelectChain, + Client, + Backend, + MaybeSelectChain, sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool>, + sc_transaction_pool::FullPool, ( - BlockImportPipeline, ParachainBlockImport>, + BlockImportPipeline, ParachainBlockImport>, Option, Option, Option, - Arc>>, + Arc>, FeeHistoryCache, ), >, @@ -419,7 +421,7 @@ pub fn new_partial( config: &mut Configuration, rpc_config: &RpcConfig, dev_service: bool, -) -> PartialComponentsResult +) -> PartialComponentsResult, FullBackend> where RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, RuntimeApi::RuntimeApi: RuntimeApiCollection, @@ -938,7 +940,7 @@ fn start_consensus( async_backing: bool, backend: Arc, client: Arc>, - block_import: ParachainBlockImport, + block_import: ParachainBlockImport, FullBackend>, prometheus_registry: Option<&Registry>, telemetry: Option, task_manager: &TaskManager, @@ -957,8 +959,7 @@ fn start_consensus( where RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, RuntimeApi::RuntimeApi: RuntimeApiCollection, - sc_client_api::StateBackendFor, Block>: - sc_client_api::StateBackend, + sc_client_api::StateBackendFor: sc_client_api::StateBackend, SO: SyncOracle + Send + Sync + Clone + 'static, { let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( @@ -1772,29 +1773,29 @@ mod tests { } } -struct PendingConsensusDataProvider +struct PendingConsensusDataProvider where - RuntimeApi: Send + Sync, + Client: HeaderBackend + sp_api::ProvideRuntimeApi + Send + Sync, + 
Client::Api: VrfApi, { - client: Arc>, + client: Arc, keystore: Arc, } -impl PendingConsensusDataProvider +impl PendingConsensusDataProvider where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: RuntimeApiCollection, + Client: HeaderBackend + sp_api::ProvideRuntimeApi + Send + Sync, + Client::Api: VrfApi, { - pub fn new(client: Arc>, keystore: Arc) -> Self { + pub fn new(client: Arc, keystore: Arc) -> Self { Self { client, keystore } } } -impl fc_rpc::pending::ConsensusDataProvider - for PendingConsensusDataProvider +impl fc_rpc::pending::ConsensusDataProvider for PendingConsensusDataProvider where - RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, - RuntimeApi::RuntimeApi: RuntimeApiCollection, + Client: HeaderBackend + sp_api::ProvideRuntimeApi + Send + Sync, + Client::Api: VrfApi, { fn create_digest( &self, diff --git a/test/configs/lazyLoadingStateOverrides.json b/test/configs/lazyLoadingStateOverrides.json new file mode 100644 index 0000000000..fe51488c70 --- /dev/null +++ b/test/configs/lazyLoadingStateOverrides.json @@ -0,0 +1 @@ +[] diff --git a/test/moonwall.config.json b/test/moonwall.config.json index ffd18397f9..c43a9a55db 100644 --- a/test/moonwall.config.json +++ b/test/moonwall.config.json @@ -345,6 +345,51 @@ } ] }, + { + "name": "lazy_loading_moonbeam", + "testFileDir": ["suites/lazy-loading"], + "include": ["**/*test*"], + "timeout": 180000, + "contracts": "contracts/", + "runScripts": [ + "compile-contracts.ts compile", + "compile-wasm.ts compile -b ../target/release/moonbeam -o wasm -c moonbeam-dev", + "prepare-lazy-loading-overrides.ts process configs/lazyLoadingStateOverrides.json tmp/lazyLoadingStateOverrides.json ../target/release/wbuild/moonbeam-runtime/moonbeam_runtime.compact.compressed.wasm" + ], + "multiThreads": 4, + "envVars": ["DEBUG_COLORS=1"], + "reporters": ["basic", "html", "json"], + "reportFile": { + "json": "./tmp/testResultsLazyLoadingMoonbeam.json" + }, + "foundation": { + 
"type": "dev", + "launchSpec": [ + { + "name": "moonbeam", + "binPath": "../target/release/lazy-loading", + "newRpcBehaviour": true, + "options": [ + "--ethapi=txpool", + "--no-hardware-benchmarks", + "--no-telemetry", + "--wasmtime-precompiled=wasm", + "--unsafe-force-node-key-generation", + "--reserved-only", + "--no-grandpa", + "--no-prometheus", + "--force-authoring", + "--rpc-cors=all", + "--alice", + "--sealing=manual", + "--tmp", + "--fork-chain-from-rpc=https://moonbeam.unitedbloc.com", + "--fork-state-overrides=tmp/lazyLoadingStateOverrides.json" + ] + } + ] + } + }, { "name": "dev_moonbeam", "testFileDir": ["suites/dev/moonbeam", "suites/dev/common"], diff --git a/test/scripts/prepare-lazy-loading-overrides.ts b/test/scripts/prepare-lazy-loading-overrides.ts new file mode 100644 index 0000000000..8a570c0a1b --- /dev/null +++ b/test/scripts/prepare-lazy-loading-overrides.ts @@ -0,0 +1,73 @@ +import fs from "fs/promises"; +import yargs from "yargs"; +import { hideBin } from "yargs/helpers"; +import { convertExponentials } from "@zombienet/utils"; +import { u8aConcat, u8aToHex } from "@polkadot/util"; +import { blake2AsHex, xxhashAsU8a } from "@polkadot/util-crypto"; +import jsonBg from "json-bigint"; + +const JSONbig = jsonBg({ useNativeBigInt: true }); + +yargs(hideBin(process.argv)) + .usage("Usage: $0") + .version("2.0.0") + .command( + "process ", + "Preapproves a runtime blob into a raw spec for easy upgrade", + (yargs) => { + return yargs + .positional("inputPath", { + describe: "Input path for plainSpecFile to modify", + type: "string", + }) + .positional("outputPath", { + describe: "Output path for modified file", + type: "string", + }) + .positional("runtimePath", { + describe: "Input path for runtime blob to ", + type: "string", + }); + }, + async (argv) => { + if (!argv.inputPath) { + throw new Error("Input path is required"); + } + + if (!argv.outputPath) { + throw new Error("Output path is required"); + } + + if (!argv.runtimePath) { + throw 
new Error("Runtime path is required"); + } + + process.stdout.write(`Reading from: ${argv.runtimePath} ...`); + const runtimeBlob = await fs.readFile(argv.runtimePath); + process.stdout.write("Done ✅\n"); + + const runtimeHash = blake2AsHex(runtimeBlob); + process.stdout.write(`Runtime hash: ${runtimeHash}\n`); + + process.stdout.write(`Reading from: ${argv.inputPath} ...`); + const localRaw = JSONbig.parse((await fs.readFile(argv.inputPath)).toString()); + process.stdout.write("Done ✅\n"); + + const storageKey = u8aToHex( + u8aConcat(xxhashAsU8a("System", 128), xxhashAsU8a("AuthorizedUpgrade", 128)) + ); + + localRaw.push({ + key: storageKey, + value: `${runtimeHash}01`, // 01 sets RT version check = true + }); + + process.stdout.write(`Writing to: ${argv.outputPath} ...`); + await fs.writeFile( + argv.outputPath, + convertExponentials(JSONbig.stringify(localRaw, null, 3)) + ); + process.stdout.write("Done ✅\n"); + } + ) + .parse(); diff --git a/test/suites/lazy-loading/test-runtime-upgrade.ts b/test/suites/lazy-loading/test-runtime-upgrade.ts new file mode 100644 index 0000000000..9f6770186b --- /dev/null +++ b/test/suites/lazy-loading/test-runtime-upgrade.ts @@ -0,0 +1,92 @@ +import "@moonbeam-network/api-augment"; +import { beforeAll, describeSuite, expect } from "@moonwall/cli"; +import { RUNTIME_CONSTANTS } from "../../helpers"; +import { ApiPromise } from "@polkadot/api"; +import fs from "fs/promises"; +import { u8aToHex } from "@polkadot/util"; + +describeSuite({ + id: "LD01", + title: "Lazy Loading - Runtime Upgrade", + foundationMethods: "dev", + testCases: ({ it, context, log }) => { + let api: ApiPromise; + + beforeAll(async () => { + api = context.polkadotJs(); + + const runtimeChain = api.runtimeChain.toUpperCase(); + const runtime = runtimeChain + .split(" ") + .filter((v) => Object.keys(RUNTIME_CONSTANTS).includes(v)) + .join() + .toLowerCase(); + const wasmPath = 
`../target/release/wbuild/${runtime}-runtime/${runtime}_runtime.compact.compressed.wasm`; // editorconfig-checker-disable-line + + const runtimeWasmHex = u8aToHex(await fs.readFile(wasmPath)); + + const rtBefore = api.consts.system.version.specVersion.toNumber(); + log("Current runtime:", rtBefore); + log("About to upgrade to runtime at:", wasmPath); + + await context.createBlock([], { finalize: false }); + const { result } = await context.createBlock( + api.tx.system.applyAuthorizedUpgrade(runtimeWasmHex), + { finalize: false } + ); + const errors = result.events + // find/filter for failed events + .filter(({ event }) => api.events.system.ExtrinsicFailed.is(event)) + // we know that data for system.ExtrinsicFailed is + // (DispatchError, DispatchInfo) + .map( + ({ + event: { + data: [error, info], + }, + }) => { + if (error.isModule) { + // for module errors, we have the section indexed, lookup + const decoded = api.registry.findMetaError(error.asModule); + const { docs, method, section } = decoded; + + return `${section}.${method}: ${docs.join(" ")}`; + } else { + // Other, CannotLookup, BadOrigin, no extra info + return error.toString(); + } + } + ); + + if (errors.length) { + throw new Error(`Could not upgrade runtime. 
\nErrors:\n\n\t- ${errors.join("\n\t-")}\n`); + } + + // This next block will receive the GoAhead signal + await context.createBlock([], { finalize: false }); + // The next block will process the runtime upgrade + await context.createBlock([], { finalize: false }); + + const events = (await api.query.system.events()).filter(({ event }) => + api.events.migrations.RuntimeUpgradeCompleted.is(event) + ); + expect(events.length > 0, "Migrations should complete").to.be.true; + + const rtAfter = api.consts.system.version.specVersion.toNumber(); + log(`RT upgrade has increased specVersion from ${rtBefore} to ${rtAfter}`); + + expect(rtBefore).to.be.not.equal(rtAfter, "Runtime upgrade failed"); + + const specName = api.consts.system.version.specName.toString(); + log(`Currently connected to chain: ${specName}`); + }); + + it({ + id: "T01", + title: "Validate new applied runtime", + test: async function () { + // TODO + }, + }); + }, +});