From 30e29c4c2d0fd04c9199f9158adb9d7b5b4a751e Mon Sep 17 00:00:00 2001
From: Rodrigo Quelhas <22591718+RomarQ@users.noreply.github.com>
Date: Wed, 11 Sep 2024 11:52:53 +0100
Subject: [PATCH] Lazy loading (#2840)
* feat: implement lazy loading backend and call executor
experiments
* code cleanup
* add lazy loading chain spec
* feat(lazy-loading): replace async-std with tokio and retry failed requests
* remove local polkadot-sdk dependencies
* fix Cargo.toml
* chore: update Cargo.lock file
* fix editor config errors
* fix editor config errors
* fix build
* fix lazy-loading feature
* cleanup
* reset AsyncBacking > SlotInfo
* test: add lazy loading tests
* remove parent field
* remove files
* test: fix formatting
* fix(ci): lazy loading test
* fix test
* fix format
* test: move lazy loading state overrides to configs folder
* fix build and add mandatory storage override
* fix pipeline
* add state overrides for dev accounts
* fix build
* remove redundant test
* fix remark from review
* improve lazy loading test
---
.../workflow-templates/cargo-build/action.yml | 3 +
.github/workflows/build.yml | 57 +
Cargo.lock | 71 +-
Cargo.toml | 6 +-
node/Cargo.toml | 2 +
node/cli-opt/src/lib.rs | 10 +
node/cli/Cargo.toml | 2 +
node/cli/src/cli.rs | 22 +
node/cli/src/command.rs | 125 +-
node/service/Cargo.toml | 15 +-
node/service/src/chain_spec/test_spec.rs | 63 +
node/service/src/externalities.rs | 872 +++++++++
node/service/src/lazy_loading/backend.rs | 1626 +++++++++++++++++
.../service/src/lazy_loading/call_executor.rs | 346 ++++
node/service/src/lazy_loading/client.rs | 80 +
node/service/src/lazy_loading/helpers.rs | 107 ++
node/service/src/lazy_loading/mod.rs | 820 +++++++++
.../src/lazy_loading/state_overrides.rs | 206 +++
.../service/src/lazy_loading/wasm_override.rs | 381 ++++
.../src/lazy_loading/wasm_substitutes.rs | 174 ++
node/service/src/lib.rs | 55 +-
test/configs/lazyLoadingStateOverrides.json | 1 +
test/moonwall.config.json | 45 +
.../scripts/prepare-lazy-loading-overrides.ts | 73 +
.../lazy-loading/test-runtime-upgrade.ts | 92 +
25 files changed, 5176 insertions(+), 78 deletions(-)
create mode 100644 node/service/src/externalities.rs
create mode 100644 node/service/src/lazy_loading/backend.rs
create mode 100644 node/service/src/lazy_loading/call_executor.rs
create mode 100644 node/service/src/lazy_loading/client.rs
create mode 100644 node/service/src/lazy_loading/helpers.rs
create mode 100644 node/service/src/lazy_loading/mod.rs
create mode 100644 node/service/src/lazy_loading/state_overrides.rs
create mode 100644 node/service/src/lazy_loading/wasm_override.rs
create mode 100644 node/service/src/lazy_loading/wasm_substitutes.rs
create mode 100644 test/configs/lazyLoadingStateOverrides.json
create mode 100644 test/scripts/prepare-lazy-loading-overrides.ts
create mode 100644 test/suites/lazy-loading/test-runtime-upgrade.ts
diff --git a/.github/workflow-templates/cargo-build/action.yml b/.github/workflow-templates/cargo-build/action.yml
index dd9cf28ca0..a83d56b7db 100644
--- a/.github/workflow-templates/cargo-build/action.yml
+++ b/.github/workflow-templates/cargo-build/action.yml
@@ -48,6 +48,8 @@ runs:
params="$params --features ${{ inputs.features }}"
fi
echo "cargo build $params"
+ cargo build $params --features lazy-loading
+ cp target/release/moonbeam target/release/lazy-loading
cargo build $params
- name: Display binary comments
shell: bash
@@ -74,3 +76,4 @@ runs:
run: |
mkdir -p build
cp target/release/moonbeam build/moonbeam;
+ cp target/release/lazy-loading build/lazy-loading;
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index ce90f83fb2..234da2db29 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -700,6 +700,63 @@ jobs:
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.licenses=${{ github.event.repository.license.spdx_id }}
+ lazy-loading-tests:
+ runs-on:
+ labels: bare-metal
+ needs: ["set-tags", "build"]
+ strategy:
+ fail-fast: false
+ matrix:
+ chain: ["moonbeam"]
+ env:
+ GH_WORKFLOW_MATRIX_CHAIN: ${{ matrix.chain }}
+ DEBUG_COLORS: 1
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ needs.set-tags.outputs.git_ref }}
+ - uses: pnpm/action-setup@v4
+ with:
+ version: 8
+ - uses: actions/setup-node@v4
+ with:
+ node-version: 20.10.0
+ - name: Create local folders
+ run: |
+ mkdir -p target/release/wbuild/${{ matrix.chain }}-runtime/
+ mkdir -p test/tmp
+ - name: "Download branch built runtime"
+ uses: actions/download-artifact@v4
+ with:
+ name: runtimes
+ path: target/release/wbuild/${{ matrix.chain }}-runtime/
+ - name: "Download branch built node"
+ uses: actions/download-artifact@v4
+ with:
+ name: moonbeam
+ path: target/release
+ - name: "Run lazy loading tests"
+ run: |
+ cd test
+ pnpm install
+ chmod uog+x ../target/release/lazy-loading
+ pnpm moonwall test lazy_loading_${{ matrix.chain }}
+ - name: Zip and Upload Node Logs on Failure
+ if: failure()
+ run: |
+ TIMESTAMP=$(date +%Y%m%d%H%M%S)
+ export NODE_LOGS_ZIP="node_logs_$TIMESTAMP.zip"
+ MOST_RECENT_ZOMBIE_DIR=$(ls -td /tmp/zombie-* | head -n 1)
+ find $MOST_RECENT_ZOMBIE_DIR -maxdepth 1 -type f -name '*.log' -exec zip -r $NODE_LOGS_ZIP {} \;
+ echo "NODE_LOGS_ZIP=${NODE_LOGS_ZIP}" >> $GITHUB_ENV
+ - uses: actions/upload-artifact@v4
+ if: failure()
+ with:
+ name: failed-node-logs
+ path: ${{ env.NODE_LOGS_ZIP }}
+
chopsticks-upgrade-test:
runs-on:
labels: bare-metal
diff --git a/Cargo.lock b/Cargo.lock
index a123c86280..60f774d72d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1587,7 +1587,7 @@ dependencies = [
[[package]]
name = "common"
version = "0.1.0"
-source = "git+https://github.com/w3f/ring-proof#b273d33f9981e2bb3375ab45faeb537f7ee35224"
+source = "git+https://github.com/w3f/ring-proof#665f5f51af5734c7b6d90b985dd6861d4c5b4752"
dependencies = [
"ark-ec",
"ark-ff",
@@ -5054,6 +5054,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfdb12a2381ea5b2e68c3469ec604a007b367778cdb14d09612c8069ebd616ad"
dependencies = [
"jsonrpsee-core",
+ "jsonrpsee-http-client",
"jsonrpsee-proc-macros",
"jsonrpsee-server",
"jsonrpsee-types",
@@ -5108,6 +5109,26 @@ dependencies = [
"tracing",
]
+[[package]]
+name = "jsonrpsee-http-client"
+version = "0.22.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ccf93fc4a0bfe05d851d37d7c32b7f370fe94336b52a2f0efc5f1981895c2e5"
+dependencies = [
+ "async-trait",
+ "hyper",
+ "hyper-rustls",
+ "jsonrpsee-core",
+ "jsonrpsee-types",
+ "serde",
+ "serde_json",
+ "thiserror",
+ "tokio",
+ "tower",
+ "tracing",
+ "url",
+]
+
[[package]]
name = "jsonrpsee-proc-macros"
version = "0.22.5"
@@ -7058,8 +7079,10 @@ dependencies = [
"fp-storage",
"frame-benchmarking",
"frame-benchmarking-cli",
+ "frame-system",
"frame-system-rpc-runtime-api",
"futures 0.3.30",
+ "hex",
"hex-literal 0.3.4",
"jsonrpsee",
"libsecp256k1",
@@ -7085,6 +7108,7 @@ dependencies = [
"nimbus-primitives",
"nix 0.23.2",
"pallet-author-inherent",
+ "pallet-balances",
"pallet-ethereum",
"pallet-parachain-staking",
"pallet-sudo",
@@ -7133,26 +7157,34 @@ dependencies = [
"sp-blockchain",
"sp-consensus",
"sp-core",
+ "sp-externalities",
"sp-inherents",
"sp-io",
"sp-keystore",
"sp-offchain",
+ "sp-rpc",
"sp-runtime",
"sp-session",
+ "sp-state-machine",
"sp-storage",
"sp-timestamp",
"sp-transaction-pool",
"sp-trie",
+ "sp-version",
"staging-xcm",
"substrate-build-script-utils",
"substrate-frame-rpc-system",
"substrate-prometheus-endpoint",
+ "substrate-rpc-client",
"substrate-test-client",
"substrate-test-runtime",
"substrate-test-runtime-client",
"tempfile",
+ "thiserror",
"tiny-bip39",
"tokio",
+ "tokio-retry",
+ "tracing",
"trie-root 0.15.2",
"xcm-fee-payment-runtime-api",
]
@@ -12469,7 +12501,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48"
dependencies = [
"anyhow",
- "itertools 0.11.0",
+ "itertools 0.12.1",
"proc-macro2",
"quote",
"syn 2.0.66",
@@ -12901,13 +12933,14 @@ dependencies = [
[[package]]
name = "ring"
version = "0.1.0"
-source = "git+https://github.com/w3f/ring-proof#b273d33f9981e2bb3375ab45faeb537f7ee35224"
+source = "git+https://github.com/w3f/ring-proof#665f5f51af5734c7b6d90b985dd6861d4c5b4752"
dependencies = [
"ark-ec",
"ark-ff",
"ark-poly",
"ark-serialize",
"ark-std",
+ "arrayvec 0.7.4",
"blake2 0.10.6",
"common",
"fflonk",
@@ -16541,6 +16574,19 @@ dependencies = [
"tokio",
]
+[[package]]
+name = "substrate-rpc-client"
+version = "0.33.0"
+source = "git+https://github.com/moonbeam-foundation/polkadot-sdk?branch=moonbeam-polkadot-v1.11.0#125e709e299d83556c21d668660fe37e2e3962cb"
+dependencies = [
+ "async-trait",
+ "jsonrpsee",
+ "log",
+ "sc-rpc-api",
+ "serde",
+ "sp-runtime",
+]
+
[[package]]
name = "substrate-state-trie-migration-rpc"
version = "27.0.0"
@@ -16821,9 +16867,9 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "thiserror"
-version = "1.0.61"
+version = "1.0.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
+checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724"
dependencies = [
"thiserror-impl",
]
@@ -16850,9 +16896,9 @@ dependencies = [
[[package]]
name = "thiserror-impl"
-version = "1.0.61"
+version = "1.0.63"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
+checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261"
dependencies = [
"proc-macro2",
"quote",
@@ -17022,6 +17068,17 @@ dependencies = [
"syn 2.0.66",
]
+[[package]]
+name = "tokio-retry"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f"
+dependencies = [
+ "pin-project",
+ "rand 0.8.5",
+ "tokio",
+]
+
[[package]]
name = "tokio-rustls"
version = "0.24.1"
diff --git a/Cargo.toml b/Cargo.toml
index 350512607c..088d6e6319 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -224,6 +224,7 @@ sp-consensus = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", br
sp-storage = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
sp-timestamp = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
sp-wasm-interface = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
+sp-rpc = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
substrate-build-script-utils = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
substrate-frame-rpc-system = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
substrate-prometheus-endpoint = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
@@ -231,6 +232,7 @@ substrate-test-client = { git = "https://github.com/moonbeam-foundation/polkadot
substrate-test-runtime = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
substrate-test-runtime-client = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
substrate-wasm-builder = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
+substrate-rpc-client = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
# Frontier (wasm)
@@ -404,10 +406,12 @@ schnorrkel = { version = "0.11.4", default-features = false, features = [
"preaudit_deprecated",
] }
tokio = { version = "1.36" }
+tokio-retry = { version = "0.3.0" }
tracing = "0.1.34"
tracing-core = "0.1.29"
trie-root = "0.15.2"
url = "2.2.2"
+thiserror = "1.0.63"
# The list of dependencies below (which can be both direct and indirect dependencies) are crates
# that are suspected to be CPU-intensive, and that are unlikely to require debugging (as some of
@@ -493,4 +497,4 @@ inherits = "release"
overflow-checks = true
[patch."https://github.com/paritytech/polkadot-sdk"]
-sp-crypto-ec-utils = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
+sp-crypto-ec-utils = { git = "https://github.com/moonbeam-foundation/polkadot-sdk", branch = "moonbeam-polkadot-v1.11.0" }
\ No newline at end of file
diff --git a/node/Cargo.toml b/node/Cargo.toml
index bb5a483867..eb291b1e07 100644
--- a/node/Cargo.toml
+++ b/node/Cargo.toml
@@ -42,6 +42,8 @@ moonriver-native = [ "moonbeam-cli/moonriver-native", "moonbeam-service/moonrive
metadata-hash = ["moonbeam-service/metadata-hash"]
+lazy-loading = ["moonbeam-service/lazy-loading", "moonbeam-cli/lazy-loading"]
+
test-spec = []
runtime-benchmarks = [
diff --git a/node/cli-opt/src/lib.rs b/node/cli-opt/src/lib.rs
index 93259730cb..b18676c645 100644
--- a/node/cli-opt/src/lib.rs
+++ b/node/cli-opt/src/lib.rs
@@ -11,8 +11,10 @@
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
+use std::path::PathBuf;
// You should have received a copy of the GNU General Public License
// along with Moonbeam. If not, see <http://www.gnu.org/licenses/>.
+use primitive_types::H256;
use std::str::FromStr;
pub mod account_key;
@@ -110,3 +112,11 @@ pub struct RpcConfig {
pub frontier_backend_config: FrontierBackendConfig,
pub no_prometheus_prefix: bool,
}
+
+#[derive(Clone)]
+pub struct LazyLoadingConfig {
+ pub state_rpc: String,
+ pub from_block: Option<H256>,
+ pub state_overrides_path: Option<PathBuf>,
+ pub runtime_override: Option<PathBuf>,
+}
diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml
index 805bd19b35..2abb7aa2e5 100644
--- a/node/cli/Cargo.toml
+++ b/node/cli/Cargo.toml
@@ -54,6 +54,8 @@ default = [
"westend-native",
]
+lazy-loading = ["sc-service/test-helpers", "moonbeam-service/lazy-loading"]
+
westend-native = ["polkadot-service/westend-native"]
moonbase-native = ["moonbeam-service/moonbase-native", "westend-native"]
diff --git a/node/cli/src/cli.rs b/node/cli/src/cli.rs
index 7758aab380..1e48e3f570 100644
--- a/node/cli/src/cli.rs
+++ b/node/cli/src/cli.rs
@@ -26,6 +26,12 @@ use sc_cli::{Error as CliError, SubstrateCli};
use std::path::PathBuf;
use std::time::Duration;
+#[cfg(feature = "lazy-loading")]
+fn parse_block_hash(s: &str) -> Result<sp_core::H256, String> {
+ use std::str::FromStr;
+ sp_core::H256::from_str(s).map_err(|err| err.to_string())
+}
+
/// Sub-commands supported by the collator.
#[derive(Debug, clap::Subcommand)]
pub enum Subcommand {
@@ -136,6 +142,22 @@ pub struct RunCmd {
#[clap(long)]
pub dev_service: bool,
+ #[cfg(feature = "lazy-loading")]
+ #[clap(long)]
+ pub fork_chain_from_rpc: Option<String>,
+
+ #[cfg(feature = "lazy-loading")]
+ #[arg(long, value_name = "BLOCK", value_parser = parse_block_hash)]
+ pub block: Option<sp_core::H256>,
+
+ #[cfg(feature = "lazy-loading")]
+ #[clap(long, value_name = "PATH", value_parser)]
+ pub fork_state_overrides: Option<PathBuf>,
+
+ #[cfg(feature = "lazy-loading")]
+ #[clap(long, value_name = "PATH", value_parser)]
+ pub runtime_override: Option<PathBuf>,
+
/// When blocks should be sealed in the dev service.
///
/// Options are "instant", "manual", or timer interval in milliseconds
diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs
index 937a243716..22f6adb37e 100644
--- a/node/cli/src/command.rs
+++ b/node/cli/src/command.rs
@@ -693,7 +693,10 @@ pub fn run() -> Result<()> {
None => {
let runner = cli.create_runner(&(*cli.run).normalize())?;
let collator_options = cli.run.collator_options();
- runner.run_node_until_exit(|config| async move {
+
+ // It is used when feature "lazy-loading" is enabled
+ #[allow(unused_mut)]
+ runner.run_node_until_exit(|mut config| async move {
let hwbench = if !cli.run.no_hardware_benchmarks {
config.database.path().map(|database_path| {
let _ = std::fs::create_dir_all(&database_path);
@@ -704,23 +707,11 @@ pub fn run() -> Result<()> {
};
let extension = chain_spec::Extensions::try_get(&*config.chain_spec);
- let para_id = extension.map(|e| e.para_id);
- let id = ParaId::from(cli.run.parachain_id.clone().or(para_id).unwrap_or(1000));
let rpc_config = cli.run.new_rpc_config();
- // If dev service was requested, start up manual or instant seal.
- // Otherwise continue with the normal parachain node.
- // Dev service can be requested in two ways.
- // 1. by providing the --dev-service flag to the CLI
- // 2. by specifying "dev-service" in the chain spec's "relay-chain" field.
- // NOTE: the --dev flag triggers the dev service by way of number 2
- let relay_chain_id = extension.map(|e| e.relay_chain.as_str());
- let dev_service = cli.run.dev_service
- || config.chain_spec.is_dev()
- || relay_chain_id == Some("dev-service");
-
- if dev_service {
+ #[cfg(feature = "lazy-loading")]
+ if let Some(fork_chain_from_rpc) = cli.run.fork_chain_from_rpc {
// When running the dev service, just use Alice's author inherent
//TODO maybe make the --alice etc flags work here, and consider bringing back
// the author-id flag. For now, this will work.
@@ -728,34 +719,81 @@ pub fn run() -> Result<()> {
"Alice",
));
- return match &config.chain_spec {
- #[cfg(feature = "moonriver-native")]
- spec if spec.is_moonriver() => moonbeam_service::new_dev::<
- moonbeam_service::moonriver_runtime::RuntimeApi,
- moonbeam_service::MoonriverCustomizations,
- sc_network::NetworkWorker<_, _>,
- >(config, author_id, cli.run.sealing, rpc_config, hwbench)
- .await
- .map_err(Into::into),
- #[cfg(feature = "moonbeam-native")]
- spec if spec.is_moonbeam() => moonbeam_service::new_dev::<
- moonbeam_service::moonbeam_runtime::RuntimeApi,
- moonbeam_service::MoonbeamCustomizations,
- sc_network::NetworkWorker<_, _>,
- >(config, author_id, cli.run.sealing, rpc_config, hwbench)
- .await
- .map_err(Into::into),
- #[cfg(feature = "moonbase-native")]
- _ => moonbeam_service::new_dev::<
- moonbeam_service::moonbase_runtime::RuntimeApi,
- moonbeam_service::MoonbaseCustomizations,
- sc_network::NetworkWorker<_, _>,
- >(config, author_id, cli.run.sealing, rpc_config, hwbench)
- .await
- .map_err(Into::into),
- #[cfg(not(feature = "moonbase-native"))]
- _ => panic!("invalid chain spec"),
+ let lazy_loading_config = moonbeam_cli_opt::LazyLoadingConfig {
+ state_rpc: fork_chain_from_rpc,
+ from_block: cli.run.block,
+ state_overrides_path: cli.run.fork_state_overrides,
+ runtime_override: cli.run.runtime_override,
};
+
+ let spec_builder =
+ chain_spec::test_spec::lazy_loading_spec_builder(Default::default());
+ config.chain_spec = Box::new(spec_builder.build());
+
+ return moonbeam_service::lazy_loading::new_lazy_loading_service::<
+ moonbeam_runtime::RuntimeApi,
+ moonbeam_service::MoonbeamCustomizations,
+ sc_network::NetworkWorker<_, _>,
+ >(
+ config,
+ author_id,
+ cli.run.sealing,
+ rpc_config,
+ lazy_loading_config,
+ hwbench,
+ )
+ .await
+ .map_err(Into::into);
+ }
+ #[cfg(not(feature = "lazy-loading"))]
+ {
+ // If dev service was requested, start up manual or instant seal.
+ // Otherwise continue with the normal parachain node.
+ // Dev service can be requested in two ways.
+ // 1. by providing the --dev-service flag to the CLI
+ // 2. by specifying "dev-service" in the chain spec's "relay-chain" field.
+ // NOTE: the --dev flag triggers the dev service by way of number 2
+ let relay_chain_id = extension.map(|e| e.relay_chain.as_str());
+ let dev_service = cli.run.dev_service
+ || config.chain_spec.is_dev()
+ || relay_chain_id == Some("dev-service");
+ if dev_service {
+ // When running the dev service, just use Alice's author inherent
+ //TODO maybe make the --alice etc flags work here, and consider bringing back
+ // the author-id flag. For now, this will work.
+ let author_id = Some(chain_spec::get_from_seed::<
+ nimbus_primitives::NimbusId,
+ >("Alice"));
+
+ return match &config.chain_spec {
+ #[cfg(feature = "moonriver-native")]
+ spec if spec.is_moonriver() => moonbeam_service::new_dev::<
+ moonbeam_service::moonriver_runtime::RuntimeApi,
+ moonbeam_service::MoonriverCustomizations,
+ sc_network::NetworkWorker<_, _>,
+ >(config, author_id, cli.run.sealing, rpc_config, hwbench)
+ .await
+ .map_err(Into::into),
+ #[cfg(feature = "moonbeam-native")]
+ spec if spec.is_moonbeam() => moonbeam_service::new_dev::<
+ moonbeam_service::moonbeam_runtime::RuntimeApi,
+ moonbeam_service::MoonbeamCustomizations,
+ sc_network::NetworkWorker<_, _>,
+ >(config, author_id, cli.run.sealing, rpc_config, hwbench)
+ .await
+ .map_err(Into::into),
+ #[cfg(feature = "moonbase-native")]
+ _ => moonbeam_service::new_dev::<
+ moonbeam_service::moonbase_runtime::RuntimeApi,
+ moonbeam_service::MoonbaseCustomizations,
+ sc_network::NetworkWorker<_, _>,
+ >(config, author_id, cli.run.sealing, rpc_config, hwbench)
+ .await
+ .map_err(Into::into),
+ #[cfg(not(feature = "moonbase-native"))]
+ _ => panic!("invalid chain spec"),
+ };
+ }
}
let polkadot_cli = RelayChainCli::new(
@@ -765,6 +803,9 @@ pub fn run() -> Result<()> {
.chain(cli.relaychain_args.iter()),
);
+ let para_id = extension.map(|e| e.para_id);
+ let id = ParaId::from(cli.run.parachain_id.clone().or(para_id).unwrap_or(1000));
+
let parachain_account =
AccountIdConversion::<polkadot_primitives::AccountId>::into_account_truncating(&id);
diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml
index 61bce2a190..5fbf6d898b 100644
--- a/node/service/Cargo.toml
+++ b/node/service/Cargo.toml
@@ -15,7 +15,7 @@ exit-future = { workspace = true }
flume = { workspace = true }
futures = { workspace = true, features = ["compat"] }
hex-literal = { workspace = true }
-jsonrpsee = { workspace = true, features = ["macros", "server"] }
+jsonrpsee = { workspace = true, features = ["macros", "server", "http-client"] }
libsecp256k1 = { workspace = true, features = ["hmac"] }
log = { workspace = true }
maplit = { workspace = true }
@@ -26,6 +26,11 @@ sha3 = { workspace = true }
tiny-bip39 = { workspace = true }
tokio = { workspace = true, features = ["macros", "sync"] }
trie-root = { workspace = true }
+tokio-retry = { workspace = true }
+substrate-rpc-client = { workspace = true }
+hex = { workspace = true, features = ["std"] }
+thiserror = { workspace = true }
+tracing = { workspace = true }
# Moonbeam
moonbeam-dev-rpc = { workspace = true }
@@ -52,6 +57,8 @@ moonriver-runtime = { workspace = true, optional = true }
# Substrate
frame-system-rpc-runtime-api = { workspace = true, features = ["std"] }
+frame-system = { workspace = true, features = ["std"] }
+pallet-balances = { workspace = true, features = ["std"] }
pallet-transaction-payment = { workspace = true, features = ["std"] }
pallet-transaction-payment-rpc = { workspace = true }
pallet-transaction-payment-rpc-runtime-api = { workspace = true, features = [
@@ -95,6 +102,10 @@ sp-storage = { workspace = true, features = ["std"] }
sp-timestamp = { workspace = true, features = ["std"] }
sp-transaction-pool = { workspace = true, features = ["std"] }
sp-trie = { workspace = true, features = ["std"] }
+sp-state-machine = { workspace = true }
+sp-rpc = { workspace = true }
+sp-externalities = { workspace = true }
+sp-version = { workspace = true }
substrate-frame-rpc-system = { workspace = true }
substrate-prometheus-endpoint = { workspace = true }
@@ -178,6 +189,8 @@ default = [
"westend-native",
]
+lazy-loading = ["sc-service/test-helpers"]
+
rococo-native = ["polkadot-cli/rococo-native", "polkadot-service/rococo-native"]
westend-native = [
"polkadot-cli/westend-native",
diff --git a/node/service/src/chain_spec/test_spec.rs b/node/service/src/chain_spec/test_spec.rs
index 19bb7c0706..48715de217 100644
--- a/node/service/src/chain_spec/test_spec.rs
+++ b/node/service/src/chain_spec/test_spec.rs
@@ -87,3 +87,66 @@ pub fn staking_spec(para_id: ParaId) -> ChainSpec {
))
.build()
}
+
+#[cfg(feature = "lazy-loading")]
+pub fn lazy_loading_spec_builder(
+ para_id: ParaId,
+) -> sc_chain_spec::ChainSpecBuilder<moonbeam_runtime::RuntimeGenesisConfig, Extensions> {
+ crate::chain_spec::moonbeam::ChainSpec::builder(
+ moonbeam_runtime::WASM_BINARY.expect("WASM binary was not build, please build it!"),
+ Default::default(),
+ )
+ .with_name("Lazy Loading")
+ .with_id("lazy_loading")
+ .with_chain_type(ChainType::Development)
+ .with_properties(
+ serde_json::from_str(
+ "{\"tokenDecimals\": 18, \"tokenSymbol\": \"GLMR\", \"SS58Prefix\": 1284}",
+ )
+ .expect("Provided valid json map"),
+ )
+ .with_genesis_config(crate::chain_spec::moonbeam::testnet_genesis(
+ // Treasury Council members: Baltathar, Charleth and Dorothy
+ vec![
+ AccountId::from(hex!("3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0")),
+ AccountId::from(hex!("798d4Ba9baf0064Ec19eB4F0a1a45785ae9D6DFc")),
+ AccountId::from(hex!("773539d4Ac0e786233D90A233654ccEE26a613D9")),
+ ],
+ // Open Tech Committee members: Alith and Baltathar
+ vec![
+ AccountId::from(hex!("6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b")),
+ AccountId::from(hex!("3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0")),
+ ],
+ // Collators
+ vec![
+ (
+ AccountId::from(hex!("6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b")),
+ get_from_seed::<NimbusId>("Alice"),
+ 1_000 * moonbeam_runtime::currency::GLMR,
+ ),
+ (
+ AccountId::from(hex!("C0F0f4ab324C46e55D02D0033343B4Be8A55532d")),
+ get_from_seed::<NimbusId>("Faith"),
+ 1_000 * moonbeam_runtime::currency::GLMR,
+ ),
+ ],
+ // Delegations
+ vec![],
+ // Endowed accounts (each minted 1 << 80 balance)
+ vec![
+ // Alith, Baltathar, Charleth, Dorothy and Faith
+ AccountId::from(hex!("6Be02d1d3665660d22FF9624b7BE0551ee1Ac91b")),
+ AccountId::from(hex!("3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0")),
+ AccountId::from(hex!("798d4Ba9baf0064Ec19eB4F0a1a45785ae9D6DFc")),
+ AccountId::from(hex!("773539d4Ac0e786233D90A233654ccEE26a613D9")),
+ AccountId::from(hex!("C0F0f4ab324C46e55D02D0033343B4Be8A55532d")),
+ // Additional accounts
+ AccountId::from(hex!("Ff64d3F6efE2317EE2807d223a0Bdc4c0c49dfDB")),
+ AccountId::from(hex!("f24FF3a9CF04c71Dbc94D0b566f7A27B94566cac")),
+ ],
+ 3_000_000 * moonbeam_runtime::currency::GLMR,
+ para_id,
+ // Chain ID
+ 1280,
+ ))
+}
diff --git a/node/service/src/externalities.rs b/node/service/src/externalities.rs
new file mode 100644
index 0000000000..f8db22959d
--- /dev/null
+++ b/node/service/src/externalities.rs
@@ -0,0 +1,872 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Concrete externalities implementation.
+
+extern crate alloc;
+
+#[cfg(feature = "std")]
+use crate::overlayed_changes::OverlayedExtensions;
+use hash_db::Hasher;
+use parity_scale_codec::{Encode, EncodeAppend};
+#[cfg(feature = "std")]
+use sp_core::hexdisplay::HexDisplay;
+use sp_core::storage::{
+ well_known_keys::is_child_storage_key, ChildInfo, StateVersion, TrackedStorageKey,
+};
+use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults};
+use sp_state_machine::{
+ backend::Backend, IndexOperation, IterArgs, OverlayedChanges, StorageKey, StorageValue,
+};
+
+use alloc::{boxed::Box, vec, vec::Vec};
+use core::{
+ any::{Any, TypeId},
+ cmp::Ordering,
+};
+use sp_core::hexdisplay::HexDisplay;
+use sp_state_machine::{log_error, trace, warn};
+#[cfg(feature = "std")]
+use std::error;
+
+// Panic/expect message for backend and overlay accesses that must not fail
+// while the runtime is executing (mirrors upstream `sp_state_machine::ext`).
+const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within runtime";
+// Expect message used by `wipe`/`commit`, which are benchmarking-only entry
+// points: no client-started transactions may be open when they run.
+const BENCHMARKING_FN: &str = "\
+ This is a special fn only for benchmarking where a database commit happens from the runtime.
+ For that reason client started transactions before calling into runtime are not allowed.
+ Without client transactions the loop condition guarantees the success of the tx close.";
+
+// On std builds, returns an RAII guard that force-aborts on panic, so state
+// can never be observed half-mutated mid-externalities-call.
+#[cfg(feature = "std")]
+fn guard() -> sp_panic_handler::AbortGuard {
+ sp_panic_handler::AbortGuard::force_abort()
+}
+
+// No-op guard for no-std builds (no panic handler available there).
+#[cfg(not(feature = "std"))]
+fn guard() -> () {
+ ()
+}
+
+/// Errors that can occur when interacting with the externalities.
+///
+/// NOTE(review): the patch text had lost the generic parameters here
+/// (`pub enum Error {` with variants `Backend(B)` / `Executor(E)`); restored
+/// as `Error<B, E>` so the `Display`/`error::Error` impls below type-check,
+/// mirroring `sp_state_machine::Error` — confirm against the repository.
+#[cfg(feature = "std")]
+#[derive(Debug, Copy, Clone)]
+pub enum Error<B, E> {
+	/// Failure to load state data from the backend.
+	#[allow(unused)]
+	Backend(B),
+	/// Failure to execute a function.
+	#[allow(unused)]
+	Executor(E),
+}
+
+#[cfg(feature = "std")]
+impl<B: std::fmt::Display, E: std::fmt::Display> std::fmt::Display for Error<B, E> {
+	fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+		match *self {
+			Error::Backend(ref e) => write!(f, "Storage backend error: {}", e),
+			Error::Executor(ref e) => write!(f, "Sub-call execution error: {}", e),
+		}
+	}
+}
+
+#[cfg(feature = "std")]
+impl<B: error::Error, E: error::Error> error::Error for Error<B, E> {
+	fn description(&self) -> &str {
+		match *self {
+			Error::Backend(..) => "backend error",
+			Error::Executor(..) => "executor error",
+		}
+	}
+}
+
+/// Wraps a read-only backend, call executor, and current overlayed changes.
+///
+/// NOTE(review): generic arguments were stripped in the patch text
+/// (`B: 'a + Backend,`, `overlay: &'a mut OverlayedChanges,`,
+/// `extensions: Option>`); restored to match `sp_state_machine::Ext`,
+/// which this type mirrors — confirm against the repository.
+pub struct LazyLoadingExt<'a, H, B>
+where
+	H: Hasher,
+	B: 'a + Backend<H>,
+{
+	/// The overlayed changes to write to.
+	overlay: &'a mut OverlayedChanges<H>,
+	/// The storage backend to read from.
+	backend: &'a B,
+	/// Pseudo-unique id used for tracing.
+	pub id: u16,
+	/// Extensions registered with this instance.
+	#[cfg(feature = "std")]
+	extensions: Option<OverlayedExtensions<'a>>,
+}
+
+impl<'a, H, B> LazyLoadingExt<'a, H, B>
+where
+	H: Hasher,
+	B: Backend<H>,
+{
+	/// Create a new `Ext` (no-std build: extensions are unsupported, `id` is 0).
+	#[cfg(not(feature = "std"))]
+	pub fn new(overlay: &'a mut OverlayedChanges<H>, backend: &'a B) -> Self {
+		LazyLoadingExt {
+			overlay,
+			backend,
+			id: 0,
+		}
+	}
+
+	/// Create a new `Ext` from overlayed changes and read-only backend.
+	///
+	/// `id` is randomized so concurrent instances can be told apart in traces.
+	#[cfg(feature = "std")]
+	pub fn new(
+		overlay: &'a mut OverlayedChanges<H>,
+		backend: &'a B,
+		extensions: Option<&'a mut sp_externalities::Extensions>,
+	) -> Self {
+		Self {
+			overlay,
+			backend,
+			id: rand::random(),
+			extensions: extensions.map(OverlayedExtensions::new),
+		}
+	}
+}
+
+#[cfg(test)]
+impl<'a, H, B> LazyLoadingExt<'a, H, B>
+where
+	H: Hasher,
+	H::Out: Ord + 'static,
+	B: 'a + Backend<H>,
+{
+	/// Test helper: merged key/value view of the backend with the overlay
+	/// applied on top (overlay entries win; `None` entries drop the key).
+	pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> {
+		use std::collections::HashMap;
+
+		self.backend
+			.pairs(Default::default())
+			.expect("never fails in tests; qed.")
+			.map(|key_value| key_value.expect("never fails in tests; qed."))
+			.map(|(k, v)| (k, Some(v)))
+			.chain(
+				self.overlay
+					.changes()
+					.map(|(k, v)| (k.clone(), v.value().cloned())),
+			)
+			// Later (overlay) entries overwrite backend entries for the same key.
+			.collect::<HashMap<_, _>>()
+			.into_iter()
+			.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
+			.collect()
+	}
+}
+
+// NOTE(review): the patch text has stripped most generic arguments in this
+// impl (e.g. bare `Option` return types, `B: Backend,` without `<H>`,
+// `value: Option,` parameters) — presumably `Option<StorageValue>`,
+// `Backend<H>`, etc. as in `sp_state_machine::Ext`, which this impl mirrors;
+// confirm against the repository sources before relying on exact signatures.
+impl<'a, H, B> Externalities for LazyLoadingExt<'a, H, B>
+where
+ H: Hasher,
+ H::Out: Ord + 'static + parity_scale_codec::Codec,
+ B: Backend,
+{
+ // Offchain writes go straight to the overlay's offchain change set.
+ fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>) {
+ self.overlay.set_offchain_storage(key, value)
+ }
+
+ // Read a key: the overlay takes precedence; otherwise fall back to the
+ // backend (which, in lazy-loading mode, may fetch the value remotely).
+ fn storage(&self, key: &[u8]) -> Option {
+ let _guard = guard();
+
+ let result = self
+ .overlay
+ .storage(key)
+ .map(|x| x.map(|x| x.to_vec()))
+ .unwrap_or_else(|| self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL));
+
+ // NOTE: be careful about touching the key names – used outside substrate!
+ trace!(
+ target: "state",
+ method = "Get",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ key = %HexDisplay::from(&key),
+ result = ?result.as_ref().map(HexDisplay::from),
+ result_encoded = %HexDisplay::from(
+ &result
+ .as_ref()
+ .map(|v| EncodeOpaqueValue(v.clone()))
+ .encode()
+ ),
+ );
+
+ result
+ }
+
+ // Same as `storage`, but hashes the value (`H::hash`) and returns the
+ // SCALE-encoded hash.
+ fn storage_hash(&self, key: &[u8]) -> Option> {
+ let _guard = guard();
+ let result = self
+ .overlay
+ .storage(key)
+ .map(|x| x.map(|x| H::hash(x)))
+ .unwrap_or_else(|| {
+ self.backend
+ .storage_hash(key)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL)
+ });
+
+ trace!(
+ target: "state",
+ method = "Hash",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ key = %HexDisplay::from(&key),
+ ?result,
+ );
+ result.map(|r| r.encode())
+ }
+
+ fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option {
+ let _guard = guard();
+ let result = self
+ .overlay
+ .child_storage(child_info, key)
+ .map(|x| x.map(|x| x.to_vec()))
+ .unwrap_or_else(|| {
+ self.backend
+ .child_storage(child_info, key)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL)
+ });
+
+ trace!(
+ target: "state",
+ method = "ChildGet",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ child_info = %HexDisplay::from(&child_info.storage_key()),
+ key = %HexDisplay::from(&key),
+ result = ?result.as_ref().map(HexDisplay::from)
+ );
+
+ result
+ }
+
+ fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> {
+ let _guard = guard();
+ let result = self
+ .overlay
+ .child_storage(child_info, key)
+ .map(|x| x.map(|x| H::hash(x)))
+ .unwrap_or_else(|| {
+ self.backend
+ .child_storage_hash(child_info, key)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL)
+ });
+
+ trace!(
+ target: "state",
+ method = "ChildHash",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ child_info = %HexDisplay::from(&child_info.storage_key()),
+ key = %HexDisplay::from(&key),
+ ?result,
+ );
+
+ result.map(|r| r.encode())
+ }
+
+ // Existence checks avoid materializing the value when possible.
+ fn exists_storage(&self, key: &[u8]) -> bool {
+ let _guard = guard();
+ let result = match self.overlay.storage(key) {
+ Some(x) => x.is_some(),
+ _ => self
+ .backend
+ .exists_storage(key)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL),
+ };
+
+ trace!(
+ target: "state",
+ method = "Exists",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ key = %HexDisplay::from(&key),
+ %result,
+ );
+
+ result
+ }
+
+ fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool {
+ let _guard = guard();
+
+ let result = match self.overlay.child_storage(child_info, key) {
+ Some(x) => x.is_some(),
+ _ => self
+ .backend
+ .exists_child_storage(child_info, key)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL),
+ };
+
+ trace!(
+ target: "state",
+ method = "ChildExists",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ child_info = %HexDisplay::from(&child_info.storage_key()),
+ key = %HexDisplay::from(&key),
+ %result,
+ );
+ result
+ }
+
+ // Merge-iterate the backend's and the overlay's sorted key streams to find
+ // the lexicographically next key, skipping keys the overlay deleted.
+ fn next_storage_key(&self, key: &[u8]) -> Option {
+ let mut next_backend_key = self
+ .backend
+ .next_storage_key(key)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL);
+ let mut overlay_changes = self.overlay.iter_after(key).peekable();
+
+ match (&next_backend_key, overlay_changes.peek()) {
+ (_, None) => next_backend_key,
+ (Some(_), Some(_)) => {
+ for overlay_key in overlay_changes {
+ let cmp = next_backend_key.as_deref().map(|v| v.cmp(overlay_key.0));
+
+ // If `backend_key` is less than the `overlay_key`, we found out next key.
+ if cmp == Some(Ordering::Less) {
+ return next_backend_key;
+ } else if overlay_key.1.value().is_some() {
+ // If there exists a value for the `overlay_key` in the overlay
+ // (aka the key is still valid), it means we have found our next key.
+ return Some(overlay_key.0.to_vec());
+ } else if cmp == Some(Ordering::Equal) {
+ // If the `backend_key` and `overlay_key` are equal, it means that we need
+ // to search for the next backend key, because the overlay has overwritten
+ // this key.
+ next_backend_key = self
+ .backend
+ .next_storage_key(overlay_key.0)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL);
+ }
+ }
+
+ next_backend_key
+ }
+ (None, Some(_)) => {
+ // Find the next overlay key that has a value attached.
+ overlay_changes.find_map(|k| k.1.value().as_ref().map(|_| k.0.to_vec()))
+ }
+ }
+ }
+
+ // Child-trie analogue of `next_storage_key`, same merge logic.
+ fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option {
+ let mut next_backend_key = self
+ .backend
+ .next_child_storage_key(child_info, key)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL);
+ let mut overlay_changes = self
+ .overlay
+ .child_iter_after(child_info.storage_key(), key)
+ .peekable();
+
+ match (&next_backend_key, overlay_changes.peek()) {
+ (_, None) => next_backend_key,
+ (Some(_), Some(_)) => {
+ for overlay_key in overlay_changes {
+ let cmp = next_backend_key.as_deref().map(|v| v.cmp(overlay_key.0));
+
+ // If `backend_key` is less than the `overlay_key`, we found out next key.
+ if cmp == Some(Ordering::Less) {
+ return next_backend_key;
+ } else if overlay_key.1.value().is_some() {
+ // If there exists a value for the `overlay_key` in the overlay
+ // (aka the key is still valid), it means we have found our next key.
+ return Some(overlay_key.0.to_vec());
+ } else if cmp == Some(Ordering::Equal) {
+ // If the `backend_key` and `overlay_key` are equal, it means that we need
+ // to search for the next backend key, because the overlay has overwritten
+ // this key.
+ next_backend_key = self
+ .backend
+ .next_child_storage_key(child_info, overlay_key.0)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL);
+ }
+ }
+
+ next_backend_key
+ }
+ (None, Some(_)) => {
+ // Find the next overlay key that has a value attached.
+ overlay_changes.find_map(|k| k.1.value().as_ref().map(|_| k.0.to_vec()))
+ }
+ }
+ }
+
+ // Writes only touch the overlay; the backend stays read-only here.
+ fn place_storage(&mut self, key: StorageKey, value: Option) {
+ let _guard = guard();
+ if is_child_storage_key(&key) {
+ warn!(target: "trie", "Refuse to directly set child storage key");
+ return;
+ }
+
+ // NOTE: be careful about touching the key names – used outside substrate!
+ trace!(
+ target: "state",
+ method = "Put",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ key = %HexDisplay::from(&key),
+ value = ?value.as_ref().map(HexDisplay::from),
+ value_encoded = %HexDisplay::from(
+ &value
+ .as_ref()
+ .map(|v| EncodeOpaqueValue(v.clone()))
+ .encode()
+ ),
+ );
+
+ self.overlay.set_storage(key, value);
+ }
+
+ fn place_child_storage(
+ &mut self,
+ child_info: &ChildInfo,
+ key: StorageKey,
+ value: Option,
+ ) {
+ trace!(
+ target: "state",
+ method = "ChildPut",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ child_info = %HexDisplay::from(&child_info.storage_key()),
+ key = %HexDisplay::from(&key),
+ value = ?value.as_ref().map(HexDisplay::from),
+ );
+ let _guard = guard();
+
+ self.overlay.set_child_storage(child_info, key, value);
+ }
+
+ // Bulk deletions: clear the overlay's view, then stage backend deletions
+ // (also in the overlay) via `limit_remove_from_backend`.
+ fn kill_child_storage(
+ &mut self,
+ child_info: &ChildInfo,
+ maybe_limit: Option,
+ maybe_cursor: Option<&[u8]>,
+ ) -> MultiRemovalResults {
+ trace!(
+ target: "state",
+ method = "ChildKill",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ child_info = %HexDisplay::from(&child_info.storage_key()),
+ );
+ let _guard = guard();
+ let overlay = self.overlay.clear_child_storage(child_info);
+ let (maybe_cursor, backend, loops) =
+ self.limit_remove_from_backend(Some(child_info), None, maybe_limit, maybe_cursor);
+ MultiRemovalResults {
+ maybe_cursor,
+ backend,
+ unique: overlay + backend,
+ loops,
+ }
+ }
+
+ fn clear_prefix(
+ &mut self,
+ prefix: &[u8],
+ maybe_limit: Option,
+ maybe_cursor: Option<&[u8]>,
+ ) -> MultiRemovalResults {
+ trace!(
+ target: "state",
+ method = "ClearPrefix",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ prefix = %HexDisplay::from(&prefix),
+ );
+ let _guard = guard();
+
+ if sp_core::storage::well_known_keys::starts_with_child_storage_key(prefix) {
+ warn!(
+ target: "trie",
+ "Refuse to directly clear prefix that is part or contains of child storage key",
+ );
+ return MultiRemovalResults {
+ maybe_cursor: None,
+ backend: 0,
+ unique: 0,
+ loops: 0,
+ };
+ }
+
+ let overlay = self.overlay.clear_prefix(prefix);
+ let (maybe_cursor, backend, loops) =
+ self.limit_remove_from_backend(None, Some(prefix), maybe_limit, maybe_cursor);
+ MultiRemovalResults {
+ maybe_cursor,
+ backend,
+ unique: overlay + backend,
+ loops,
+ }
+ }
+
+ fn clear_child_prefix(
+ &mut self,
+ child_info: &ChildInfo,
+ prefix: &[u8],
+ maybe_limit: Option,
+ maybe_cursor: Option<&[u8]>,
+ ) -> MultiRemovalResults {
+ trace!(
+ target: "state",
+ method = "ChildClearPrefix",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ child_info = %HexDisplay::from(&child_info.storage_key()),
+ prefix = %HexDisplay::from(&prefix),
+ );
+ let _guard = guard();
+
+ let overlay = self.overlay.clear_child_prefix(child_info, prefix);
+ let (maybe_cursor, backend, loops) = self.limit_remove_from_backend(
+ Some(child_info),
+ Some(prefix),
+ maybe_limit,
+ maybe_cursor,
+ );
+ MultiRemovalResults {
+ maybe_cursor,
+ backend,
+ unique: overlay + backend,
+ loops,
+ }
+ }
+
+ // Append to a SCALE-encoded list value in place; the current value is
+ // pulled from the backend on first touch, then mutated in the overlay.
+ fn storage_append(&mut self, key: Vec, value: Vec) {
+ trace!(
+ target: "state",
+ method = "Append",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ key = %HexDisplay::from(&key),
+ value = %HexDisplay::from(&value),
+ );
+
+ let _guard = guard();
+
+ let backend = &mut self.backend;
+ let current_value = self.overlay.value_mut_or_insert_with(&key, || {
+ backend
+ .storage(&key)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL)
+ .unwrap_or_default()
+ });
+ StorageAppend::new(current_value).append(value);
+ }
+
+ fn storage_root(&mut self, state_version: StateVersion) -> Vec {
+ let _guard = guard();
+
+ let (root, _cached) = self.overlay.storage_root(self.backend, state_version);
+
+ trace!(
+ target: "state",
+ method = "StorageRoot",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ storage_root = %HexDisplay::from(&root.as_ref()),
+ cached = %_cached,
+ );
+
+ root.encode()
+ }
+
+ fn child_storage_root(
+ &mut self,
+ child_info: &ChildInfo,
+ state_version: StateVersion,
+ ) -> Vec {
+ let _guard = guard();
+
+ let (root, _cached) = self
+ .overlay
+ .child_storage_root(child_info, self.backend, state_version)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL);
+
+ trace!(
+ target: "state",
+ method = "ChildStorageRoot",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ child_info = %HexDisplay::from(&child_info.storage_key()),
+ storage_root = %HexDisplay::from(&root.as_ref()),
+ cached = %_cached,
+ );
+
+ root.encode()
+ }
+
+ // Transaction-index records are accumulated in the overlay.
+ fn storage_index_transaction(&mut self, index: u32, hash: &[u8], size: u32) {
+ trace!(
+ target: "state",
+ method = "IndexTransaction",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ %index,
+ tx_hash = %HexDisplay::from(&hash),
+ %size,
+ );
+
+ self.overlay.add_transaction_index(IndexOperation::Insert {
+ extrinsic: index,
+ hash: hash.to_vec(),
+ size,
+ });
+ }
+
+ /// Renew existing piece of data storage.
+ fn storage_renew_transaction_index(&mut self, index: u32, hash: &[u8]) {
+ trace!(
+ target: "state",
+ method = "RenewTransactionIndex",
+ ext_id = %HexDisplay::from(&self.id.to_le_bytes()),
+ %index,
+ tx_hash = %HexDisplay::from(&hash),
+ );
+
+ self.overlay.add_transaction_index(IndexOperation::Renew {
+ extrinsic: index,
+ hash: hash.to_vec(),
+ });
+ }
+
+ // Storage transactions delegate directly to the overlay's change-set stack.
+ fn storage_start_transaction(&mut self) {
+ self.overlay.start_transaction()
+ }
+
+ fn storage_rollback_transaction(&mut self) -> Result<(), ()> {
+ self.overlay.rollback_transaction().map_err(|_| ())
+ }
+
+ fn storage_commit_transaction(&mut self) -> Result<(), ()> {
+ self.overlay.commit_transaction().map_err(|_| ())
+ }
+
+ // Benchmarking-only (see `BENCHMARKING_FN`): roll back all open overlay
+ // transactions and wipe the backend.
+ fn wipe(&mut self) {
+ for _ in 0..self.overlay.transaction_depth() {
+ self.overlay.rollback_transaction().expect(BENCHMARKING_FN);
+ }
+ self.overlay
+ .drain_storage_changes(self.backend, Default::default())
+ .expect(EXT_NOT_ALLOWED_TO_FAIL);
+ self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL);
+ self.overlay
+ .enter_runtime()
+ .expect("We have reset the overlay above, so we can not be in the runtime; qed");
+ }
+
+ // Benchmarking-only: commit all open overlay transactions and flush the
+ // drained changes into the backend.
+ fn commit(&mut self) {
+ // Bench always use latest state.
+ let state_version = StateVersion::default();
+ for _ in 0..self.overlay.transaction_depth() {
+ self.overlay.commit_transaction().expect(BENCHMARKING_FN);
+ }
+ let changes = self
+ .overlay
+ .drain_storage_changes(self.backend, state_version)
+ .expect(EXT_NOT_ALLOWED_TO_FAIL);
+ self.backend
+ .commit(
+ changes.transaction_storage_root,
+ changes.transaction,
+ changes.main_storage_changes,
+ changes.child_storage_changes,
+ )
+ .expect(EXT_NOT_ALLOWED_TO_FAIL);
+ self.overlay
+ .enter_runtime()
+ .expect("We have reset the overlay above, so we can not be in the runtime; qed");
+ }
+
+ // Benchmarking statistics: all forwarded to the backend.
+ fn read_write_count(&self) -> (u32, u32, u32, u32) {
+ self.backend.read_write_count()
+ }
+
+ fn reset_read_write_count(&mut self) {
+ self.backend.reset_read_write_count()
+ }
+
+ fn get_whitelist(&self) -> Vec {
+ self.backend.get_whitelist()
+ }
+
+ fn set_whitelist(&mut self, new: Vec) {
+ self.backend.set_whitelist(new)
+ }
+
+ fn proof_size(&self) -> Option {
+ self.backend.proof_size()
+ }
+
+ fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> {
+ self.backend.get_read_and_written_keys()
+ }
+}
+
+impl<'a, H, B> LazyLoadingExt<'a, H, B>
+where
+	H: Hasher,
+	H::Out: Ord + 'static + parity_scale_codec::Codec,
+	B: Backend<H>,
+{
+	/// Stage deletion of up to `maybe_limit` backend keys starting at `start_at`,
+	/// restricted to `prefix` and/or the given child trie. Deletions are written
+	/// to the overlay as `None` entries; the backend itself is not mutated.
+	///
+	/// Returns `(cursor_if_limit_hit, deleted_count, loop_count)`.
+	fn limit_remove_from_backend(
+		&mut self,
+		child_info: Option<&ChildInfo>,
+		prefix: Option<&[u8]>,
+		maybe_limit: Option<u32>,
+		start_at: Option<&[u8]>,
+	) -> (Option<Vec<u8>>, u32, u32) {
+		let mut args = IterArgs::default();
+		args.prefix = prefix;
+		args.start_at = start_at;
+		args.child_info = child_info.cloned();
+
+		let iter = match self.backend.keys(args) {
+			Ok(iter) => iter,
+			Err(error) => {
+				log::debug!(target: "trie", "Error while iterating the storage: {}", error);
+				return (None, 0, 0);
+			}
+		};
+
+		let mut delete_count: u32 = 0;
+		let mut loop_count: u32 = 0;
+		let mut maybe_next_key = None;
+		for key in iter {
+			let key = match key {
+				Ok(key) => key,
+				Err(error) => {
+					log::debug!(target: "trie", "Error while iterating the storage: {}", error);
+					break;
+				}
+			};
+
+			// Limit reached: remember this key as the resumption cursor.
+			if maybe_limit.map_or(false, |limit| loop_count == limit) {
+				maybe_next_key = Some(key);
+				break;
+			}
+			let overlay = match child_info {
+				Some(child_info) => self.overlay.child_storage(child_info, &key),
+				None => self.overlay.storage(&key),
+			};
+			if !matches!(overlay, Some(None)) {
+				// not pending deletion from the backend - delete it.
+				if let Some(child_info) = child_info {
+					self.overlay.set_child_storage(child_info, key, None);
+				} else {
+					self.overlay.set_storage(key, None);
+				}
+				delete_count = delete_count.saturating_add(1);
+			}
+			loop_count = loop_count.saturating_add(1);
+		}
+
+		(maybe_next_key, delete_count, loop_count)
+	}
+}
+
+/// Implement `Encode` by forwarding the stored raw vec.
+///
+/// NOTE(review): type parameters restored (`Vec<u8>` payload,
+/// `using_encoded<R, F: FnOnce(&[u8]) -> R>`) — the patch text had stripped
+/// them; matches `sp_state_machine::ext::EncodeOpaqueValue`.
+struct EncodeOpaqueValue(Vec<u8>);
+
+impl Encode for EncodeOpaqueValue {
+	fn using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
+		// The bytes are already SCALE-encoded; emit them verbatim.
+		f(&self.0)
+	}
+}
+
+/// Auxiliary structure for appending a value to a storage item.
+///
+/// NOTE(review): type parameters restored (`Vec<u8>`,
+/// `Vec::<EncodeOpaqueValue>::append_or_new`) — stripped in the patch text;
+/// matches `sp_state_machine::ext::StorageAppend`.
+pub(crate) struct StorageAppend<'a>(&'a mut Vec<u8>);
+
+impl<'a> StorageAppend<'a> {
+	/// Create a new instance using the given `storage` reference.
+	pub fn new(storage: &'a mut Vec<u8>) -> Self {
+		Self(storage)
+	}
+
+	/// Append the given `value` to the storage item.
+	///
+	/// If appending fails, `[value]` is stored in the storage item.
+	pub fn append(&mut self, value: Vec<u8>) {
+		// Wrap the raw bytes so they are appended as one opaque SCALE element.
+		let value = vec![EncodeOpaqueValue(value)];
+
+		// Move the current encoding out so it can be extended without cloning.
+		let item = core::mem::take(self.0);
+
+		*self.0 = match Vec::<EncodeOpaqueValue>::append_or_new(item, &value) {
+			Ok(item) => item,
+			Err(_) => {
+				log_error!(
+					target: "runtime",
+					"Failed to append value, resetting storage item to `[value]`.",
+				);
+				value.encode()
+			}
+		};
+	}
+}
+
+/// On no-std builds extensions are unsupported: lookups return `None` and
+/// registration/deregistration always fail.
+#[cfg(not(feature = "std"))]
+impl<'a, H, B> ExtensionStore for LazyLoadingExt<'a, H, B>
+where
+	H: Hasher,
+	H::Out: Ord + 'static + parity_scale_codec::Codec,
+	B: Backend<H>,
+{
+	fn extension_by_type_id(&mut self, _type_id: TypeId) -> Option<&mut dyn Any> {
+		None
+	}
+
+	fn register_extension_with_type_id(
+		&mut self,
+		_type_id: TypeId,
+		_extension: Box<dyn Extension>,
+	) -> Result<(), sp_externalities::Error> {
+		Err(sp_externalities::Error::ExtensionsAreNotSupported)
+	}
+
+	fn deregister_extension_by_type_id(
+		&mut self,
+		_type_id: TypeId,
+	) -> Result<(), sp_externalities::Error> {
+		Err(sp_externalities::Error::ExtensionsAreNotSupported)
+	}
+}
+
+/// On std builds extensions are held in the optional `OverlayedExtensions`.
+///
+/// NOTE(review): the patch implemented this for `Ext<'a, H, B>`, a type not
+/// defined in this file — renamed to `LazyLoadingExt` to match the type this
+/// module declares and the no-std impl above; `Backend<H>` bound restored.
+#[cfg(feature = "std")]
+impl<'a, H, B> ExtensionStore for LazyLoadingExt<'a, H, B>
+where
+	H: Hasher,
+	B: 'a + Backend<H>,
+{
+	fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> {
+		self.extensions
+			.as_mut()
+			.and_then(|exts| exts.get_mut(type_id))
+	}
+
+	fn register_extension_with_type_id(
+		&mut self,
+		type_id: TypeId,
+		extension: Box<dyn Extension>,
+	) -> Result<(), sp_externalities::Error> {
+		if let Some(ref mut extensions) = self.extensions {
+			extensions.register(type_id, extension)
+		} else {
+			Err(sp_externalities::Error::ExtensionsAreNotSupported)
+		}
+	}
+
+	fn deregister_extension_by_type_id(
+		&mut self,
+		type_id: TypeId,
+	) -> Result<(), sp_externalities::Error> {
+		if let Some(ref mut extensions) = self.extensions {
+			if extensions.deregister(type_id) {
+				Ok(())
+			} else {
+				Err(sp_externalities::Error::ExtensionIsNotRegistered(type_id))
+			}
+		} else {
+			Err(sp_externalities::Error::ExtensionsAreNotSupported)
+		}
+	}
+}
diff --git a/node/service/src/lazy_loading/backend.rs b/node/service/src/lazy_loading/backend.rs
new file mode 100644
index 0000000000..e2f2e1a3aa
--- /dev/null
+++ b/node/service/src/lazy_loading/backend.rs
@@ -0,0 +1,1626 @@
+// Copyright 2024 Moonbeam foundation
+// This file is part of Moonbeam.
+
+// Moonbeam is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Moonbeam is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Moonbeam. If not, see .
+
+use parking_lot::RwLock;
+use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata};
+use sp_core::storage::well_known_keys;
+use sp_runtime::{
+ generic::BlockId,
+ traits::{Block as BlockT, HashingFor, Header as HeaderT, NumberFor, Zero},
+ Justification, Justifications, StateVersion, Storage,
+};
+use sp_state_machine::{
+ BackendTransaction, ChildStorageCollection, IndexOperation, StorageCollection, TrieBackend,
+};
+use std::future::Future;
+use std::marker::PhantomData;
+use std::ops::AddAssign;
+use std::time::Duration;
+use std::{
+ collections::{HashMap, HashSet},
+ ptr,
+ sync::Arc,
+};
+
+use sc_client_api::{
+ backend::{self, NewBlockState},
+ blockchain::{self, BlockStatus, HeaderBackend},
+ leaves::LeafSet,
+ UsageInfo,
+};
+
+use jsonrpsee::http_client::HttpClient;
+use sp_runtime::generic::SignedBlock;
+
+use crate::chain_spec;
+use crate::lazy_loading::state_overrides::StateEntry;
+use crate::lazy_loading::{helpers, state_overrides};
+use moonbeam_cli_opt::LazyLoadingConfig;
+use moonbeam_core_primitives::BlockNumber;
+use sc_client_api::StorageKey;
+use sc_service::{Configuration, Error};
+use serde::de::DeserializeOwned;
+use sp_core::offchain::storage::InMemOffchainStorage;
+use sp_core::{twox_128, H256};
+use sp_rpc::list::ListOrValue;
+use sp_rpc::number::NumberOrHex;
+use sp_storage::{ChildInfo, StorageData};
+use sp_trie::PrefixedMemoryDB;
+use tokio_retry::strategy::FixedInterval;
+use tokio_retry::Retry;
+
+/// A block queued for import into the in-memory blockchain.
+///
+/// NOTE(review): `<B: BlockT>` parameters restored — stripped in the patch
+/// text; mirrors `sc_client_api::in_mem`.
+struct PendingBlock<B: BlockT> {
+	block: StoredBlock<B>,
+	state: NewBlockState,
+}
+
+/// A stored block: header-only or full body, plus optional justifications.
+#[derive(PartialEq, Eq, Clone)]
+enum StoredBlock<B: BlockT> {
+	Header(B::Header, Option<Justifications>),
+	Full(B, Option<Justifications>),
+}
+
+/// NOTE(review): `<B: BlockT>` and `Vec<B::Extrinsic>`/`Justifications`
+/// arguments restored — stripped in the patch text; mirrors
+/// `sc_client_api::in_mem`.
+impl<B: BlockT> StoredBlock<B> {
+	/// Build a stored block; header-only when `body` is absent.
+	fn new(
+		header: B::Header,
+		body: Option<Vec<B::Extrinsic>>,
+		just: Option<Justifications>,
+	) -> Self {
+		match body {
+			Some(body) => StoredBlock::Full(B::new(header, body), just),
+			None => StoredBlock::Header(header, just),
+		}
+	}
+
+	/// The header, which is present in both variants.
+	fn header(&self) -> &B::Header {
+		match *self {
+			StoredBlock::Header(ref h, _) => h,
+			StoredBlock::Full(ref b, _) => b.header(),
+		}
+	}
+
+	/// Justifications stored alongside the block, if any.
+	fn justifications(&self) -> Option<&Justifications> {
+		match *self {
+			StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref(),
+		}
+	}
+
+	/// The block body; `None` for header-only entries.
+	fn extrinsics(&self) -> Option<&[B::Extrinsic]> {
+		match *self {
+			StoredBlock::Header(_, _) => None,
+			StoredBlock::Full(ref b, _) => Some(b.extrinsics()),
+		}
+	}
+
+	/// Decompose into `(header, body, justifications)`.
+	fn into_inner(self) -> (B::Header, Option<Vec<B::Extrinsic>>, Option<Justifications>) {
+		match self {
+			StoredBlock::Header(header, just) => (header, None, just),
+			StoredBlock::Full(block, just) => {
+				let (header, body) = block.deconstruct();
+				(header, Some(body), just)
+			}
+		}
+	}
+}
+
+/// Shared state of the in-memory blockchain.
+///
+/// NOTE(review): generic parameters restored — the patch text had stripped
+/// them (e.g. `blocks: HashMap>`). Field types follow their use in
+/// `Blockchain::new` and the accessors below, mirroring
+/// `sc_client_api::in_mem` — confirm against the repository.
+#[derive(Clone)]
+struct BlockchainStorage<Block: BlockT> {
+	/// All known blocks, keyed by hash.
+	blocks: HashMap<Block::Hash, StoredBlock<Block>>,
+	/// Canonical number -> hash index.
+	hashes: HashMap<NumberFor<Block>, Block::Hash>,
+	best_hash: Block::Hash,
+	best_number: NumberFor<Block>,
+	finalized_hash: Block::Hash,
+	finalized_number: NumberFor<Block>,
+	genesis_hash: Block::Hash,
+	/// CHT roots per block number.
+	header_cht_roots: HashMap<NumberFor<Block>, Block::Hash>,
+	/// Current chain leaves.
+	leaves: LeafSet<Block::Hash, NumberFor<Block>>,
+	/// Auxiliary key/value data.
+	aux: HashMap<Vec<u8>, Vec<u8>>,
+}
+
+/// In-memory blockchain. Supports concurrent reads.
+///
+/// NOTE(review): generic arguments were stripped from this patch text —
+/// presumably `Blockchain<Block: BlockT>` holding an
+/// `Arc<RwLock<BlockchainStorage<Block>>>`; the `rpc_client` element type is
+/// not recoverable from this view — confirm against the repository.
+#[derive(Clone)]
+pub struct Blockchain {
+ // Client used by the lazy-loading backend to fetch data from a remote node.
+ rpc_client: Arc,
+ // Shared, lock-protected storage; `Clone` shares the same underlying state.
+ storage: Arc>>,
+}
+
+// NOTE(review): generic arguments were stripped throughout this impl in the
+// patch text (`impl Blockchain {`, `id: BlockId`, `Option`, …) — presumably
+// `impl<Block: BlockT> Blockchain<Block>` with `BlockId<Block>` /
+// `Option<Block::Hash>` etc., mirroring `sc_client_api::in_mem`; confirm
+// against the repository sources.
+impl Blockchain {
+ /// Get header hash of given block.
+ pub fn id(&self, id: BlockId) -> Option {
+ match id {
+ BlockId::Hash(h) => Some(h),
+ BlockId::Number(n) => self.storage.read().hashes.get(&n).cloned(),
+ }
+ }
+
+ /// Create new in-memory blockchain storage.
+ // All maps start empty; hashes/numbers default to zero until genesis is inserted.
+ fn new(rpc_client: Arc) -> Blockchain {
+ let storage = Arc::new(RwLock::new(BlockchainStorage {
+ blocks: HashMap::new(),
+ hashes: HashMap::new(),
+ best_hash: Default::default(),
+ best_number: Zero::zero(),
+ finalized_hash: Default::default(),
+ finalized_number: Zero::zero(),
+ genesis_hash: Default::default(),
+ header_cht_roots: HashMap::new(),
+ leaves: LeafSet::new(),
+ aux: HashMap::new(),
+ }));
+ Blockchain {
+ rpc_client,
+ storage,
+ }
+ }
+
+ /// Insert a block header and associated data.
+ pub fn insert(
+ &self,
+ hash: Block::Hash,
+ header: ::Header,
+ justifications: Option,
+ body: Option::Extrinsic>>,
+ new_state: NewBlockState,
+ ) -> sp_blockchain::Result<()> {
+ let number = *header.number();
+ // Update the canonical head before taking the storage write lock below
+ // (`apply_head` acquires the lock itself).
+ if new_state.is_best() {
+ self.apply_head(&header)?;
+ }
+
+ {
+ let mut storage = self.storage.write();
+ storage.leaves.import(hash, number, *header.parent_hash());
+ storage
+ .blocks
+ .insert(hash, StoredBlock::new(header, body, justifications));
+
+ if let NewBlockState::Final = new_state {
+ storage.finalized_hash = hash;
+ storage.finalized_number = number;
+ }
+
+ if number == Zero::zero() {
+ storage.genesis_hash = hash;
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Get total number of blocks.
+ // NOTE(review): `log::error!` for a routine count looks like leftover
+ // debugging — `log::debug!` seems intended here.
+ pub fn blocks_count(&self) -> usize {
+ let count = self.storage.read().blocks.len();
+ log::error!("Total number of blocks: {:?}", count);
+
+ count
+ }
+
+ /// Compare this blockchain with another in-mem blockchain
+ pub fn equals_to(&self, other: &Self) -> bool {
+ // Check ptr equality first to avoid double read locks.
+ if ptr::eq(self, other) {
+ return true;
+ }
+ self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks
+ }
+
+ /// Compare canonical chain to other canonical chain.
+ pub fn canon_equals_to(&self, other: &Self) -> bool {
+ // Check ptr equality first to avoid double read locks.
+ if ptr::eq(self, other) {
+ return true;
+ }
+ let this = self.storage.read();
+ let other = other.storage.read();
+ this.hashes == other.hashes
+ && this.best_hash == other.best_hash
+ && this.best_number == other.best_number
+ && this.genesis_hash == other.genesis_hash
+ }
+
+ /// Insert header CHT root.
+ pub fn insert_cht_root(&self, block: NumberFor, cht_root: Block::Hash) {
+ self.storage
+ .write()
+ .header_cht_roots
+ .insert(block, cht_root);
+ }
+
+ /// Set an existing block as head.
+ pub fn set_head(&self, hash: Block::Hash) -> sp_blockchain::Result<()> {
+ let header = self
+ .header(hash)?
+ .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash)))?;
+
+ self.apply_head(&header)
+ }
+
+ // Record `header` as the new best block, updating the number->hash index.
+ // NOTE(review): the tree-route/reorg handling is commented out below, so
+ // number->hash entries of retracted forks are never removed — presumably
+ // intentional for the lazy-loading (single-chain) use case; confirm.
+ fn apply_head(&self, header: &::Header) -> sp_blockchain::Result<()> {
+ let hash = header.hash();
+ let number = header.number();
+ /*
+ // Note: this may lock storage, so it must happen before obtaining storage
+ // write lock.
+ let best_tree_route = {
+ let best_hash = self.storage.read().best_hash;
+ if &best_hash == header.parent_hash() {
+ None
+ } else {
+ let route = sp_blockchain::tree_route(self, best_hash, *header.parent_hash())?;
+ Some(route)
+ }
+ };
+ */
+
+ let mut storage = self.storage.write();
+ /*
+ if let Some(tree_route) = best_tree_route {
+ // apply retraction and enaction when reorganizing up to parent hash
+ let enacted = tree_route.enacted();
+
+ for entry in enacted {
+ storage.hashes.insert(entry.number, entry.hash);
+ }
+
+ for entry in tree_route.retracted().iter().skip(enacted.len()) {
+ storage.hashes.remove(&entry.number);
+ }
+ }
+ */
+ storage.best_hash = hash;
+ storage.best_number = *number;
+ storage.hashes.insert(*number, hash);
+
+ Ok(())
+ }
+
+ // Mark `block` as finalized; when a justification is supplied it replaces
+ // any justifications previously stored for that block.
+ fn finalize_header(
+ &self,
+ block: Block::Hash,
+ justification: Option,
+ ) -> sp_blockchain::Result<()> {
+ let mut storage = self.storage.write();
+ storage.finalized_hash = block;
+
+ if justification.is_some() {
+ let block = storage
+ .blocks
+ .get_mut(&block)
+ .expect("hash was fetched from a block in the db; qed");
+
+ let block_justifications = match block {
+ StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j,
+ };
+
+ *block_justifications = justification.map(Justifications::from);
+ }
+
+ Ok(())
+ }
+
+ // Append one more justification to a stored block; rejects duplicates of
+ // the same consensus engine ID.
+ fn append_justification(
+ &self,
+ hash: Block::Hash,
+ justification: Justification,
+ ) -> sp_blockchain::Result<()> {
+ let mut storage = self.storage.write();
+
+ let block = storage
+ .blocks
+ .get_mut(&hash)
+ .expect("hash was fetched from a block in the db; qed");
+
+ let block_justifications = match block {
+ StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j,
+ };
+
+ if let Some(stored_justifications) = block_justifications {
+ if !stored_justifications.append(justification) {
+ return Err(sp_blockchain::Error::BadJustification(
+ "Duplicate consensus engine ID".into(),
+ ));
+ }
+ } else {
+ *block_justifications = Some(Justifications::from(justification));
+ };
+
+ Ok(())
+ }
+
+ // Apply a batch of auxiliary-data writes; `None` values delete the key.
+ fn write_aux(&self, ops: Vec<(Vec, Option>)>) {
+ let mut storage = self.storage.write();
+ for (k, v) in ops {
+ match v {
+ Some(v) => storage.aux.insert(k, v),
+ None => storage.aux.remove(&k),
+ };
+ }
+ }
+}
+
+impl HeaderBackend for Blockchain {
+ fn header(
+ &self,
+ hash: Block::Hash,
+ ) -> sp_blockchain::Result