diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml
index 891f43e605c0..567e996b8fd9 100644
--- a/.github/workflows/release-50_publish-docker.yml
+++ b/.github/workflows/release-50_publish-docker.yml
@@ -268,7 +268,7 @@ jobs:
- name: Build and push
id: docker_build
- uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0
+ uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0
with:
push: true
file: docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile
diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml
index 4071fdf9758b..429491fb1742 100644
--- a/.gitlab/pipeline/check.yml
+++ b/.gitlab/pipeline/check.yml
@@ -151,6 +151,31 @@ check-runtime-migration-asset-hub-westend:
PACKAGE: "asset-hub-westend-runtime"
WASM: "asset_hub_westend_runtime.compact.compressed.wasm"
URI: "wss://westend-asset-hub-rpc.polkadot.io:443"
+
+check-runtime-migration-asset-hub-rococo:
+ stage: check
+ extends:
+ - .docker-env
+ - .test-pr-refs
+ - .check-runtime-migration
+ variables:
+ NETWORK: "asset-hub-rococo"
+ PACKAGE: "asset-hub-rococo-runtime"
+ WASM: "asset_hub_rococo_runtime.compact.compressed.wasm"
+ URI: "wss://rococo-asset-hub-rpc.polkadot.io:443"
+
+# Check runtime migrations for Parity managed bridge hub chains
+check-runtime-migration-bridge-hub-westend:
+ stage: check
+ extends:
+ - .docker-env
+ - .test-pr-refs
+ - .check-runtime-migration
+ variables:
+ NETWORK: "bridge-hub-westend"
+ PACKAGE: "bridge-hub-westend-runtime"
+ WASM: "bridge_hub_westend_runtime.compact.compressed.wasm"
+ URI: "wss://westend-bridge-hub-rpc.polkadot.io:443"
check-runtime-migration-bridge-hub-rococo:
stage: check
diff --git a/Cargo.lock b/Cargo.lock
index 9de6bb8fe4cc..73e6179fc119 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -544,7 +544,7 @@ dependencies = [
[[package]]
name = "ark-secret-scalar"
version = "0.0.2"
-source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502"
+source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754"
dependencies = [
"ark-ec",
"ark-ff",
@@ -593,7 +593,7 @@ dependencies = [
[[package]]
name = "ark-transcript"
version = "0.0.2"
-source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502"
+source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754"
dependencies = [
"ark-ff",
"ark-serialize",
@@ -1369,8 +1369,8 @@ dependencies = [
[[package]]
name = "bandersnatch_vrfs"
-version = "0.0.3"
-source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502"
+version = "0.0.4"
+source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754"
dependencies = [
"ark-bls12-381",
"ark-ec",
@@ -1493,8 +1493,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f"
dependencies = [
"bitcoin_hashes",
- "rand 0.8.5",
- "rand_core 0.6.4",
+ "rand 0.7.3",
+ "rand_core 0.5.1",
"serde",
"unicode-normalization",
]
@@ -1585,16 +1585,15 @@ dependencies = [
[[package]]
name = "blake3"
-version = "1.4.1"
+version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5"
+checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87"
dependencies = [
"arrayref",
"arrayvec 0.7.4",
"cc",
"cfg-if",
"constant_time_eq 0.3.0",
- "digest 0.10.7",
]
[[package]]
@@ -3065,7 +3064,7 @@ dependencies = [
[[package]]
name = "common"
version = "0.1.0"
-source = "git+https://github.com/w3f/ring-proof#edd1e90b847e560bf60fc2e8712235ccfa11a9a9"
+source = "git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f"
dependencies = [
"ark-ec",
"ark-ff",
@@ -4814,7 +4813,7 @@ checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632"
[[package]]
name = "dleq_vrf"
version = "0.0.2"
-source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502"
+source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754"
dependencies = [
"ark-ec",
"ark-ff",
@@ -5342,11 +5341,12 @@ dependencies = [
[[package]]
name = "fdlimit"
-version = "0.2.1"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2c4c9e43643f5a3be4ca5b67d26b98031ff9db6806c3440ae32e02e3ceac3f1b"
+checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5"
dependencies = [
"libc",
+ "thiserror",
]
[[package]]
@@ -5388,7 +5388,7 @@ dependencies = [
[[package]]
name = "fflonk"
version = "0.1.0"
-source = "git+https://github.com/w3f/fflonk#26a5045b24e169cffc1f9328ca83d71061145c40"
+source = "git+https://github.com/w3f/fflonk#1beb0585e1c8488956fac7f05da061f9b41e8948"
dependencies = [
"ark-ec",
"ark-ff",
@@ -7408,6 +7408,17 @@ dependencies = [
"rle-decode-fast",
]
+[[package]]
+name = "libfuzzer-sys"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a96cfd5557eb82f2b83fed4955246c988d331975a002961b07c81584d107e7f7"
+dependencies = [
+ "arbitrary",
+ "cc",
+ "once_cell",
+]
+
[[package]]
name = "libloading"
version = "0.7.4"
@@ -8182,9 +8193,9 @@ dependencies = [
[[package]]
name = "memchr"
-version = "2.5.0"
+version = "2.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
[[package]]
name = "memfd"
@@ -12556,6 +12567,7 @@ version = "1.0.0"
dependencies = [
"always-assert",
"assert_matches",
+ "blake3",
"cfg-if",
"criterion 0.4.0",
"futures",
@@ -12634,6 +12646,7 @@ dependencies = [
"sp-externalities 0.19.0",
"sp-io",
"sp-tracing 10.0.0",
+ "substrate-build-script-utils",
"tempfile",
"thiserror",
"tracing-gum",
@@ -12658,6 +12671,7 @@ dependencies = [
name = "polkadot-node-core-pvf-prepare-worker"
version = "1.0.0"
dependencies = [
+ "blake3",
"cfg-if",
"criterion 0.4.0",
"libc",
@@ -14297,14 +14311,14 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.9.3"
+version = "1.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a"
+checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343"
dependencies = [
"aho-corasick",
"memchr",
- "regex-automata 0.3.6",
- "regex-syntax 0.7.4",
+ "regex-automata 0.4.3",
+ "regex-syntax 0.8.2",
]
[[package]]
@@ -14321,10 +14335,16 @@ name = "regex-automata"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69"
+
+[[package]]
+name = "regex-automata"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f"
dependencies = [
"aho-corasick",
"memchr",
- "regex-syntax 0.7.4",
+ "regex-syntax 0.8.2",
]
[[package]]
@@ -14335,9 +14355,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
-version = "0.7.4"
+version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2"
+checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
[[package]]
name = "remote-ext-tests-bags-list"
@@ -14427,7 +14447,7 @@ dependencies = [
[[package]]
name = "ring"
version = "0.1.0"
-source = "git+https://github.com/w3f/ring-proof#edd1e90b847e560bf60fc2e8712235ccfa11a9a9"
+source = "git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f"
dependencies = [
"ark-ec",
"ark-ff",
@@ -16584,18 +16604,18 @@ dependencies = [
[[package]]
name = "secp256k1"
-version = "0.24.3"
+version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62"
+checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5"
dependencies = [
"secp256k1-sys",
]
[[package]]
name = "secp256k1-sys"
-version = "0.6.1"
+version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b"
+checksum = "09e67c467c38fd24bd5499dc9a18183b31575c12ee549197e3e20d57aa4fe3b7"
dependencies = [
"cc",
]
@@ -17535,6 +17555,16 @@ dependencies = [
"zeroize",
]
+[[package]]
+name = "sp-core-fuzz"
+version = "0.0.0"
+dependencies = [
+ "lazy_static",
+ "libfuzzer-sys",
+ "regex",
+ "sp-core",
+]
+
[[package]]
name = "sp-core-hashing"
version = "9.0.0"
@@ -17579,7 +17609,7 @@ dependencies = [
[[package]]
name = "sp-crypto-ec-utils"
version = "0.4.1"
-source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b"
+source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f"
dependencies = [
"ark-bls12-377",
"ark-bls12-377-ext",
@@ -17617,7 +17647,7 @@ dependencies = [
[[package]]
name = "sp-debug-derive"
version = "8.0.0"
-source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b"
+source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f"
dependencies = [
"proc-macro2",
"quote",
@@ -17637,7 +17667,7 @@ dependencies = [
[[package]]
name = "sp-externalities"
version = "0.19.0"
-source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b"
+source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f"
dependencies = [
"environmental",
"parity-scale-codec",
@@ -17869,7 +17899,7 @@ dependencies = [
[[package]]
name = "sp-runtime-interface"
version = "17.0.0"
-source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b"
+source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f"
dependencies = [
"bytes",
"impl-trait-for-tuples",
@@ -17898,7 +17928,7 @@ dependencies = [
[[package]]
name = "sp-runtime-interface-proc-macro"
version = "11.0.0"
-source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b"
+source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f"
dependencies = [
"Inflector",
"proc-macro-crate",
@@ -18026,7 +18056,7 @@ version = "8.0.0"
[[package]]
name = "sp-std"
version = "8.0.0"
-source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b"
+source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f"
[[package]]
name = "sp-storage"
@@ -18043,7 +18073,7 @@ dependencies = [
[[package]]
name = "sp-storage"
version = "13.0.0"
-source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b"
+source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f"
dependencies = [
"impl-serde",
"parity-scale-codec",
@@ -18092,7 +18122,7 @@ dependencies = [
[[package]]
name = "sp-tracing"
version = "10.0.0"
-source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b"
+source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f"
dependencies = [
"parity-scale-codec",
"sp-std 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)",
@@ -18193,7 +18223,7 @@ dependencies = [
[[package]]
name = "sp-wasm-interface"
version = "14.0.0"
-source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b"
+source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f"
dependencies = [
"anyhow",
"impl-trait-for-tuples",
@@ -19294,9 +19324,9 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
[[package]]
name = "thiserror"
-version = "1.0.48"
+version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7"
+checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2"
dependencies = [
"thiserror-impl",
]
@@ -19323,9 +19353,9 @@ dependencies = [
[[package]]
name = "thiserror-impl"
-version = "1.0.48"
+version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35"
+checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8"
dependencies = [
"proc-macro2",
"quote",
@@ -19980,7 +20010,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675"
dependencies = [
"cfg-if",
"digest 0.10.7",
- "rand 0.8.5",
+ "rand 0.7.3",
"static_assertions",
]
diff --git a/Cargo.toml b/Cargo.toml
index ed252e07053f..57079aa4d03d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -405,6 +405,7 @@ members = [
"substrate/primitives/consensus/sassafras",
"substrate/primitives/consensus/slots",
"substrate/primitives/core",
+ "substrate/primitives/core/fuzz",
"substrate/primitives/core/hashing",
"substrate/primitives/core/hashing/proc-macro",
"substrate/primitives/crypto/ec-utils",
diff --git a/cumulus/client/cli/src/lib.rs b/cumulus/client/cli/src/lib.rs
index 1b18ed064373..a2238b73b2b5 100644
--- a/cumulus/client/cli/src/lib.rs
+++ b/cumulus/client/cli/src/lib.rs
@@ -296,7 +296,14 @@ pub struct RunCmd {
#[arg(long, conflicts_with = "validator")]
pub collator: bool,
- /// EXPERIMENTAL: Specify an URL to a relay chain full node to communicate with.
+ /// Creates a less resource-hungry node that retrieves relay chain data from an RPC endpoint.
+ ///
+ /// The provided URLs should point to RPC endpoints of the relay chain.
+ /// This node connects to the remote nodes following the order they were specified in. If the
+ /// connection fails, it attempts to connect to the next endpoint in the list.
+ ///
+ /// Note: This option doesn't stop the node from connecting to the relay chain network but
+ /// reduces bandwidth use.
#[arg(
long,
value_parser = validate_relay_chain_url,
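
The doc comment above describes an ordered-fallback connection strategy. As a minimal, self-contained sketch (illustrative only, not the actual cumulus implementation; `try_connect` is a hypothetical stand-in for the real RPC handshake), the behavior amounts to:

fn connect_with_fallback(urls: &[&str]) -> Option<String> {
    for url in urls {
        // Attempt the hypothetical handshake; on failure, fall through to the
        // next endpoint, in the order the URLs were specified.
        if let Ok(client) = try_connect(url) {
            return Some(client);
        }
    }
    None
}

// Hypothetical connection attempt; here it "succeeds" only for wss:// endpoints.
fn try_connect(url: &str) -> Result<String, ()> {
    if url.starts_with("wss://") { Ok(url.to_string()) } else { Err(()) }
}

fn main() {
    let urls = ["http://unreachable.example", "wss://relay-rpc.example:443"];
    assert_eq!(connect_with_fallback(&urls).as_deref(), Some("wss://relay-rpc.example:443"));
}
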
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index 4b4ae61a3e8d..b274f45877b3 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -39,7 +39,9 @@ use sp_api::impl_runtime_apis;
use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
use sp_runtime::{
create_runtime_str, generic, impl_opaque_keys,
- traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Verify},
+ traits::{
+ AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Saturating, Verify,
+ },
transaction_validity::{TransactionSource, TransactionValidity},
ApplyExtrinsicResult, Permill,
};
@@ -959,7 +961,60 @@ pub type SignedExtra = (
pub type UncheckedExtrinsic =
	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
/// Migrations to apply on runtime upgrade.
-pub type Migrations = (pallet_collator_selection::migration::v1::MigrateToV1<Runtime>,);
+pub type Migrations =
+	(pallet_collator_selection::migration::v1::MigrateToV1<Runtime>, InitStorageVersions);
+
+/// Migration to initialize storage versions for pallets added after genesis.
+///
+/// This is now done automatically (see ),
+/// but some pallets had made it in and had storage set in them for this parachain before it was
+/// merged.
+pub struct InitStorageVersions;
+
+impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions {
+ fn on_runtime_upgrade() -> Weight {
+ use frame_support::traits::{GetStorageVersion, StorageVersion};
+
+ let mut writes = 0;
+
+ if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) {
+ PolkadotXcm::current_storage_version().put::<PolkadotXcm>();
+ writes.saturating_inc();
+ }
+
+ if Multisig::on_chain_storage_version() == StorageVersion::new(0) {
+ Multisig::current_storage_version().put::<Multisig>();
+ writes.saturating_inc();
+ }
+
+ if Assets::on_chain_storage_version() == StorageVersion::new(0) {
+ Assets::current_storage_version().put::<Assets>();
+ writes.saturating_inc();
+ }
+
+ if Uniques::on_chain_storage_version() == StorageVersion::new(0) {
+ Uniques::current_storage_version().put::<Uniques>();
+ writes.saturating_inc();
+ }
+
+ if Nfts::on_chain_storage_version() == StorageVersion::new(0) {
+ Nfts::current_storage_version().put::<Nfts>();
+ writes.saturating_inc();
+ }
+
+ if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) {
+ ForeignAssets::current_storage_version().put::<ForeignAssets>();
+ writes.saturating_inc();
+ }
+
+ if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) {
+ PoolAssets::current_storage_version().put::<PoolAssets>();
+ writes.saturating_inc();
+ }
+
+ <Runtime as frame_system::Config>::DbWeight::get().reads_writes(7, writes)
+ }
+}
/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
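
The `InitStorageVersions` migration above applies one pattern per pallet: if the on-chain storage version is still 0, write the pallet's currently declared version and count one database write toward the returned weight. A simplified, self-contained model of that pattern (plain Rust types standing in for FRAME's `StorageVersion`/`GetStorageVersion` machinery):

#[derive(PartialEq, Clone, Copy)]
struct StorageVersion(u16);

struct PalletVersions {
    on_chain: StorageVersion, // version currently recorded in chain state
    current: StorageVersion,  // version the pallet's code declares
}

fn init_storage_versions(pallets: &mut [PalletVersions]) -> u64 {
    let mut writes: u64 = 0;
    for p in pallets.iter_mut() {
        if p.on_chain == StorageVersion(0) {
            // Analogous to `P::current_storage_version().put::<P>()` above.
            p.on_chain = p.current;
            writes = writes.saturating_add(1);
        }
    }
    writes
}

fn main() {
    let mut pallets = [
        PalletVersions { on_chain: StorageVersion(0), current: StorageVersion(1) },
        PalletVersions { on_chain: StorageVersion(1), current: StorageVersion(1) },
    ];
    // Only the first pallet needs a write; the migration charges weight for
    // 7 reads (one per pallet checked) plus this write count.
    assert_eq!(init_storage_versions(&mut pallets), 1);
}
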
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
index b85cb76642fb..4da0a2500a54 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
@@ -109,7 +109,7 @@ pub type CurrencyTransactor = CurrencyAdapter<
(),
>;
-/// `AssetId`/`Balance` converter for `PoolAssets`.
+/// `AssetId`/`Balance` converter for `TrustBackedAssets`.
pub type TrustBackedAssetsConvertedConcreteId =
assets_common::TrustBackedAssetsConvertedConcreteId<TrustBackedAssetsPalletLocation, Balance>;
@@ -130,7 +130,7 @@ pub type FungiblesTransactor = FungiblesAdapter<
CheckingAccount,
>;
-/// `AssetId/Balance` converter for `TrustBackedAssets`
+/// `AssetId`/`Balance` converter for `ForeignAssets`.
pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId<
(
// Ignore `TrustBackedAssets` explicitly
@@ -275,13 +275,15 @@ impl Contains<RuntimeCall> for SafeCallFilter {
matches!(
call,
- RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) |
- RuntimeCall::System(
- frame_system::Call::set_heap_pages { .. } |
- frame_system::Call::set_code { .. } |
- frame_system::Call::set_code_without_checks { .. } |
- frame_system::Call::kill_prefix { .. },
- ) | RuntimeCall::ParachainSystem(..) |
+ RuntimeCall::PolkadotXcm(
+ pallet_xcm::Call::force_xcm_version { .. } |
+ pallet_xcm::Call::force_default_xcm_version { .. }
+ ) | RuntimeCall::System(
+ frame_system::Call::set_heap_pages { .. } |
+ frame_system::Call::set_code { .. } |
+ frame_system::Call::set_code_without_checks { .. } |
+ frame_system::Call::kill_prefix { .. },
+ ) | RuntimeCall::ParachainSystem(..) |
RuntimeCall::Timestamp(..) |
RuntimeCall::Balances(..) |
RuntimeCall::CollatorSelection(
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
index 17312c0f46ef..4760e087e24f 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs
@@ -109,7 +109,7 @@ pub type CurrencyTransactor = CurrencyAdapter<
(),
>;
-/// `AssetId/Balance` converter for `TrustBackedAssets`
+/// `AssetId`/`Balance` converter for `TrustBackedAssets`.
pub type TrustBackedAssetsConvertedConcreteId =
assets_common::TrustBackedAssetsConvertedConcreteId<TrustBackedAssetsPalletLocation, Balance>;
@@ -130,7 +130,7 @@ pub type FungiblesTransactor = FungiblesAdapter<
CheckingAccount,
>;
-/// `AssetId/Balance` converter for `TrustBackedAssets`
+/// `AssetId`/`Balance` converter for `ForeignAssets`.
pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId<
(
// Ignore `TrustBackedAssets` explicitly
@@ -272,13 +272,15 @@ impl Contains<RuntimeCall> for SafeCallFilter {
matches!(
call,
- RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) |
- RuntimeCall::System(
- frame_system::Call::set_heap_pages { .. } |
- frame_system::Call::set_code { .. } |
- frame_system::Call::set_code_without_checks { .. } |
- frame_system::Call::kill_prefix { .. },
- ) | RuntimeCall::ParachainSystem(..) |
+ RuntimeCall::PolkadotXcm(
+ pallet_xcm::Call::force_xcm_version { .. } |
+ pallet_xcm::Call::force_default_xcm_version { .. }
+ ) | RuntimeCall::System(
+ frame_system::Call::set_heap_pages { .. } |
+ frame_system::Call::set_code { .. } |
+ frame_system::Call::set_code_without_checks { .. } |
+ frame_system::Call::kill_prefix { .. },
+ ) | RuntimeCall::ParachainSystem(..) |
RuntimeCall::Timestamp(..) |
RuntimeCall::Balances(..) |
RuntimeCall::CollatorSelection(
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
index 5a44ccbb75a2..8e138822696e 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs
@@ -498,7 +498,7 @@ construct_runtime!(
// XCM helpers.
XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event<T>} = 30,
- PolkadotXcm: pallet_xcm::{Pallet, Call, Event<T>, Origin, Config<T>} = 31,
+ PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event<T>, Origin, Config<T>} = 31,
CumulusXcm: cumulus_pallet_xcm::{Pallet, Event<T>, Origin} = 32,
DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event<T>} = 33,
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
index 1436c5b96a33..de7b5315c883 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs
@@ -161,13 +161,15 @@ impl Contains<RuntimeCall> for SafeCallFilter {
matches!(
call,
- RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) |
- RuntimeCall::System(
- frame_system::Call::set_heap_pages { .. } |
- frame_system::Call::set_code { .. } |
- frame_system::Call::set_code_without_checks { .. } |
- frame_system::Call::kill_prefix { .. },
- ) | RuntimeCall::ParachainSystem(..) |
+ RuntimeCall::PolkadotXcm(
+ pallet_xcm::Call::force_xcm_version { .. } |
+ pallet_xcm::Call::force_default_xcm_version { .. }
+ ) | RuntimeCall::System(
+ frame_system::Call::set_heap_pages { .. } |
+ frame_system::Call::set_code { .. } |
+ frame_system::Call::set_code_without_checks { .. } |
+ frame_system::Call::kill_prefix { .. },
+ ) | RuntimeCall::ParachainSystem(..) |
RuntimeCall::Timestamp(..) |
RuntimeCall::Balances(..) |
RuntimeCall::CollatorSelection(
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
index d1d2b4a41595..9c97728058f7 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs
@@ -497,7 +497,7 @@ construct_runtime!(
// XCM helpers.
XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event<T>} = 30,
- PolkadotXcm: pallet_xcm::{Pallet, Call, Event<T>, Origin, Config<T>} = 31,
+ PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event<T>, Origin, Config<T>} = 31,
CumulusXcm: cumulus_pallet_xcm::{Pallet, Event<T>, Origin} = 32,
DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event<T>} = 33,
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs
index 7084882c41f9..c89ee91c5e44 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs
@@ -150,13 +150,15 @@ impl Contains<RuntimeCall> for SafeCallFilter {
matches!(
call,
- RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) |
- RuntimeCall::System(
- frame_system::Call::set_heap_pages { .. } |
- frame_system::Call::set_code { .. } |
- frame_system::Call::set_code_without_checks { .. } |
- frame_system::Call::kill_prefix { .. },
- ) | RuntimeCall::ParachainSystem(..) |
+ RuntimeCall::PolkadotXcm(
+ pallet_xcm::Call::force_xcm_version { .. } |
+ pallet_xcm::Call::force_default_xcm_version { .. }
+ ) | RuntimeCall::System(
+ frame_system::Call::set_heap_pages { .. } |
+ frame_system::Call::set_code { .. } |
+ frame_system::Call::set_code_without_checks { .. } |
+ frame_system::Call::kill_prefix { .. },
+ ) | RuntimeCall::ParachainSystem(..) |
RuntimeCall::Timestamp(..) |
RuntimeCall::Balances(..) |
RuntimeCall::CollatorSelection(
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
index d58995827fa4..cefc099c96f9 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs
@@ -178,8 +178,10 @@ impl Contains<RuntimeCall> for SafeCallFilter {
pallet_collator_selection::Call::add_invulnerable { .. } |
pallet_collator_selection::Call::remove_invulnerable { .. },
) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) |
- RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) |
- RuntimeCall::XcmpQueue(..) |
+ RuntimeCall::PolkadotXcm(
+ pallet_xcm::Call::force_xcm_version { .. } |
+ pallet_xcm::Call::force_default_xcm_version { .. }
+ ) | RuntimeCall::XcmpQueue(..) |
RuntimeCall::MessageQueue(..) |
RuntimeCall::Alliance(
// `init_members` accepts unbounded vecs as arguments,
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
index 5b828bad0c7f..9d6a53c5ed34 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs
@@ -405,7 +405,6 @@ construct_runtime!(
XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event<T>} = 30,
PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event<T>, Origin, Config<T>} = 31,
CumulusXcm: cumulus_pallet_xcm::{Pallet, Event<T>, Origin} = 32,
- DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event<T>} = 33,
MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event<T>} = 34,
// Smart Contracts.
diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs
index faee1c68fe6c..2ac93aed3f8d 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs
@@ -304,9 +304,3 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime {
parameter_types! {
pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent;
}
-
-impl cumulus_pallet_dmp_queue::Config for Runtime {
- type WeightInfo = cumulus_pallet_dmp_queue::weights::SubstrateWeight<Runtime>;
- type RuntimeEvent = crate::RuntimeEvent;
- type DmpSink = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>;
-}
diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs
index 94f7fcaf9411..ef01727b7eb6 100644
--- a/polkadot/node/core/approval-voting/src/lib.rs
+++ b/polkadot/node/core/approval-voting/src/lib.rs
@@ -54,7 +54,7 @@ use polkadot_node_subsystem_util::{
};
use polkadot_primitives::{
ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement,
- ExecutorParams, GroupIndex, Hash, PvfExecTimeoutKind, SessionIndex, SessionInfo,
+ ExecutorParams, GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo,
ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature,
};
use sc_keystore::LocalKeystore;
@@ -2867,7 +2867,7 @@ async fn launch_approval(
candidate_receipt: candidate.clone(),
pov: available_data.pov,
executor_params,
- exec_timeout_kind: PvfExecTimeoutKind::Approval,
+ exec_kind: PvfExecKind::Approval,
response_sender: val_tx,
})
.await;
diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs
index 0c0dcfde9b66..11bcba9c3882 100644
--- a/polkadot/node/core/approval-voting/src/tests.rs
+++ b/polkadot/node/core/approval-voting/src/tests.rs
@@ -2705,10 +2705,10 @@ async fn handle_double_assignment_import(
assert_matches!(
overseer_recv(virtual_overseer).await,
AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive {
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
- }) if exec_timeout_kind == PvfExecTimeoutKind::Approval => {
+ }) if exec_kind == PvfExecKind::Approval => {
response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default())))
.unwrap();
}
diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs
index a91eefe5e04f..434051f1b00f 100644
--- a/polkadot/node/core/backing/src/lib.rs
+++ b/polkadot/node/core/backing/src/lib.rs
@@ -106,7 +106,7 @@ use polkadot_node_subsystem_util::{
use polkadot_primitives::{
BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt,
CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, Hash, Id as ParaId,
- PersistedValidationData, PvfExecTimeoutKind, SigningContext, ValidationCode, ValidatorId,
+ PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, ValidatorId,
ValidatorIndex, ValidatorSignature, ValidityAttestation,
};
use sp_keystore::KeystorePtr;
@@ -566,7 +566,7 @@ async fn request_candidate_validation(
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind: PvfExecTimeoutKind::Backing,
+ exec_kind: PvfExecKind::Backing,
response_sender: tx,
})
.await;
diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs
index caa85c12989c..c12be72556e3 100644
--- a/polkadot/node/core/backing/src/tests/mod.rs
+++ b/polkadot/node/core/backing/src/tests/mod.rs
@@ -33,7 +33,7 @@ use polkadot_node_subsystem::{
};
use polkadot_node_subsystem_test_helpers as test_helpers;
use polkadot_primitives::{
- CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecTimeoutKind,
+ CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecKind,
ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES,
};
use sp_application_crypto::AppCrypto;
@@ -344,14 +344,14 @@ async fn assert_validate_from_exhaustive(
validation_data,
validation_code,
candidate_receipt,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
},
) if validation_data == *assert_pvd &&
validation_code == *assert_validation_code &&
*pov == *assert_pov && &candidate_receipt.descriptor == assert_candidate.descriptor() &&
- exec_timeout_kind == PvfExecTimeoutKind::Backing &&
+ exec_kind == PvfExecKind::Backing &&
candidate_receipt.commitments_hash == assert_candidate.commitments.hash() =>
{
response_sender.send(Ok(ValidationResult::Valid(
@@ -550,14 +550,14 @@ fn backing_works() {
validation_code,
candidate_receipt,
pov,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
},
) if validation_data == pvd_ab &&
validation_code == validation_code_ab &&
*pov == pov_ab && &candidate_receipt.descriptor == candidate_a.descriptor() &&
- exec_timeout_kind == PvfExecTimeoutKind::Backing &&
+ exec_kind == PvfExecKind::Backing &&
candidate_receipt.commitments_hash == candidate_a_commitments_hash =>
{
response_sender.send(Ok(
@@ -729,14 +729,14 @@ fn backing_works_while_validation_ongoing() {
validation_code,
candidate_receipt,
pov,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
},
) if validation_data == pvd_abc &&
validation_code == validation_code_abc &&
*pov == pov_abc && &candidate_receipt.descriptor == candidate_a.descriptor() &&
- exec_timeout_kind == PvfExecTimeoutKind::Backing &&
+ exec_kind == PvfExecKind::Backing &&
candidate_a_commitments_hash == candidate_receipt.commitments_hash =>
{
// we never validate the candidate. our local node
@@ -890,14 +890,14 @@ fn backing_misbehavior_works() {
validation_code,
candidate_receipt,
pov,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
},
) if validation_data == pvd_a &&
validation_code == validation_code_a &&
*pov == pov_a && &candidate_receipt.descriptor == candidate_a.descriptor() &&
- exec_timeout_kind == PvfExecTimeoutKind::Backing &&
+ exec_kind == PvfExecKind::Backing &&
candidate_a_commitments_hash == candidate_receipt.commitments_hash =>
{
response_sender.send(Ok(
@@ -1057,14 +1057,14 @@ fn backing_dont_second_invalid() {
validation_code,
candidate_receipt,
pov,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
},
) if validation_data == pvd_a &&
validation_code == validation_code_a &&
*pov == pov_block_a && &candidate_receipt.descriptor == candidate_a.descriptor() &&
- exec_timeout_kind == PvfExecTimeoutKind::Backing &&
+ exec_kind == PvfExecKind::Backing &&
candidate_a.commitments.hash() == candidate_receipt.commitments_hash =>
{
response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap();
@@ -1097,14 +1097,14 @@ fn backing_dont_second_invalid() {
validation_code,
candidate_receipt,
pov,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
},
) if validation_data == pvd_b &&
validation_code == validation_code_b &&
*pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() &&
- exec_timeout_kind == PvfExecTimeoutKind::Backing &&
+ exec_kind == PvfExecKind::Backing &&
candidate_b.commitments.hash() == candidate_receipt.commitments_hash =>
{
response_sender.send(Ok(
@@ -1224,14 +1224,14 @@ fn backing_second_after_first_fails_works() {
validation_code,
candidate_receipt,
pov,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
},
) if validation_data == pvd_a &&
validation_code == validation_code_a &&
*pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() &&
- exec_timeout_kind == PvfExecTimeoutKind::Backing &&
+ exec_kind == PvfExecKind::Backing &&
candidate.commitments.hash() == candidate_receipt.commitments_hash =>
{
response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap();
@@ -1368,14 +1368,14 @@ fn backing_works_after_failed_validation() {
validation_code,
candidate_receipt,
pov,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
},
) if validation_data == pvd_a &&
validation_code == validation_code_a &&
*pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() &&
- exec_timeout_kind == PvfExecTimeoutKind::Backing &&
+ exec_kind == PvfExecKind::Backing &&
candidate.commitments.hash() == candidate_receipt.commitments_hash =>
{
response_sender.send(Err(ValidationFailed("Internal test error".into()))).unwrap();
@@ -1634,13 +1634,13 @@ fn retry_works() {
validation_code,
candidate_receipt,
pov,
- exec_timeout_kind,
+ exec_kind,
..
},
) if validation_data == pvd_a &&
validation_code == validation_code_a &&
*pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() &&
- exec_timeout_kind == PvfExecTimeoutKind::Backing &&
+ exec_kind == PvfExecKind::Backing &&
candidate.commitments.hash() == candidate_receipt.commitments_hash
);
virtual_overseer
diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs
index fc4bd7d98e7d..e7c29e11bb47 100644
--- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs
+++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs
@@ -232,14 +232,14 @@ async fn assert_validate_seconded_candidate(
validation_code,
candidate_receipt,
pov,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
}) if &validation_data == assert_pvd &&
&validation_code == assert_validation_code &&
&*pov == assert_pov &&
&candidate_receipt.descriptor == candidate.descriptor() &&
- exec_timeout_kind == PvfExecTimeoutKind::Backing &&
+ exec_kind == PvfExecKind::Backing &&
candidate.commitments.hash() == candidate_receipt.commitments_hash =>
{
response_sender.send(Ok(ValidationResult::Valid(
diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs
index a3d6f0473136..f5d17af6c689 100644
--- a/polkadot/node/core/candidate-validation/src/lib.rs
+++ b/polkadot/node/core/candidate-validation/src/lib.rs
@@ -25,7 +25,7 @@
use polkadot_node_core_pvf::{
InternalValidationError, InvalidCandidate as WasmInvalidCandidate, PrepareError,
- PrepareJobKind, PrepareStats, PvfPrepData, ValidationError, ValidationHost,
+ PrepareJobKind, PvfPrepData, ValidationError, ValidationHost,
};
use polkadot_node_primitives::{
BlockData, InvalidCandidate, PoV, ValidationResult, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT,
@@ -49,8 +49,8 @@ use polkadot_primitives::{
DEFAULT_LENIENT_PREPARATION_TIMEOUT, DEFAULT_PRECHECK_PREPARATION_TIMEOUT,
},
CandidateCommitments, CandidateDescriptor, CandidateReceipt, ExecutorParams, Hash,
- OccupiedCoreAssumption, PersistedValidationData, PvfExecTimeoutKind, PvfPrepTimeoutKind,
- ValidationCode, ValidationCodeHash,
+ OccupiedCoreAssumption, PersistedValidationData, PvfExecKind, PvfPrepKind, ValidationCode,
+ ValidationCodeHash,
};
use parity_scale_codec::Encode;
@@ -73,12 +73,6 @@ mod tests;
const LOG_TARGET: &'static str = "parachain::candidate-validation";
-/// The amount of time to wait before retrying after a retry-able backing validation error. We use a
-/// lower value for the backing case, to fit within the lower backing timeout.
-#[cfg(not(test))]
-const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(500);
-#[cfg(test)]
-const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200);
/// The amount of time to wait before retrying after a retry-able approval validation error. We use
/// a higher value for the approval case since we have more time, and if we wait longer it is more
/// likely that transient conditions will resolve.
@@ -163,7 +157,7 @@ async fn run(
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
} => {
@@ -180,7 +174,7 @@ async fn run(
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
&metrics,
)
.await;
@@ -198,7 +192,7 @@ async fn run(
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
} => {
@@ -215,7 +209,7 @@ async fn run(
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
&metrics,
)
.await;
@@ -357,7 +351,7 @@ where
return PreCheckOutcome::Invalid
};
- let timeout = pvf_prep_timeout(&executor_params, PvfPrepTimeoutKind::Precheck);
+ let timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Precheck);
let pvf = match sp_maybe_compressed_blob::decompress(
&validation_code.0,
@@ -501,7 +495,7 @@ async fn validate_from_chain_state(
candidate_receipt: CandidateReceipt,
pov: Arc<PoV>,
executor_params: ExecutorParams,
- exec_timeout_kind: PvfExecTimeoutKind,
+ exec_kind: PvfExecKind,
metrics: &Metrics,
) -> Result<ValidationResult, ValidationFailed>
where
@@ -521,7 +515,7 @@ where
candidate_receipt.clone(),
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
metrics,
)
.await;
@@ -557,7 +551,7 @@ async fn validate_candidate_exhaustive(
candidate_receipt: CandidateReceipt,
pov: Arc<PoV>,
executor_params: ExecutorParams,
- exec_timeout_kind: PvfExecTimeoutKind,
+ exec_kind: PvfExecKind,
metrics: &Metrics,
) -> Result<ValidationResult, ValidationFailed> {
let _timer = metrics.time_validate_candidate_exhaustive();
@@ -616,15 +610,32 @@ async fn validate_candidate_exhaustive(
relay_parent_storage_root: persisted_validation_data.relay_parent_storage_root,
};
- let result = validation_backend
- .validate_candidate_with_retry(
- raw_validation_code.to_vec(),
- pvf_exec_timeout(&executor_params, exec_timeout_kind),
- exec_timeout_kind,
- params,
- executor_params,
- )
- .await;
+ let result = match exec_kind {
+ // Retry is disabled to reduce the chance of nondeterministic blocks getting backed and
+ // honest backers getting slashed.
+ PvfExecKind::Backing => {
+ let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare);
+ let exec_timeout = pvf_exec_timeout(&executor_params, exec_kind);
+ let pvf = PvfPrepData::from_code(
+ raw_validation_code.to_vec(),
+ executor_params,
+ prep_timeout,
+ PrepareJobKind::Compilation,
+ );
+
+ validation_backend.validate_candidate(pvf, exec_timeout, params.encode()).await
+ },
+ PvfExecKind::Approval =>
+ validation_backend
+ .validate_candidate_with_retry(
+ raw_validation_code.to_vec(),
+ pvf_exec_timeout(&executor_params, exec_kind),
+ params,
+ executor_params,
+ PVF_APPROVAL_EXECUTION_RETRY_DELAY,
+ )
+ .await,
+ };
if let Err(ref error) = result {
gum::info!(target: LOG_TARGET, ?para_id, ?error, "Failed to validate candidate");
@@ -709,8 +720,8 @@ trait ValidationBackend {
encoded_params: Vec<u8>,
) -> Result<WasmValidationResult, ValidationError>;
- /// Tries executing a PVF. Will retry once if an error is encountered that may have been
- /// transient.
+ /// Tries executing a PVF for the approval subsystem. Will retry once if an error is encountered
+ /// that may have been transient.
///
/// NOTE: Should retry only on errors that are a result of execution itself, and not of
/// preparation.
@@ -718,11 +729,11 @@ trait ValidationBackend {
&mut self,
raw_validation_code: Vec<u8>,
exec_timeout: Duration,
- exec_timeout_kind: PvfExecTimeoutKind,
params: ValidationParams,
executor_params: ExecutorParams,
+ retry_delay: Duration,
) -> Result<WasmValidationResult, ValidationError> {
- let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepTimeoutKind::Lenient);
+ let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare);
// Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap.
let pvf = PvfPrepData::from_code(
raw_validation_code,
@@ -740,11 +751,6 @@ trait ValidationBackend {
return validation_result
}
- let retry_delay = match exec_timeout_kind {
- PvfExecTimeoutKind::Backing => PVF_BACKING_EXECUTION_RETRY_DELAY,
- PvfExecTimeoutKind::Approval => PVF_APPROVAL_EXECUTION_RETRY_DELAY,
- };
-
// Allow limited retries for each kind of error.
let mut num_death_retries_left = 1;
let mut num_job_error_retries_left = 1;
@@ -794,7 +800,7 @@ trait ValidationBackend {
validation_result
}
- async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<PrepareStats, PrepareError>;
+ async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<(), PrepareError>;
}
#[async_trait]
@@ -824,7 +830,7 @@ impl ValidationBackend for ValidationHost {
})?
}
- async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<PrepareStats, PrepareError> {
+ async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<(), PrepareError> {
let (tx, rx) = oneshot::channel();
if let Err(err) = self.precheck_pvf(pvf, tx).await {
// Return an IO error if there was an error communicating with the host.
@@ -867,22 +873,41 @@ fn perform_basic_checks(
Ok(())
}
-fn pvf_prep_timeout(executor_params: &ExecutorParams, kind: PvfPrepTimeoutKind) -> Duration {
+/// Determine the amount of time to allot for PVF preparation, depending on the kind.
+///
+/// Precheck: the time period after which the preparation worker is considered
+/// unresponsive and will be killed.
+///
+/// Prepare: the time period after which the preparation worker is considered
+/// unresponsive and will be killed.
+fn pvf_prep_timeout(executor_params: &ExecutorParams, kind: PvfPrepKind) -> Duration {
if let Some(timeout) = executor_params.pvf_prep_timeout(kind) {
return timeout
}
match kind {
- PvfPrepTimeoutKind::Precheck => DEFAULT_PRECHECK_PREPARATION_TIMEOUT,
- PvfPrepTimeoutKind::Lenient => DEFAULT_LENIENT_PREPARATION_TIMEOUT,
+ PvfPrepKind::Precheck => DEFAULT_PRECHECK_PREPARATION_TIMEOUT,
+ PvfPrepKind::Prepare => DEFAULT_LENIENT_PREPARATION_TIMEOUT,
}
}
-fn pvf_exec_timeout(executor_params: &ExecutorParams, kind: PvfExecTimeoutKind) -> Duration {
+/// Determine the amount of time to allot for PVF execution, depending on the kind.
+///
+/// Backing subsystem: the amount of time to spend on execution during backing.
+///
+/// Approval subsystem: the amount of time to spend on execution during approval or disputes.
+/// This should be much longer than the backing execution timeout to ensure that in the
+/// absence of extremely large disparities between hardware, blocks that pass backing are
+/// considered executable by approval checkers or dispute participants.
+fn pvf_exec_timeout(executor_params: &ExecutorParams, kind: PvfExecKind) -> Duration {
if let Some(timeout) = executor_params.pvf_exec_timeout(kind) {
return timeout
}
match kind {
- PvfExecTimeoutKind::Backing => DEFAULT_BACKING_EXECUTION_TIMEOUT,
- PvfExecTimeoutKind::Approval => DEFAULT_APPROVAL_EXECUTION_TIMEOUT,
+ PvfExecKind::Backing => DEFAULT_BACKING_EXECUTION_TIMEOUT,
+ PvfExecKind::Approval => DEFAULT_APPROVAL_EXECUTION_TIMEOUT,
}
}
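
Taken together, the changes above make `PvfExecKind` drive two decisions: which default execution timeout applies, and whether transient errors are retried at all. A self-contained sketch of that dispatch (the concrete durations are assumptions for illustration; the real defaults come from `DEFAULT_BACKING_EXECUTION_TIMEOUT` and `DEFAULT_APPROVAL_EXECUTION_TIMEOUT`, and executor params may override them):

use std::time::Duration;

#[derive(Clone, Copy, PartialEq)]
enum PvfExecKind {
    Backing,
    Approval,
}

fn exec_timeout(kind: PvfExecKind, override_timeout: Option<Duration>) -> Duration {
    // An executor-params override, when present, wins over the defaults.
    if let Some(t) = override_timeout {
        return t;
    }
    match kind {
        PvfExecKind::Backing => Duration::from_secs(2),   // assumed default
        PvfExecKind::Approval => Duration::from_secs(12), // assumed default
    }
}

fn retries_allowed(kind: PvfExecKind) -> bool {
    // Backing no longer retries, to reduce the chance of backing a
    // nondeterministic candidate; approval/dispute execution still retries.
    matches!(kind, PvfExecKind::Approval)
}

fn main() {
    assert_eq!(exec_timeout(PvfExecKind::Backing, None), Duration::from_secs(2));
    assert!(!retries_allowed(PvfExecKind::Backing));
    assert!(retries_allowed(PvfExecKind::Approval));
}
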
diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs
index cab823e1e637..5e2585d68735 100644
--- a/polkadot/node/core/candidate-validation/src/tests.rs
+++ b/polkadot/node/core/candidate-validation/src/tests.rs
@@ -377,7 +377,7 @@ impl ValidationBackend for MockValidateCandidateBackend {
result
}
- async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result<PrepareStats, PrepareError> {
+ async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result<(), PrepareError> {
unreachable!()
}
}
@@ -436,7 +436,7 @@ fn candidate_validation_ok_is_ok() {
candidate_receipt,
Arc::new(pov),
ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
+ PvfExecKind::Backing,
&Default::default(),
))
.unwrap();
@@ -488,7 +488,7 @@ fn candidate_validation_bad_return_is_invalid() {
candidate_receipt,
Arc::new(pov),
ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
+ PvfExecKind::Backing,
&Default::default(),
))
.unwrap();
@@ -496,23 +496,20 @@ fn candidate_validation_bad_return_is_invalid() {
assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::Timeout));
}
-// Test that we vote valid if we get `AmbiguousWorkerDeath`, retry, and then succeed.
-#[test]
-fn candidate_validation_one_ambiguous_error_is_valid() {
- let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() };
-
- let pov = PoV { block_data: BlockData(vec![1; 32]) };
- let head_data = HeadData(vec![1, 1, 1]);
- let validation_code = ValidationCode(vec![2; 16]);
-
+fn perform_basic_checks_on_valid_candidate(
+ pov: &PoV,
+ validation_code: &ValidationCode,
+ validation_data: &PersistedValidationData,
+ head_data_hash: Hash,
+) -> CandidateDescriptor {
let descriptor = make_valid_candidate_descriptor(
ParaId::from(1_u32),
dummy_hash(),
validation_data.hash(),
pov.hash(),
validation_code.hash(),
- head_data.hash(),
- dummy_hash(),
+ head_data_hash,
+ head_data_hash,
Sr25519Keyring::Alice,
);
@@ -523,6 +520,24 @@ fn candidate_validation_one_ambiguous_error_is_valid() {
&validation_code.hash(),
);
assert!(check.is_ok());
+ descriptor
+}
+
+// Test that we vote valid if we get `AmbiguousWorkerDeath`, retry, and then succeed.
+#[test]
+fn candidate_validation_one_ambiguous_error_is_valid() {
+ let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() };
+
+ let pov = PoV { block_data: BlockData(vec![1; 32]) };
+ let head_data = HeadData(vec![1, 1, 1]);
+ let validation_code = ValidationCode(vec![2; 16]);
+
+ let descriptor = perform_basic_checks_on_valid_candidate(
+ &pov,
+ &validation_code,
+ &validation_data,
+ head_data.hash(),
+ );
let validation_result = WasmValidationResult {
head_data,
@@ -554,7 +569,7 @@ fn candidate_validation_one_ambiguous_error_is_valid() {
candidate_receipt,
Arc::new(pov),
ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
+ PvfExecKind::Approval,
&Default::default(),
))
.unwrap();
@@ -576,24 +591,12 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() {
let pov = PoV { block_data: BlockData(vec![1; 32]) };
let validation_code = ValidationCode(vec![2; 16]);
- let descriptor = make_valid_candidate_descriptor(
- ParaId::from(1_u32),
- dummy_hash(),
- validation_data.hash(),
- pov.hash(),
- validation_code.hash(),
- dummy_hash(),
- dummy_hash(),
- Sr25519Keyring::Alice,
- );
-
- let check = perform_basic_checks(
- &descriptor,
- validation_data.max_pov_size,
+ let descriptor = perform_basic_checks_on_valid_candidate(
&pov,
- &validation_code.hash(),
+ &validation_code,
+ &validation_data,
+ dummy_hash(),
);
- assert!(check.is_ok());
let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() };
@@ -607,7 +610,7 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() {
candidate_receipt,
Arc::new(pov),
ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
+ PvfExecKind::Approval,
&Default::default(),
))
.unwrap();
@@ -615,58 +618,79 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() {
assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::ExecutionError(_)));
}
-// Test that we retry on internal errors.
+// Test that we retry for approval on internal errors.
#[test]
fn candidate_validation_retry_internal_errors() {
- let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() };
-
- let pov = PoV { block_data: BlockData(vec![1; 32]) };
- let validation_code = ValidationCode(vec![2; 16]);
-
- let descriptor = make_valid_candidate_descriptor(
- ParaId::from(1_u32),
- dummy_hash(),
- validation_data.hash(),
- pov.hash(),
- validation_code.hash(),
- dummy_hash(),
- dummy_hash(),
- Sr25519Keyring::Alice,
- );
-
- let check = perform_basic_checks(
- &descriptor,
- validation_data.max_pov_size,
- &pov,
- &validation_code.hash(),
+ let v = candidate_validation_retry_on_error_helper(
+ PvfExecKind::Approval,
+ vec![
+ Err(InternalValidationError::HostCommunication("foo".into()).into()),
+ // Throw an AJD error, we should still retry again.
+ Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousJobDeath(
+ "baz".into(),
+ ))),
+ // Throw another internal error.
+ Err(InternalValidationError::HostCommunication("bar".into()).into()),
+ ],
);
- assert!(check.is_ok());
-
- let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() };
+ assert_matches!(v, Err(ValidationFailed(s)) if s.contains("bar"));
+}
- let v = executor::block_on(validate_candidate_exhaustive(
- MockValidateCandidateBackend::with_hardcoded_result_list(vec![
+// Test that we don't retry for backing on internal errors.
+#[test]
+fn candidate_validation_dont_retry_internal_errors() {
+ let v = candidate_validation_retry_on_error_helper(
+ PvfExecKind::Backing,
+ vec![
Err(InternalValidationError::HostCommunication("foo".into()).into()),
// Throw an AWD error, we should still retry again.
Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)),
// Throw another internal error.
Err(InternalValidationError::HostCommunication("bar".into()).into()),
- ]),
- validation_data,
- validation_code,
- candidate_receipt,
- Arc::new(pov),
- ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
- &Default::default(),
- ));
+ ],
+ );
- assert_matches!(v, Err(ValidationFailed(s)) if s.contains("bar"));
+ assert_matches!(v, Err(ValidationFailed(s)) if s.contains("foo"));
}
-// Test that we retry on panic errors.
+// Test that we retry for approval on panic errors.
#[test]
fn candidate_validation_retry_panic_errors() {
+ let v = candidate_validation_retry_on_error_helper(
+ PvfExecKind::Approval,
+ vec![
+ Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("foo".into()))),
+ // Throw an AWD error, we should still retry again.
+ Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)),
+ // Throw another panic error.
+ Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("bar".into()))),
+ ],
+ );
+
+ assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "bar".to_string());
+}
+
+// Test that we don't retry for backing on panic errors.
+#[test]
+fn candidate_validation_dont_retry_panic_errors() {
+ let v = candidate_validation_retry_on_error_helper(
+ PvfExecKind::Backing,
+ vec![
+ Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("foo".into()))),
+ // Throw an AWD error, we should still retry again.
+ Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)),
+ // Throw another panic error.
+ Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("bar".into()))),
+ ],
+ );
+
+ assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "foo".to_string());
+}
+
+fn candidate_validation_retry_on_error_helper(
+ exec_kind: PvfExecKind,
+ mock_errors: Vec<Result<WasmValidationResult, ValidationError>>,
+) -> Result<ValidationResult, ValidationFailed> {
let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() };
let pov = PoV { block_data: BlockData(vec![1; 32]) };
@@ -693,26 +717,16 @@ fn candidate_validation_retry_panic_errors() {
let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() };
- let v = executor::block_on(validate_candidate_exhaustive(
- MockValidateCandidateBackend::with_hardcoded_result_list(vec![
- Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("foo".into()))),
- // Throw an AJD error, we should still retry again.
- Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousJobDeath(
- "baz".into(),
- ))),
- // Throw another panic error.
- Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("bar".into()))),
- ]),
+ return executor::block_on(validate_candidate_exhaustive(
+ MockValidateCandidateBackend::with_hardcoded_result_list(mock_errors),
validation_data,
validation_code,
candidate_receipt,
Arc::new(pov),
ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
+ exec_kind,
&Default::default(),
));
-
- assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "bar".to_string());
}
#[test]
@@ -752,7 +766,7 @@ fn candidate_validation_timeout_is_internal_error() {
candidate_receipt,
Arc::new(pov),
ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
+ PvfExecKind::Backing,
&Default::default(),
));
@@ -797,7 +811,7 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() {
candidate_receipt,
Arc::new(pov),
ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
+ PvfExecKind::Backing,
&Default::default(),
))
.unwrap();
@@ -846,7 +860,7 @@ fn candidate_validation_code_mismatch_is_invalid() {
candidate_receipt,
Arc::new(pov),
ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
+ PvfExecKind::Backing,
&Default::default(),
))
.unwrap();
@@ -903,7 +917,7 @@ fn compressed_code_works() {
candidate_receipt,
Arc::new(pov),
ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
+ PvfExecKind::Backing,
&Default::default(),
));
@@ -954,7 +968,7 @@ fn code_decompression_failure_is_error() {
candidate_receipt,
Arc::new(pov),
ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
+ PvfExecKind::Backing,
&Default::default(),
));
@@ -1006,7 +1020,7 @@ fn pov_decompression_failure_is_invalid() {
candidate_receipt,
Arc::new(pov),
ExecutorParams::default(),
- PvfExecTimeoutKind::Backing,
+ PvfExecKind::Backing,
&Default::default(),
));
@@ -1014,11 +1028,11 @@ fn pov_decompression_failure_is_invalid() {
}
struct MockPreCheckBackend {
- result: Result<PrepareStats, PrepareError>,
+ result: Result<(), PrepareError>,
}
impl MockPreCheckBackend {
- fn with_hardcoded_result(result: Result<PrepareStats, PrepareError>) -> Self {
+ fn with_hardcoded_result(result: Result<(), PrepareError>) -> Self {
Self { result }
}
}
@@ -1034,7 +1048,7 @@ impl ValidationBackend for MockPreCheckBackend {
unreachable!()
}
- async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result<PrepareStats, PrepareError> {
+ async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result<(), PrepareError> {
self.result.clone()
}
}
@@ -1051,7 +1065,7 @@ fn precheck_works() {
let (check_fut, check_result) = precheck_pvf(
ctx.sender(),
- MockPreCheckBackend::with_hardcoded_result(Ok(PrepareStats::default())),
+ MockPreCheckBackend::with_hardcoded_result(Ok(())),
relay_parent,
validation_code_hash,
)
@@ -1113,7 +1127,7 @@ fn precheck_invalid_pvf_blob_compression() {
let (check_fut, check_result) = precheck_pvf(
ctx.sender(),
- MockPreCheckBackend::with_hardcoded_result(Ok(PrepareStats::default())),
+ MockPreCheckBackend::with_hardcoded_result(Ok(())),
relay_parent,
validation_code_hash,
)
diff --git a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs
index 90268516e9df..05ea7323af14 100644
--- a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs
+++ b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs
@@ -32,7 +32,7 @@ use polkadot_node_subsystem::{
};
use polkadot_node_subsystem_util::runtime::get_validation_code_by_hash;
use polkadot_primitives::{
- BlockNumber, CandidateHash, CandidateReceipt, Hash, PvfExecTimeoutKind, SessionIndex,
+ BlockNumber, CandidateHash, CandidateReceipt, Hash, PvfExecKind, SessionIndex,
};
use crate::LOG_TARGET;
@@ -386,7 +386,7 @@ async fn participate(
candidate_receipt: req.candidate_receipt().clone(),
pov: available_data.pov,
executor_params: req.executor_params(),
- exec_timeout_kind: PvfExecTimeoutKind::Approval,
+ exec_kind: PvfExecKind::Approval,
response_sender: validation_tx,
})
.await;
diff --git a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs
index 0aa0d7720051..012df51d0cd3 100644
--- a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs
+++ b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs
@@ -115,8 +115,8 @@ pub async fn participation_full_happy_path(
assert_matches!(
ctx_handle.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromExhaustive { candidate_receipt, exec_timeout_kind, response_sender, .. }
- ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => {
+ CandidateValidationMessage::ValidateFromExhaustive { candidate_receipt, exec_kind, response_sender, .. }
+ ) if exec_kind == PvfExecKind::Approval => {
if expected_commitments_hash != candidate_receipt.commitments_hash {
response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap();
} else {
@@ -450,8 +450,8 @@ fn cast_invalid_vote_if_validation_fails_or_is_invalid() {
assert_matches!(
ctx_handle.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. }
- ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => {
+ CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. }
+ ) if exec_kind == PvfExecKind::Approval => {
response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap();
},
"overseer did not receive candidate validation message",
@@ -487,8 +487,8 @@ fn cast_invalid_vote_if_commitments_dont_match() {
assert_matches!(
ctx_handle.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. }
- ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => {
+ CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. }
+ ) if exec_kind == PvfExecKind::Approval => {
response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap();
},
"overseer did not receive candidate validation message",
@@ -524,8 +524,8 @@ fn cast_valid_vote_if_validation_passes() {
assert_matches!(
ctx_handle.recv().await,
AllMessages::CandidateValidation(
- CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. }
- ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => {
+ CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. }
+ ) if exec_kind == PvfExecKind::Approval => {
response_sender.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap();
},
"overseer did not receive candidate validation message",
diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml
index 3e72ca9e5326..27da484fe4f1 100644
--- a/polkadot/node/core/pvf/Cargo.toml
+++ b/polkadot/node/core/pvf/Cargo.toml
@@ -8,6 +8,7 @@ license.workspace = true
[dependencies]
always-assert = "0.1"
+blake3 = "1.5"
cfg-if = "1.0"
futures = "0.3.21"
futures-timer = "3.0.2"
diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs
index 378374a10b39..c02a0b595da3 100644
--- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs
+++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs
@@ -56,7 +56,7 @@ impl TestHost {
&self,
code: &[u8],
executor_params: ExecutorParams,
- ) -> Result<PrepareStats, PrepareError> {
+ ) -> Result<(), PrepareError> {
let (result_tx, result_rx) = futures::channel::oneshot::channel();
let code = sp_maybe_compressed_blob::decompress(code, 16 * 1024 * 1024)
diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml
index e3fda06963e3..bfe1be9156fc 100644
--- a/polkadot/node/core/pvf/common/Cargo.toml
+++ b/polkadot/node/core/pvf/common/Cargo.toml
@@ -36,6 +36,9 @@ seccompiler = "0.4.0"
assert_matches = "1.4.0"
tempfile = "3.3.0"
+[build-dependencies]
+substrate-build-script-utils = { path = "../../../../../substrate/utils/build-script-utils" }
+
[features]
# This feature is used to export test code to other crates without putting it in the production build.
test-utils = []
diff --git a/polkadot/node/core/pvf/common/build.rs b/polkadot/node/core/pvf/common/build.rs
new file mode 100644
index 000000000000..5531ad411da8
--- /dev/null
+++ b/polkadot/node/core/pvf/common/build.rs
@@ -0,0 +1,19 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+fn main() {
+ substrate_build_script_utils::generate_wasmtime_version();
+}
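The build script wires the wasmtime version into the crate: `lib.rs` below reads it back via `env!("SUBSTRATE_WASMTIME_VERSION")` and embeds it in artifact file names. A minimal sketch of the mechanism, with an illustrative version string (the real helper resolves it from the build environment, not a literal):

```rust
// Sketch only, not the helper's actual implementation: a build script exposes a
// value to the compiled crate as a compile-time environment variable.
fn main() {
    let wasmtime_version = "15.0.0"; // illustrative; resolved at build time in reality
    println!("cargo:rustc-env=SUBSTRATE_WASMTIME_VERSION={wasmtime_version}");
}
```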
diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs
index 34475c481f73..6bf05ece78ef 100644
--- a/polkadot/node/core/pvf/common/src/error.rs
+++ b/polkadot/node/core/pvf/common/src/error.rs
@@ -14,16 +14,24 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-use crate::prepare::PrepareStats;
+use crate::prepare::{PrepareSuccess, PrepareWorkerSuccess};
use parity_scale_codec::{Decode, Encode};
use std::fmt;
-/// Result of PVF preparation performed by the validation host. Contains stats about the preparation
-/// if successful
-pub type PrepareResult = Result<PrepareStats, PrepareError>;
+/// Result of PVF preparation from a worker, with checksum of the compiled PVF and stats of the
+/// preparation if successful.
+pub type PrepareWorkerResult = Result<PrepareWorkerSuccess, PrepareError>;
+
+/// Result of PVF preparation propagated all the way back to the host, with path to the concluded
+/// artifact and stats of the preparation if successful.
+pub type PrepareResult = Result<PrepareSuccess, PrepareError>;
+
+/// Result of PVF prechecking performed by the validation host.
+pub type PrecheckResult = Result<(), PrepareError>;
/// An error that occurred during the prepare part of the PVF pipeline.
-// Codec indexes are intended to stabilize pre-encoded payloads (see `OOM_PAYLOAD` below)
+// Codec indexes are intended to stabilize pre-encoded payloads (see `OOM_PAYLOAD`)
#[derive(Debug, Clone, Encode, Decode)]
pub enum PrepareError {
/// During the prevalidation stage of preparation an issue was found with the PVF.
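The alias split tracks the result across its two hops: `PrepareWorkerResult` travels from worker to host, and the host produces a `PrepareResult` once it knows the concluded artifact path. A hedged sketch of that conversion, using only types from this patch (the helper name is hypothetical; `handle_response` further below does this inline):

```rust
use std::path::PathBuf;

// Hypothetical helper: lift a worker-level result to a host-level one by
// attaching the path the artifact was renamed to.
fn conclude(worker_result: PrepareWorkerResult, path: PathBuf) -> PrepareResult {
    worker_result.map(|PrepareWorkerSuccess { stats, .. }| PrepareSuccess { path, stats })
}
```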
diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs
index e2211b97d87b..282d2f7c41d0 100644
--- a/polkadot/node/core/pvf/common/src/lib.rs
+++ b/polkadot/node/core/pvf/common/src/lib.rs
@@ -31,6 +31,8 @@ pub use sp_tracing;
const LOG_TARGET: &str = "parachain::pvf-common";
+pub const RUNTIME_VERSION: &str = env!("SUBSTRATE_WASMTIME_VERSION");
+
use std::{
io::{self, Read, Write},
mem,
diff --git a/polkadot/node/core/pvf/common/src/prepare.rs b/polkadot/node/core/pvf/common/src/prepare.rs
index 4436ebe4861e..28ab682ec136 100644
--- a/polkadot/node/core/pvf/common/src/prepare.rs
+++ b/polkadot/node/core/pvf/common/src/prepare.rs
@@ -15,6 +15,25 @@
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use parity_scale_codec::{Decode, Encode};
+use std::path::PathBuf;
+
+/// Result from prepare worker if successful.
+#[derive(Debug, Clone, Default, Encode, Decode)]
+pub struct PrepareWorkerSuccess {
+ /// Checksum of the compiled PVF.
+ pub checksum: String,
+ /// Stats of the current preparation run.
+ pub stats: PrepareStats,
+}
+
+/// Result of PVF preparation if successful.
+#[derive(Debug, Clone, Default)]
+pub struct PrepareSuccess {
+ /// Canonical path to the compiled artifact.
+ pub path: PathBuf,
+ /// Stats of the current preparation run.
+ pub stats: PrepareStats,
+}
/// Preparation statistics, including the CPU time and memory taken.
#[derive(Debug, Clone, Default, Encode, Decode)]
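How the checksum carried by `PrepareWorkerSuccess` is computed is shown in the prepare worker below; a self-contained sketch of just that step, assuming only the `blake3` crate added in this patch:

```rust
// Sketch: the checksum is the blake3 digest of the compiled artifact bytes,
// rendered as lowercase hex, matching what the prepare worker reports to the host.
fn artifact_checksum(artifact: &[u8]) -> String {
    blake3::hash(artifact).to_hex().to_string()
}
```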
diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs
index 0cc86434c195..2d8f6430187b 100644
--- a/polkadot/node/core/pvf/common/src/pvf.rs
+++ b/polkadot/node/core/pvf/common/src/pvf.rs
@@ -115,7 +115,7 @@ impl fmt::Debug for PvfPrepData {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
- "Pvf {{ code, code_hash: {:?}, executor_params: {:?}, prep_timeout: {:?} }}",
+ "Pvf {{ code: [...], code_hash: {:?}, executor_params: {:?}, prep_timeout: {:?} }}",
self.code_hash, self.executor_params, self.prep_timeout
)
}
diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml
index 1cd221533f48..005f2e935117 100644
--- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml
+++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml
@@ -7,6 +7,7 @@ edition.workspace = true
license.workspace = true
[dependencies]
+blake3 = "1.5"
cfg-if = "1.0"
gum = { package = "tracing-gum", path = "../../../gum" }
libc = "0.2.139"
diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs
index 151b54efc2d1..34e6a78c26ae 100644
--- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs
+++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs
@@ -40,10 +40,10 @@ use nix::{
use os_pipe::{self, PipeReader, PipeWriter};
use parity_scale_codec::{Decode, Encode};
use polkadot_node_core_pvf_common::{
- error::{PrepareError, PrepareResult},
+ error::{PrepareError, PrepareWorkerResult},
executor_intf::create_runtime_from_artifact_bytes,
framed_recv_blocking, framed_send_blocking,
- prepare::{MemoryStats, PrepareJobKind, PrepareStats},
+ prepare::{MemoryStats, PrepareJobKind, PrepareStats, PrepareWorkerSuccess},
pvf::PvfPrepData,
worker::{
cpu_time_monitor_loop, run_worker, stringify_panic_payload,
@@ -106,7 +106,7 @@ fn recv_request(stream: &mut UnixStream) -> io::Result<PvfPrepData> {
}
/// Send a worker response.
-fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Result<()> {
+fn send_response(stream: &mut UnixStream, result: PrepareWorkerResult) -> io::Result<()> {
framed_send_blocking(stream, &result.encode())
}
@@ -186,8 +186,8 @@ fn end_memory_tracking() -> isize {
///
/// 7. If compilation succeeded, write the compiled artifact into a temporary file.
///
-/// 8. Send the result of preparation back to the host. If any error occurred in the above steps, we
-/// send that in the `PrepareResult`.
+/// 8. Send the result of preparation back to the host, including the checksum of the artifact. If
+/// any error occurred in the above steps, we send that in the `PrepareWorkerResult`.
pub fn worker_entrypoint(
socket_path: PathBuf,
worker_dir_path: PathBuf,
@@ -439,11 +439,11 @@ fn handle_child_process(
Err(err) => Err(err),
Ok(ok) => {
cfg_if::cfg_if! {
- if #[cfg(target_os = "linux")] {
- let (artifact, max_rss) = ok;
- } else {
- let artifact = ok;
- }
+ if #[cfg(target_os = "linux")] {
+ let (artifact, max_rss) = ok;
+ } else {
+ let artifact = ok;
+ }
}
// Stop the memory stats worker and get its observed memory stats.
@@ -511,7 +511,7 @@ fn handle_parent_process(
worker_pid: u32,
usage_before: Usage,
timeout: Duration,
-) -> Result<PrepareStats, PrepareError> {
+) -> Result<PrepareWorkerSuccess, PrepareError> {
// Read from the child. Don't decode unless the process exited normally, which we check later.
let mut received_data = Vec::new();
pipe_read
@@ -554,7 +554,7 @@ fn handle_parent_process(
match result {
Err(err) => Err(err),
- Ok(response) => {
+ Ok(JobResponse { artifact, memory_stats }) => {
// The exit status should have been zero if no error occurred.
if exit_status != 0 {
return Err(PrepareError::JobError(format!(
@@ -577,13 +577,14 @@ fn handle_parent_process(
temp_artifact_dest.display(),
);
// Write to the temp file created by the host.
- if let Err(err) = fs::write(&temp_artifact_dest, &response.artifact) {
+ if let Err(err) = fs::write(&temp_artifact_dest, &artifact) {
return Err(PrepareError::IoErr(err.to_string()))
};
- Ok(PrepareStats {
- memory_stats: response.memory_stats,
- cpu_time_elapsed: cpu_tv,
+ let checksum = blake3::hash(&artifact.as_ref()).to_hex().to_string();
+ Ok(PrepareWorkerSuccess {
+ checksum,
+ stats: PrepareStats { memory_stats, cpu_time_elapsed: cpu_tv },
})
},
}
@@ -657,13 +658,13 @@ fn error_from_errno(context: &'static str, errno: Errno) -> PrepareError {
type JobResult = Result<JobResponse, PrepareError>;
-/// Pre-encoded length-prefixed `Result::Err(PrepareError::OutOfMemory)`
+/// Pre-encoded length-prefixed `JobResult::Err(PrepareError::OutOfMemory)`
const OOM_PAYLOAD: &[u8] = b"\x02\x00\x00\x00\x00\x00\x00\x00\x01\x08";
#[test]
fn pre_encoded_payloads() {
// NOTE: This must match the type of `response` in `send_child_response`.
- let oom_unencoded: JobResult = Result::Err(PrepareError::OutOfMemory);
+ let oom_unencoded: JobResult = JobResult::Err(PrepareError::OutOfMemory);
let oom_encoded = oom_unencoded.encode();
// The payload is prefixed with its length in `framed_send`.
let mut oom_payload = oom_encoded.len().to_le_bytes().to_vec();
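`OOM_PAYLOAD` is ten bytes: an 8-byte little-endian length prefix (2) followed by the SCALE encoding of `Err(PrepareError::OutOfMemory)` (`0x01` for the `Err` variant, `0x08` for the error's codec index). A sketch of the framing rule the test above pins down, assuming `parity-scale-codec`:

```rust
use parity_scale_codec::Encode;

// Sketch of the framing used by framed_send_blocking: SCALE-encode the value,
// then prefix it with its byte length as a little-endian usize.
fn frame<T: Encode>(value: &T) -> Vec<u8> {
    let encoded = value.encode();
    let mut framed = encoded.len().to_le_bytes().to_vec();
    framed.extend(encoded);
    framed
}
```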
diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs
index dd83f76494ed..53085eade3cb 100644
--- a/polkadot/node/core/pvf/src/artifacts.rs
+++ b/polkadot/node/core/pvf/src/artifacts.rs
@@ -16,10 +16,10 @@
//! PVF artifacts (final compiled code blobs).
//!
-//! # Lifecycle of an artifact
+//! # Lifecycle of an artifact
//!
-//! 1. During node start-up, the artifacts cache is cleaned up. This means that all local artifacts
-//! stored on-disk are cleared, and we start with an empty [`Artifacts`] table.
+//! 1. During node start-up, we check the cached artifacts, if any. Stale and corrupted
+//! ones are pruned; valid ones are registered in the [`Artifacts`] table.
//!
//! 2. In order to be executed, a PVF should be prepared first. This means that artifacts should
//! have an [`ArtifactState::Prepared`] entry for that artifact in the table. If not, the
@@ -55,18 +55,29 @@
//! older by a predefined parameter. This process is run very rarely (say, once a day). Once the
//! artifact is expired it is removed from disk eagerly atomically.
-use crate::host::PrepareResultSender;
+use crate::{host::PrecheckResultSender, LOG_TARGET};
use always_assert::always;
-use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData};
+use polkadot_core_primitives::Hash;
+use polkadot_node_core_pvf_common::{
+ error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData, RUNTIME_VERSION,
+};
use polkadot_node_primitives::NODE_VERSION;
use polkadot_parachain_primitives::primitives::ValidationCodeHash;
use polkadot_primitives::ExecutorParamsHash;
use std::{
collections::HashMap,
path::{Path, PathBuf},
+ str::FromStr as _,
time::{Duration, SystemTime},
};
+const RUNTIME_PREFIX: &str = "wasmtime_v";
+const NODE_PREFIX: &str = "polkadot_v";
+
+fn artifact_prefix() -> String {
+ format!("{}{}_{}{}", RUNTIME_PREFIX, RUNTIME_VERSION, NODE_PREFIX, NODE_VERSION)
+}
+
/// Identifier of an artifact. Encodes a code hash of the PVF and a hash of executor parameter set.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ArtifactId {
@@ -75,9 +86,6 @@ pub struct ArtifactId {
}
impl ArtifactId {
- const PREFIX: &'static str = "wasmtime_";
- const NODE_VERSION_PREFIX: &'static str = "polkadot_v";
-
/// Creates a new artifact ID with the given hash.
pub fn new(code_hash: ValidationCodeHash, executor_params_hash: ExecutorParamsHash) -> Self {
Self { code_hash, executor_params_hash }
@@ -88,38 +96,34 @@ impl ArtifactId {
Self::new(pvf.code_hash(), pvf.executor_params().hash())
}
- /// Tries to recover the artifact id from the given file name.
- #[cfg(test)]
- pub fn from_file_name(file_name: &str) -> Option<Self> {
- use polkadot_core_primitives::Hash;
- use std::str::FromStr as _;
-
- let file_name =
- file_name.strip_prefix(Self::PREFIX)?.strip_prefix(Self::NODE_VERSION_PREFIX)?;
-
- // [ node version | code hash | param hash ]
- let parts: Vec<&str> = file_name.split('_').collect();
- let (_node_ver, code_hash_str, executor_params_hash_str) = (parts[0], parts[1], parts[2]);
-
- let code_hash = Hash::from_str(code_hash_str).ok()?.into();
- let executor_params_hash =
- ExecutorParamsHash::from_hash(Hash::from_str(executor_params_hash_str).ok()?);
-
- Some(Self { code_hash, executor_params_hash })
- }
-
- /// Returns the expected path to this artifact given the root of the cache.
- pub fn path(&self, cache_path: &Path) -> PathBuf {
+ /// Returns the canonical path to the concluded artifact.
+ pub(crate) fn path(&self, cache_path: &Path, checksum: &str) -> PathBuf {
let file_name = format!(
- "{}{}{}_{:#x}_{:#x}",
- Self::PREFIX,
- Self::NODE_VERSION_PREFIX,
- NODE_VERSION,
+ "{}_{:#x}_{:#x}_0x{}",
+ artifact_prefix(),
self.code_hash,
- self.executor_params_hash
+ self.executor_params_hash,
+ checksum
);
cache_path.join(file_name)
}
+
+ /// Tries to recover the artifact id from the given file name.
+ /// Return `None` if the given file name is invalid.
+ /// VALID_NAME := <PREFIX> _ <CODE_HASH> _ <PARAM_HASH> _ <CHECKSUM>
+ fn from_file_name(file_name: &str) -> Option<Self> {
+ let file_name = file_name.strip_prefix(&artifact_prefix())?.strip_prefix('_')?;
+ let parts: Vec<&str> = file_name.split('_').collect();
+
+ if let [code_hash, param_hash, _checksum] = parts[..] {
+ let code_hash = Hash::from_str(code_hash).ok()?.into();
+ let executor_params_hash =
+ ExecutorParamsHash::from_hash(Hash::from_str(param_hash).ok()?);
+ return Some(Self { code_hash, executor_params_hash })
+ }
+
+ None
+ }
}
/// A bundle of the artifact ID and the path.
@@ -136,8 +140,8 @@ pub struct ArtifactPathId {
}
impl ArtifactPathId {
- pub(crate) fn new(artifact_id: ArtifactId, cache_path: &Path) -> Self {
- Self { path: artifact_id.path(cache_path), id: artifact_id }
+ pub(crate) fn new(artifact_id: ArtifactId, path: &Path) -> Self {
+ Self { id: artifact_id, path: path.to_owned() }
}
}
@@ -148,6 +152,8 @@ pub enum ArtifactState {
/// That means that the artifact should be accessible through the path obtained by the artifact
/// id (unless, it was removed externally).
Prepared {
+ /// The path of the compiled artifact.
+ path: PathBuf,
/// The time when the artifact was last needed.
///
/// This is updated when we get the heads up for this artifact or when we just discover
@@ -159,7 +165,7 @@ pub enum ArtifactState {
/// A task to prepare this artifact is scheduled.
Preparing {
/// List of result senders that are waiting for a response.
- waiting_for_response: Vec<PrepareResultSender>,
+ waiting_for_response: Vec<PrecheckResultSender>,
/// The number of times this artifact has failed to prepare.
num_failures: u32,
},
@@ -177,32 +183,148 @@ pub enum ArtifactState {
/// A container of all known artifact ids and their states.
pub struct Artifacts {
- artifacts: HashMap<ArtifactId, ArtifactState>,
+ inner: HashMap<ArtifactId, ArtifactState>,
}
impl Artifacts {
- /// Initialize a blank cache at the given path. This will clear everything present at the
- /// given path, to be populated over time.
- ///
- /// The recognized artifacts will be filled in the table and unrecognized will be removed.
- pub async fn new(cache_path: &Path) -> Self {
- // First delete the entire cache. This includes artifacts and any leftover worker dirs (see
- // [`WorkerDir`]). Nodes are long-running so this should populate shortly.
- let _ = tokio::fs::remove_dir_all(cache_path).await;
+ #[cfg(test)]
+ pub(crate) fn empty() -> Self {
+ Self { inner: HashMap::new() }
+ }
+
+ #[cfg(test)]
+ pub(crate) fn len(&self) -> usize {
+ self.inner.len()
+ }
+
+ /// Create an empty table and populate it with valid artifacts as [`ArtifactState::Prepared`],
+ /// if any. Existing cache entries are checked by file name to determine whether they are
+ /// valid, e.g., whether they match the current node version. Invalid ones are pruned.
+ pub async fn new_and_prune(cache_path: &Path) -> Self {
+ let mut artifacts = Self { inner: HashMap::new() };
+ artifacts.insert_and_prune(cache_path).await;
+ artifacts
+ }
+
+ async fn insert_and_prune(&mut self, cache_path: &Path) {
+ async fn is_corrupted(path: &Path) -> bool {
+ let checksum = match tokio::fs::read(path).await {
+ Ok(bytes) => blake3::hash(&bytes),
+ Err(err) => {
+ // just remove the file if we cannot read it
+ gum::warn!(
+ target: LOG_TARGET,
+ ?err,
+ "unable to read artifact {:?} when checking integrity, removing...",
+ path,
+ );
+ return true
+ },
+ };
+
+ if let Some(file_name) = path.file_name() {
+ if let Some(file_name) = file_name.to_str() {
+ return !file_name.ends_with(checksum.to_hex().as_str())
+ }
+ }
+ true
+ }
+
+ // Insert the entry into the artifacts table if it is valid.
+ // Otherwise, prune it.
+ async fn insert_or_prune(
+ artifacts: &mut Artifacts,
+ entry: &tokio::fs::DirEntry,
+ cache_path: &Path,
+ ) {
+ let file_type = entry.file_type().await;
+ let file_name = entry.file_name();
+
+ match file_type {
+ Ok(file_type) =>
+ if !file_type.is_file() {
+ return
+ },
+ Err(err) => {
+ gum::warn!(
+ target: LOG_TARGET,
+ ?err,
+ "unable to get file type for {:?}",
+ file_name,
+ );
+ return
+ },
+ }
+
+ if let Some(file_name) = file_name.to_str() {
+ let id = ArtifactId::from_file_name(file_name);
+ let path = cache_path.join(file_name);
+
+ if id.is_none() || is_corrupted(&path).await {
+ gum::warn!(
+ target: LOG_TARGET,
+ "discarding invalid artifact {:?}",
+ &path,
+ );
+ let _ = tokio::fs::remove_file(&path).await;
+ return
+ }
+
+ if let Some(id) = id {
+ gum::debug!(
+ target: LOG_TARGET,
+ "reusing existing {:?} for node version v{}",
+ &path,
+ NODE_VERSION,
+ );
+ artifacts.insert_prepared(id, path, SystemTime::now(), Default::default());
+ }
+ } else {
+ gum::warn!(
+ target: LOG_TARGET,
+ "non-Unicode file name {:?} found in {:?}",
+ file_name,
+ cache_path,
+ );
+ }
+ }
+
// Make sure that the cache path directory and all its parents are created.
let _ = tokio::fs::create_dir_all(cache_path).await;
- Self { artifacts: HashMap::new() }
- }
+ let mut dir = match tokio::fs::read_dir(cache_path).await {
+ Ok(dir) => dir,
+ Err(err) => {
+ gum::error!(
+ target: LOG_TARGET,
+ ?err,
+ "failed to read dir {:?}",
+ cache_path,
+ );
+ return
+ },
+ };
- #[cfg(test)]
- pub(crate) fn empty() -> Self {
- Self { artifacts: HashMap::new() }
+ loop {
+ match dir.next_entry().await {
+ Ok(Some(entry)) => insert_or_prune(self, &entry, cache_path).await,
+ Ok(None) => break,
+ Err(err) => {
+ gum::warn!(
+ target: LOG_TARGET,
+ ?err,
+ "error processing artifacts in {:?}",
+ cache_path,
+ );
+ break
+ },
+ }
+ }
}
/// Returns the state of the given artifact by its ID.
pub fn artifact_state_mut(&mut self, artifact_id: &ArtifactId) -> Option<&mut ArtifactState> {
- self.artifacts.get_mut(artifact_id)
+ self.inner.get_mut(artifact_id)
}
/// Inform the table about the artifact with the given ID. The state will be set to "preparing".
@@ -212,53 +334,52 @@ impl Artifacts {
pub fn insert_preparing(
&mut self,
artifact_id: ArtifactId,
- waiting_for_response: Vec<PrepareResultSender>,
+ waiting_for_response: Vec<PrecheckResultSender>,
) {
// See the precondition.
always!(self
- .artifacts
+ .inner
.insert(artifact_id, ArtifactState::Preparing { waiting_for_response, num_failures: 0 })
.is_none());
}
/// Insert an artifact with the given ID as "prepared".
///
- /// This function must be used only for brand-new artifacts and should never be used for
- /// replacing existing ones.
- #[cfg(test)]
- pub fn insert_prepared(
+ /// This function should only be used to build the artifact table at startup with valid
+ /// artifact caches.
+ pub(crate) fn insert_prepared(
&mut self,
artifact_id: ArtifactId,
+ path: PathBuf,
last_time_needed: SystemTime,
prepare_stats: PrepareStats,
) {
// See the precondition.
always!(self
- .artifacts
- .insert(artifact_id, ArtifactState::Prepared { last_time_needed, prepare_stats })
+ .inner
+ .insert(artifact_id, ArtifactState::Prepared { path, last_time_needed, prepare_stats })
.is_none());
}
- /// Remove and retrieve the artifacts from the table that are older than the supplied
- /// Time-To-Live.
- pub fn prune(&mut self, artifact_ttl: Duration) -> Vec<ArtifactId> {
+ /// Remove artifacts older than the given TTL and return id and path of the removed ones.
+ pub fn prune(&mut self, artifact_ttl: Duration) -> Vec<(ArtifactId, PathBuf)> {
let now = SystemTime::now();
let mut to_remove = vec![];
- for (k, v) in self.artifacts.iter() {
- if let ArtifactState::Prepared { last_time_needed, .. } = *v {
+ for (k, v) in self.inner.iter() {
+ if let ArtifactState::Prepared { last_time_needed, ref path, .. } = *v {
if now
.duration_since(last_time_needed)
.map(|age| age > artifact_ttl)
.unwrap_or(false)
{
- to_remove.push(k.clone());
+ to_remove.push((k.clone(), path.clone()));
}
}
}
for artifact in &to_remove {
- self.artifacts.remove(artifact);
+ self.inner.remove(&artifact.0);
}
to_remove
@@ -267,13 +388,72 @@ impl Artifacts {
#[cfg(test)]
mod tests {
- use super::{ArtifactId, Artifacts, NODE_VERSION};
+ use super::{artifact_prefix as prefix, ArtifactId, Artifacts, NODE_VERSION, RUNTIME_VERSION};
use polkadot_primitives::ExecutorParamsHash;
+ use rand::Rng;
use sp_core::H256;
- use std::{path::Path, str::FromStr};
+ use std::{
+ fs,
+ io::Write,
+ path::{Path, PathBuf},
+ str::FromStr,
+ };
+
+ fn rand_hash(len: usize) -> String {
+ let mut rng = rand::thread_rng();
+ let hex: Vec<_> = "0123456789abcdef".chars().collect();
+ (0..len).map(|_| hex[rng.gen_range(0..hex.len())]).collect()
+ }
+
+ fn file_name(code_hash: &str, param_hash: &str, checksum: &str) -> String {
+ format!("{}_0x{}_0x{}_0x{}", prefix(), code_hash, param_hash, checksum)
+ }
- fn file_name(code_hash: &str, param_hash: &str) -> String {
- format!("wasmtime_polkadot_v{}_0x{}_0x{}", NODE_VERSION, code_hash, param_hash)
+ fn create_artifact(
+ dir: impl AsRef<Path>,
+ prefix: &str,
+ code_hash: impl AsRef<str>,
+ params_hash: impl AsRef<str>,
+ ) -> (PathBuf, String) {
+ fn artifact_path_without_checksum(
+ dir: impl AsRef<Path>,
+ prefix: &str,
+ code_hash: impl AsRef<str>,
+ params_hash: impl AsRef<str>,
+ ) -> PathBuf {
+ let mut path = dir.as_ref().to_path_buf();
+ let file_name =
+ format!("{}_0x{}_0x{}", prefix, code_hash.as_ref(), params_hash.as_ref(),);
+ path.push(file_name);
+ path
+ }
+
+ let (code_hash, params_hash) = (code_hash.as_ref(), params_hash.as_ref());
+ let path = artifact_path_without_checksum(dir, prefix, code_hash, params_hash);
+ let mut file = fs::File::create(&path).unwrap();
+
+ let content = format!("{}{}", code_hash, params_hash).into_bytes();
+ file.write_all(&content).unwrap();
+ let checksum = blake3::hash(&content).to_hex().to_string();
+
+ (path, checksum)
+ }
+
+ fn create_rand_artifact(dir: impl AsRef<Path>, prefix: &str) -> (PathBuf, String) {
+ create_artifact(dir, prefix, rand_hash(64), rand_hash(64))
+ }
+
+ fn concluded_path(path: impl AsRef<Path>, checksum: &str) -> PathBuf {
+ let path = path.as_ref();
+ let mut file_name = path.file_name().unwrap().to_os_string();
+ file_name.push("_0x");
+ file_name.push(checksum);
+ path.with_file_name(file_name)
+ }
+
+ #[test]
+ fn artifact_prefix() {
+ assert_eq!(prefix(), format!("wasmtime_v{}_polkadot_v{}", RUNTIME_VERSION, NODE_VERSION));
}
#[test]
@@ -284,6 +464,7 @@ mod tests {
let file_name = file_name(
"0022800000000000000000000000000000000000000000000000000000000000",
"0033900000000000000000000000000000000000000000000000000000000000",
+ "00000000000000000000000000000000",
);
assert_eq!(
@@ -305,40 +486,54 @@ mod tests {
let dir = Path::new("/test");
let code_hash = "1234567890123456789012345678901234567890123456789012345678901234";
let params_hash = "4321098765432109876543210987654321098765432109876543210987654321";
- let file_name = file_name(code_hash, params_hash);
+ let checksum = "34567890123456789012345678901234";
+ let file_name = file_name(code_hash, params_hash, checksum);
let code_hash = H256::from_str(code_hash).unwrap();
let params_hash = H256::from_str(params_hash).unwrap();
+ let path = ArtifactId::new(code_hash.into(), ExecutorParamsHash::from_hash(params_hash))
+ .path(dir, checksum);
- assert_eq!(
- ArtifactId::new(code_hash.into(), ExecutorParamsHash::from_hash(params_hash))
- .path(dir)
- .to_str(),
- Some(format!("/test/{}", file_name).as_str()),
- );
+ assert_eq!(path.to_str().unwrap(), format!("/test/{}", file_name));
}
#[tokio::test]
- async fn artifacts_removes_cache_on_startup() {
- let fake_cache_path = crate::worker_intf::tmppath("test-cache").await.unwrap();
- let fake_artifact_path = {
- let mut p = fake_cache_path.clone();
- p.push("wasmtime_0x1234567890123456789012345678901234567890123456789012345678901234");
- p
- };
+ async fn remove_stale_cache_on_startup() {
+ let cache_dir = crate::worker_intf::tmppath("test-cache").await.unwrap();
+ fs::create_dir_all(&cache_dir).unwrap();
+
+ // invalid prefix
+ create_rand_artifact(&cache_dir, "");
+ create_rand_artifact(&cache_dir, "wasmtime_polkadot_v");
+ create_rand_artifact(&cache_dir, "wasmtime_v8.0.0_polkadot_v1.0.0");
+
+ let prefix = prefix();
+
+ // no checksum
+ create_rand_artifact(&cache_dir, &prefix);
+
+ // invalid hashes
+ let (path, checksum) = create_artifact(&cache_dir, &prefix, "000", "000001");
+ let new_path = concluded_path(&path, &checksum);
+ fs::rename(&path, &new_path).unwrap();
- // create a tmp cache with 1 artifact.
+ // checksum tampered
+ let (path, checksum) = create_rand_artifact(&cache_dir, &prefix);
+ let new_path = concluded_path(&path, checksum.chars().rev().collect::<String>().as_str());
+ fs::rename(&path, &new_path).unwrap();
- std::fs::create_dir_all(&fake_cache_path).unwrap();
- std::fs::File::create(fake_artifact_path).unwrap();
+ // valid
+ let (path, checksum) = create_rand_artifact(&cache_dir, &prefix);
+ let new_path = concluded_path(&path, &checksum);
+ fs::rename(&path, &new_path).unwrap();
- // this should remove it and re-create.
+ assert_eq!(fs::read_dir(&cache_dir).unwrap().count(), 7);
- let p = &fake_cache_path;
- Artifacts::new(p).await;
+ let artifacts = Artifacts::new_and_prune(&cache_dir).await;
- assert_eq!(std::fs::read_dir(&fake_cache_path).unwrap().count(), 0);
+ assert_eq!(fs::read_dir(&cache_dir).unwrap().count(), 1);
+ assert_eq!(artifacts.len(), 1);
- std::fs::remove_dir_all(fake_cache_path).unwrap();
+ fs::remove_dir_all(cache_dir).unwrap();
}
}
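The startup integrity check reduces to one rule: a cached file is trusted only if its name ends with the blake3 hex digest of its contents, the suffix appended when the artifact is renamed into place. A standalone sketch of that predicate (the inverse of `is_corrupted` above), assuming the `blake3` crate:

```rust
// Sketch: a cache entry is intact iff its checksum suffix matches its bytes.
fn is_intact(file_name: &str, contents: &[u8]) -> bool {
    file_name.ends_with(blake3::hash(contents).to_hex().as_str())
}
```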
diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs
index 5919b9ba32c9..f67934e4171c 100644
--- a/polkadot/node/core/pvf/src/host.rs
+++ b/polkadot/node/core/pvf/src/host.rs
@@ -32,14 +32,15 @@ use futures::{
Future, FutureExt, SinkExt, StreamExt,
};
use polkadot_node_core_pvf_common::{
- error::{PrepareError, PrepareResult},
+ error::{PrecheckResult, PrepareError},
+ prepare::PrepareSuccess,
pvf::PvfPrepData,
};
use polkadot_node_subsystem::SubsystemResult;
use polkadot_parachain_primitives::primitives::ValidationResult;
use std::{
collections::HashMap,
- path::{Path, PathBuf},
+ path::PathBuf,
time::{Duration, SystemTime},
};
@@ -63,7 +64,7 @@ pub const EXECUTE_BINARY_NAME: &str = "polkadot-execute-worker";
pub(crate) type ResultSender = oneshot::Sender<Result<ValidationResult, ValidationError>>;
/// Transmission end used for sending the PVF preparation result.
-pub(crate) type PrepareResultSender = oneshot::Sender<PrepareResult>;
+pub(crate) type PrecheckResultSender = oneshot::Sender<PrecheckResult>;
/// A handle to the async process serving the validation host requests.
#[derive(Clone)]
@@ -83,7 +84,7 @@ impl ValidationHost {
pub async fn precheck_pvf(
&mut self,
pvf: PvfPrepData,
- result_tx: PrepareResultSender,
+ result_tx: PrecheckResultSender,
) -> Result<(), String> {
self.to_host_tx
.send(ToHost::PrecheckPvf { pvf, result_tx })
@@ -133,7 +134,7 @@ impl ValidationHost {
}
enum ToHost {
- PrecheckPvf { pvf: PvfPrepData, result_tx: PrepareResultSender },
+ PrecheckPvf { pvf: PvfPrepData, result_tx: PrecheckResultSender },
ExecutePvf(ExecutePvfInputs),
HeadsUp { active_pvfs: Vec<PvfPrepData> },
}
@@ -249,10 +250,9 @@ pub async fn start(
let run_sweeper = sweeper_task(to_sweeper_rx);
let run_host = async move {
- let artifacts = Artifacts::new(&config.cache_path).await;
+ let artifacts = Artifacts::new_and_prune(&config.cache_path).await;
run(Inner {
- cache_path: config.cache_path,
cleanup_pulse_interval: Duration::from_secs(3600),
artifact_ttl: Duration::from_secs(3600 * 24),
artifacts,
@@ -296,7 +296,6 @@ impl AwaitingPrepare {
}
struct Inner {
- cache_path: PathBuf,
cleanup_pulse_interval: Duration,
artifact_ttl: Duration,
artifacts: Artifacts,
@@ -317,7 +316,6 @@ struct Fatal;
async fn run(
Inner {
- cache_path,
cleanup_pulse_interval,
artifact_ttl,
mut artifacts,
@@ -361,7 +359,6 @@ async fn run(
// will notice it.
break_if_fatal!(handle_cleanup_pulse(
- &cache_path,
&mut to_sweeper_tx,
&mut artifacts,
artifact_ttl,
@@ -380,7 +377,6 @@ async fn run(
// If the artifact failed before, it could be re-scheduled for preparation here if
// the preparation failure cooldown has elapsed.
break_if_fatal!(handle_to_host(
- &cache_path,
&mut artifacts,
&mut to_prepare_queue_tx,
&mut to_execute_queue_tx,
@@ -402,7 +398,6 @@ async fn run(
// We could be eager in terms of reporting and plumb the result from the preparation
// worker but we don't for the sake of simplicity.
break_if_fatal!(handle_prepare_done(
- &cache_path,
&mut artifacts,
&mut to_execute_queue_tx,
&mut awaiting_prepare,
@@ -414,7 +409,6 @@ async fn run(
}
async fn handle_to_host(
- cache_path: &Path,
artifacts: &mut Artifacts,
prepare_queue: &mut mpsc::Sender<prepare::ToQueue>,
execute_queue: &mut mpsc::Sender<execute::ToQueue>,
@@ -426,15 +420,8 @@ async fn handle_to_host(
handle_precheck_pvf(artifacts, prepare_queue, pvf, result_tx).await?;
},
ToHost::ExecutePvf(inputs) => {
- handle_execute_pvf(
- cache_path,
- artifacts,
- prepare_queue,
- execute_queue,
- awaiting_prepare,
- inputs,
- )
- .await?;
+ handle_execute_pvf(artifacts, prepare_queue, execute_queue, awaiting_prepare, inputs)
+ .await?;
},
ToHost::HeadsUp { active_pvfs } =>
handle_heads_up(artifacts, prepare_queue, active_pvfs).await?,
@@ -454,21 +441,21 @@ async fn handle_precheck_pvf(
artifacts: &mut Artifacts,
prepare_queue: &mut mpsc::Sender<prepare::ToQueue>,
pvf: PvfPrepData,
- result_sender: PrepareResultSender,
+ result_sender: PrecheckResultSender,
) -> Result<(), Fatal> {
let artifact_id = ArtifactId::from_pvf_prep_data(&pvf);
if let Some(state) = artifacts.artifact_state_mut(&artifact_id) {
match state {
- ArtifactState::Prepared { last_time_needed, prepare_stats } => {
+ ArtifactState::Prepared { last_time_needed, .. } => {
*last_time_needed = SystemTime::now();
- let _ = result_sender.send(Ok(prepare_stats.clone()));
+ let _ = result_sender.send(Ok(()));
},
ArtifactState::Preparing { waiting_for_response, num_failures: _ } =>
waiting_for_response.push(result_sender),
ArtifactState::FailedToProcess { error, .. } => {
// Do not retry an artifact that previously failed preparation.
- let _ = result_sender.send(PrepareResult::Err(error.clone()));
+ let _ = result_sender.send(PrecheckResult::Err(error.clone()));
},
}
} else {
@@ -491,7 +478,6 @@ async fn handle_precheck_pvf(
/// When preparing for execution, we use a more lenient timeout ([`LENIENT_PREPARATION_TIMEOUT`])
/// than when prechecking.
async fn handle_execute_pvf(
- cache_path: &Path,
artifacts: &mut Artifacts,
prepare_queue: &mut mpsc::Sender<prepare::ToQueue>,
execute_queue: &mut mpsc::Sender<execute::ToQueue>,
@@ -504,8 +490,8 @@ async fn handle_execute_pvf(
if let Some(state) = artifacts.artifact_state_mut(&artifact_id) {
match state {
- ArtifactState::Prepared { last_time_needed, .. } => {
- let file_metadata = std::fs::metadata(artifact_id.path(cache_path));
+ ArtifactState::Prepared { ref path, last_time_needed, .. } => {
+ let file_metadata = std::fs::metadata(path);
if file_metadata.is_ok() {
*last_time_needed = SystemTime::now();
@@ -514,7 +500,7 @@ async fn handle_execute_pvf(
send_execute(
execute_queue,
execute::ToQueue::Enqueue {
- artifact: ArtifactPathId::new(artifact_id, cache_path),
+ artifact: ArtifactPathId::new(artifact_id, path),
pending_execution_request: PendingExecutionRequest {
exec_timeout,
params,
@@ -677,7 +663,6 @@ async fn handle_heads_up(
}
async fn handle_prepare_done(
- cache_path: &Path,
artifacts: &mut Artifacts,
execute_queue: &mut mpsc::Sender<execute::ToQueue>,
awaiting_prepare: &mut AwaitingPrepare,
@@ -718,7 +703,8 @@ async fn handle_prepare_done(
state
{
for result_sender in waiting_for_response.drain(..) {
- let _ = result_sender.send(result.clone());
+ let result = result.clone().map(|_| ());
+ let _ = result_sender.send(result);
}
num_failures
} else {
@@ -738,16 +724,18 @@ async fn handle_prepare_done(
continue
}
- // Don't send failed artifacts to the execution's queue.
- if let Err(ref error) = result {
- let _ = result_tx.send(Err(ValidationError::from(error.clone())));
- continue
- }
+ let path = match &result {
+ Ok(success) => success.path.clone(),
+ Err(error) => {
+ let _ = result_tx.send(Err(ValidationError::from(error.clone())));
+ continue
+ },
+ };
send_execute(
execute_queue,
execute::ToQueue::Enqueue {
- artifact: ArtifactPathId::new(artifact_id.clone(), cache_path),
+ artifact: ArtifactPathId::new(artifact_id.clone(), &path),
pending_execution_request: PendingExecutionRequest {
exec_timeout,
params,
@@ -760,8 +748,8 @@ async fn handle_prepare_done(
}
*state = match result {
- Ok(prepare_stats) =>
- ArtifactState::Prepared { last_time_needed: SystemTime::now(), prepare_stats },
+ Ok(PrepareSuccess { path, stats: prepare_stats }) =>
+ ArtifactState::Prepared { path, last_time_needed: SystemTime::now(), prepare_stats },
Err(error) => {
let last_time_failed = SystemTime::now();
let num_failures = *num_failures + 1;
@@ -814,7 +802,6 @@ async fn enqueue_prepare_for_execute(
}
async fn handle_cleanup_pulse(
- cache_path: &Path,
sweeper_tx: &mut mpsc::Sender<PathBuf>,
artifacts: &mut Artifacts,
artifact_ttl: Duration,
@@ -825,14 +812,13 @@ async fn handle_cleanup_pulse(
"PVF pruning: {} artifacts reached their end of life",
to_remove.len(),
);
- for artifact_id in to_remove {
+ for (artifact_id, path) in to_remove {
gum::debug!(
target: LOG_TARGET,
validation_code_hash = ?artifact_id.code_hash,
"pruning artifact",
);
- let artifact_path = artifact_id.path(cache_path);
- sweeper_tx.send(artifact_path).await.map_err(|_| Fatal)?;
+ sweeper_tx.send(path).await.map_err(|_| Fatal)?;
}
Ok(())
@@ -890,7 +876,11 @@ pub(crate) mod tests {
use crate::InvalidCandidate;
use assert_matches::assert_matches;
use futures::future::BoxFuture;
- use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats};
+ use polkadot_node_core_pvf_common::{
+ error::PrepareError,
+ prepare::{PrepareStats, PrepareSuccess},
+ };
+ use sp_core::hexdisplay::AsBytesRef;
const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3);
pub(crate) const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30);
@@ -910,12 +900,16 @@ pub(crate) mod tests {
}
/// Creates a new PVF which artifact id can be uniquely identified by the given number.
- fn artifact_id(descriminator: u32) -> ArtifactId {
- ArtifactId::from_pvf_prep_data(&PvfPrepData::from_discriminator(descriminator))
+ fn artifact_id(discriminator: u32) -> ArtifactId {
+ ArtifactId::from_pvf_prep_data(&PvfPrepData::from_discriminator(discriminator))
}
- fn artifact_path(descriminator: u32) -> PathBuf {
- artifact_id(descriminator).path(&PathBuf::from(std::env::temp_dir())).to_owned()
+ fn artifact_path(discriminator: u32) -> PathBuf {
+ let pvf = PvfPrepData::from_discriminator(discriminator);
+ let checksum = blake3::hash(pvf.code().as_bytes_ref());
+ artifact_id(discriminator)
+ .path(&PathBuf::from(std::env::temp_dir()), checksum.to_hex().as_str())
+ .to_owned()
}
struct Builder {
@@ -953,8 +947,6 @@ pub(crate) mod tests {
impl Test {
fn new(Builder { cleanup_pulse_interval, artifact_ttl, artifacts }: Builder) -> Self {
- let cache_path = PathBuf::from(std::env::temp_dir());
-
let (to_host_tx, to_host_rx) = mpsc::channel(10);
let (to_prepare_queue_tx, to_prepare_queue_rx) = mpsc::channel(10);
let (from_prepare_queue_tx, from_prepare_queue_rx) = mpsc::unbounded();
@@ -962,7 +954,6 @@ pub(crate) mod tests {
let (to_sweeper_tx, to_sweeper_rx) = mpsc::channel(10);
let run = run(Inner {
- cache_path,
cleanup_pulse_interval,
artifact_ttl,
artifacts,
@@ -1111,12 +1102,18 @@ pub(crate) mod tests {
let mut builder = Builder::default();
builder.cleanup_pulse_interval = Duration::from_millis(100);
builder.artifact_ttl = Duration::from_millis(500);
- builder
- .artifacts
- .insert_prepared(artifact_id(1), mock_now, PrepareStats::default());
- builder
- .artifacts
- .insert_prepared(artifact_id(2), mock_now, PrepareStats::default());
+ builder.artifacts.insert_prepared(
+ artifact_id(1),
+ artifact_path(1),
+ mock_now,
+ PrepareStats::default(),
+ );
+ builder.artifacts.insert_prepared(
+ artifact_id(2),
+ artifact_path(2),
+ mock_now,
+ PrepareStats::default(),
+ );
let mut test = builder.build();
let mut host = test.host_handle();
@@ -1188,7 +1185,7 @@ pub(crate) mod tests {
test.from_prepare_queue_tx
.send(prepare::FromQueue {
artifact_id: artifact_id(1),
- result: Ok(PrepareStats::default()),
+ result: Ok(PrepareSuccess::default()),
})
.await
.unwrap();
@@ -1204,7 +1201,7 @@ pub(crate) mod tests {
test.from_prepare_queue_tx
.send(prepare::FromQueue {
artifact_id: artifact_id(2),
- result: Ok(PrepareStats::default()),
+ result: Ok(PrepareSuccess::default()),
})
.await
.unwrap();
@@ -1258,7 +1255,7 @@ pub(crate) mod tests {
test.from_prepare_queue_tx
.send(prepare::FromQueue {
artifact_id: artifact_id(1),
- result: Ok(PrepareStats::default()),
+ result: Ok(PrepareSuccess::default()),
})
.await
.unwrap();
@@ -1371,7 +1368,7 @@ pub(crate) mod tests {
test.from_prepare_queue_tx
.send(prepare::FromQueue {
artifact_id: artifact_id(2),
- result: Ok(PrepareStats::default()),
+ result: Ok(PrepareSuccess::default()),
})
.await
.unwrap();
@@ -1527,7 +1524,7 @@ pub(crate) mod tests {
test.from_prepare_queue_tx
.send(prepare::FromQueue {
artifact_id: artifact_id(1),
- result: Ok(PrepareStats::default()),
+ result: Ok(PrepareSuccess::default()),
})
.await
.unwrap();
@@ -1703,7 +1700,7 @@ pub(crate) mod tests {
test.from_prepare_queue_tx
.send(prepare::FromQueue {
artifact_id: artifact_id(1),
- result: Ok(PrepareStats::default()),
+ result: Ok(PrepareSuccess::default()),
})
.await
.unwrap();
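Since prechecking now resolves to `Result<(), PrepareError>`, callers get a plain pass/fail instead of `PrepareStats`. A hedged usage sketch against the API above (assumes a running `ValidationHost` and a prepared `PvfPrepData`; error handling elided):

```rust
// Sketch: submit a precheck request and await the host's pass/fail verdict.
async fn precheck(host: &mut ValidationHost, pvf: PvfPrepData) -> Result<(), PrepareError> {
    let (result_tx, result_rx) = futures::channel::oneshot::channel();
    host.precheck_pvf(pvf, result_tx).await.expect("host accepts requests");
    result_rx.await.expect("host keeps the sender until it responds")
}
```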
diff --git a/polkadot/node/core/pvf/src/lib.rs b/polkadot/node/core/pvf/src/lib.rs
index 102a91dbdad7..7e7a13252548 100644
--- a/polkadot/node/core/pvf/src/lib.rs
+++ b/polkadot/node/core/pvf/src/lib.rs
@@ -84,7 +84,7 @@
//! A pruning task will run at a fixed interval of time. This task will remove all artifacts that
//! weren't used or received a heads up signal for a while.
//!
-//! ## Execution
+//! ## Execution
//!
//! The execute workers will be fed by the requests from the execution queue, which is basically a
//! combination of a path to the compiled artifact and the
diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs
index 8e02f540d321..21af21e5b028 100644
--- a/polkadot/node/core/pvf/src/prepare/pool.rs
+++ b/polkadot/node/core/pvf/src/prepare/pool.rs
@@ -68,7 +68,7 @@ pub enum ToPool {
///
/// In either case, the worker is considered busy and no further `StartWork` messages should be
/// sent until either `Concluded` or `Rip` message is received.
- StartWork { worker: Worker, pvf: PvfPrepData, artifact_path: PathBuf },
+ StartWork { worker: Worker, pvf: PvfPrepData, cache_path: PathBuf },
}
/// A message sent from pool to its client.
@@ -232,7 +232,7 @@ fn handle_to_pool(
.boxed(),
);
},
- ToPool::StartWork { worker, pvf, artifact_path } => {
+ ToPool::StartWork { worker, pvf, cache_path } => {
if let Some(data) = spawned.get_mut(worker) {
if let Some(idle) = data.idle.take() {
let preparation_timer = metrics.time_preparation();
@@ -242,7 +242,7 @@ fn handle_to_pool(
worker,
idle,
pvf,
- artifact_path,
+ cache_path,
preparation_timer,
)
.boxed(),
@@ -303,10 +303,10 @@ async fn start_work_task(
worker: Worker,
idle: IdleWorker,
pvf: PvfPrepData,
- artifact_path: PathBuf,
+ cache_path: PathBuf,
_preparation_timer: Option,
) -> PoolEvent {
- let outcome = worker_intf::start_work(&metrics, idle, pvf, artifact_path).await;
+ let outcome = worker_intf::start_work(&metrics, idle, pvf, cache_path).await;
PoolEvent::StartWork(worker, outcome)
}
diff --git a/polkadot/node/core/pvf/src/prepare/queue.rs b/polkadot/node/core/pvf/src/prepare/queue.rs
index c38012d74548..c140a6cafda0 100644
--- a/polkadot/node/core/pvf/src/prepare/queue.rs
+++ b/polkadot/node/core/pvf/src/prepare/queue.rs
@@ -268,12 +268,12 @@ fn find_idle_worker(queue: &mut Queue) -> Option<Worker> {
}
async fn handle_from_pool(queue: &mut Queue, from_pool: pool::FromPool) -> Result<(), Fatal> {
- use pool::FromPool::*;
+ use pool::FromPool;
match from_pool {
- Spawned(worker) => handle_worker_spawned(queue, worker).await?,
- Concluded { worker, rip, result } =>
+ FromPool::Spawned(worker) => handle_worker_spawned(queue, worker).await?,
+ FromPool::Concluded { worker, rip, result } =>
handle_worker_concluded(queue, worker, rip, result).await?,
- Rip(worker) => handle_worker_rip(queue, worker).await?,
+ FromPool::Rip(worker) => handle_worker_rip(queue, worker).await?,
}
Ok(())
}
@@ -424,17 +424,17 @@ async fn spawn_extra_worker(queue: &mut Queue, critical: bool) -> Result<(), Fat
/// Attaches the work to the given worker telling the poll about the job.
async fn assign(queue: &mut Queue, worker: Worker, job: Job) -> Result<(), Fatal> {
let job_data = &mut queue.jobs[job];
-
- let artifact_id = ArtifactId::from_pvf_prep_data(&job_data.pvf);
- let artifact_path = artifact_id.path(&queue.cache_path);
-
job_data.worker = Some(worker);
queue.workers[worker].job = Some(job);
send_pool(
&mut queue.to_pool_tx,
- pool::ToPool::StartWork { worker, pvf: job_data.pvf.clone(), artifact_path },
+ pool::ToPool::StartWork {
+ worker,
+ pvf: job_data.pvf.clone(),
+ cache_path: queue.cache_path.clone(),
+ },
)
.await?;
@@ -491,7 +491,7 @@ mod tests {
use crate::host::tests::TEST_PREPARATION_TIMEOUT;
use assert_matches::assert_matches;
use futures::{future::BoxFuture, FutureExt};
- use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats};
+ use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareSuccess};
use slotmap::SlotMap;
use std::task::Poll;
@@ -612,7 +612,7 @@ mod tests {
test.send_from_pool(pool::FromPool::Concluded {
worker: w,
rip: false,
- result: Ok(PrepareStats::default()),
+ result: Ok(PrepareSuccess::default()),
});
assert_eq!(
@@ -651,7 +651,7 @@ mod tests {
test.send_from_pool(pool::FromPool::Concluded {
worker: w1,
rip: false,
- result: Ok(PrepareStats::default()),
+ result: Ok(PrepareSuccess::default()),
});
assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. });
@@ -697,7 +697,7 @@ mod tests {
test.send_from_pool(pool::FromPool::Concluded {
worker: w1,
rip: false,
- result: Ok(PrepareStats::default()),
+ result: Ok(PrepareSuccess::default()),
});
assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Kill(w1));
}
@@ -731,7 +731,7 @@ mod tests {
test.send_from_pool(pool::FromPool::Concluded {
worker: w1,
rip: true,
- result: Ok(PrepareStats::default()),
+ result: Ok(PrepareSuccess::default()),
});
// Since there is still work, the queue requested one extra worker to spawn to handle the
diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_intf.rs
index a22fa74b2fe1..e7f142a46bb8 100644
--- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs
+++ b/polkadot/node/core/pvf/src/prepare/worker_intf.rs
@@ -17,6 +17,7 @@
//! Host interface to the prepare worker.
use crate::{
+ artifacts::ArtifactId,
metrics::Metrics,
security,
worker_intf::{
@@ -27,8 +28,8 @@ use crate::{
};
use parity_scale_codec::{Decode, Encode};
use polkadot_node_core_pvf_common::{
- error::{PrepareError, PrepareResult},
- prepare::PrepareStats,
+ error::{PrepareError, PrepareResult, PrepareWorkerResult},
+ prepare::{PrepareStats, PrepareSuccess, PrepareWorkerSuccess},
pvf::PvfPrepData,
worker_dir, SecurityStatus,
};
@@ -81,7 +82,7 @@ pub enum Outcome {
/// final destination location.
RenameTmpFile {
worker: IdleWorker,
- result: PrepareResult,
+ result: PrepareWorkerResult,
err: String,
// Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible
// conversion to `Option<String>`.
@@ -115,7 +116,7 @@ pub async fn start_work(
metrics: &Metrics,
worker: IdleWorker,
pvf: PvfPrepData,
- artifact_path: PathBuf,
+ cache_path: PathBuf,
) -> Outcome {
let IdleWorker { stream, pid, worker_dir } = worker;
@@ -123,8 +124,8 @@ pub async fn start_work(
target: LOG_TARGET,
worker_pid = %pid,
?worker_dir,
- "starting prepare for {}",
- artifact_path.display(),
+ "starting prepare for {:?}",
+ pvf,
);
with_worker_dir_setup(
@@ -135,7 +136,7 @@ pub async fn start_work(
let preparation_timeout = pvf.prep_timeout();
let audit_log_file = security::AuditLogFile::try_open_and_seek_to_end().await;
- if let Err(err) = send_request(&mut stream, pvf.clone()).await {
+ if let Err(err) = send_request(&mut stream, &pvf).await {
gum::warn!(
target: LOG_TARGET,
worker_pid = %pid,
@@ -159,7 +160,7 @@ pub async fn start_work(
match result {
// Received bytes from worker within the time limit.
- Ok(Ok(prepare_result)) => {
+ Ok(Ok(prepare_worker_result)) => {
// Check if any syscall violations occurred during the job. For now this is only
// informative, as we are not enforcing the seccomp policy yet.
for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await {
@@ -175,10 +176,11 @@ pub async fn start_work(
handle_response(
metrics,
IdleWorker { stream, pid, worker_dir },
- prepare_result,
+ prepare_worker_result,
pid,
tmp_artifact_file,
- artifact_path,
+ &pvf,
+ &cache_path,
preparation_timeout,
)
.await
@@ -215,20 +217,22 @@ pub async fn start_work(
async fn handle_response(
metrics: &Metrics,
worker: IdleWorker,
- result: PrepareResult,
+ result: PrepareWorkerResult,
worker_pid: u32,
tmp_file: PathBuf,
- artifact_path: PathBuf,
+ pvf: &PvfPrepData,
+ cache_path: &PathBuf,
preparation_timeout: Duration,
) -> Outcome {
- let PrepareStats { cpu_time_elapsed, memory_stats } = match result.clone() {
- Ok(result) => result,
- // Timed out on the child. This should already be logged by the child.
- Err(PrepareError::TimedOut) => return Outcome::TimedOut,
- Err(PrepareError::JobDied(err)) => return Outcome::JobDied(err),
- Err(PrepareError::OutOfMemory) => return Outcome::OutOfMemory,
- Err(_) => return Outcome::Concluded { worker, result },
- };
+ let PrepareWorkerSuccess { checksum, stats: PrepareStats { cpu_time_elapsed, memory_stats } } =
+ match result.clone() {
+ Ok(result) => result,
+ // Timed out on the child. This should already be logged by the child.
+ Err(PrepareError::TimedOut) => return Outcome::TimedOut,
+ Err(PrepareError::JobDied(err)) => return Outcome::JobDied(err),
+ Err(PrepareError::OutOfMemory) => return Outcome::OutOfMemory,
+ Err(err) => return Outcome::Concluded { worker, result: Err(err) },
+ };
if cpu_time_elapsed > preparation_timeout {
// The job didn't complete within the timeout.
@@ -243,6 +247,9 @@ async fn handle_response(
return Outcome::TimedOut
}
+ let artifact_id = ArtifactId::from_pvf_prep_data(pvf);
+ let artifact_path = artifact_id.path(cache_path, &checksum);
+
gum::debug!(
target: LOG_TARGET,
%worker_pid,
@@ -252,7 +259,13 @@ async fn handle_response(
);
let outcome = match tokio::fs::rename(&tmp_file, &artifact_path).await {
- Ok(()) => Outcome::Concluded { worker, result },
+ Ok(()) => Outcome::Concluded {
+ worker,
+ result: Ok(PrepareSuccess {
+ path: artifact_path,
+ stats: PrepareStats { cpu_time_elapsed, memory_stats: memory_stats.clone() },
+ }),
+ },
Err(err) => {
gum::warn!(
target: LOG_TARGET,
@@ -329,14 +342,14 @@ where
outcome
}
-async fn send_request(stream: &mut UnixStream, pvf: PvfPrepData) -> io::Result<()> {
+async fn send_request(stream: &mut UnixStream, pvf: &PvfPrepData) -> io::Result<()> {
framed_send(stream, &pvf.encode()).await?;
Ok(())
}
-async fn recv_response(stream: &mut UnixStream, pid: u32) -> io::Result<PrepareResult> {
+async fn recv_response(stream: &mut UnixStream, pid: u32) -> io::Result<PrepareWorkerResult> {
let result = framed_recv(stream).await?;
- let result = PrepareResult::decode(&mut &result[..]).map_err(|e| {
+ let result = PrepareWorkerResult::decode(&mut &result[..]).map_err(|e| {
// We received invalid bytes from the worker.
let bound_bytes = &result[..result.len().min(4)];
gum::warn!(
diff --git a/polkadot/node/core/pvf/src/worker_intf.rs b/polkadot/node/core/pvf/src/worker_intf.rs
index 8f9a7de354b8..5e589b9abcee 100644
--- a/polkadot/node/core/pvf/src/worker_intf.rs
+++ b/polkadot/node/core/pvf/src/worker_intf.rs
@@ -198,7 +198,7 @@ pub async fn tmppath_in(prefix: &str, dir: &Path) -> io::Result<PathBuf> {
/// The same as [`tmppath_in`], but uses [`std::env::temp_dir`] as the directory.
pub async fn tmppath(prefix: &str) -> io::Result<PathBuf> {
- let temp_dir = PathBuf::from(std::env::temp_dir());
+ let temp_dir = std::env::temp_dir();
tmppath_in(prefix, &temp_dir).await
}
@@ -453,7 +453,7 @@ impl Drop for WorkerDir {
/// artifacts from previous jobs.
pub fn clear_worker_dir_path(worker_dir_path: &Path) -> io::Result<()> {
fn remove_dir_contents(path: &Path) -> io::Result<()> {
- for entry in std::fs::read_dir(&path)? {
+ for entry in std::fs::read_dir(path)? {
let entry = entry?;
let path = entry.path();
diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs
index d2d842cf84a3..5bdf49cc719e 100644
--- a/polkadot/node/core/pvf/tests/it/main.rs
+++ b/polkadot/node/core/pvf/tests/it/main.rs
@@ -20,8 +20,7 @@ use assert_matches::assert_matches;
use parity_scale_codec::Encode as _;
use polkadot_node_core_pvf::{
start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, PrepareError,
- PrepareJobKind, PrepareStats, PvfPrepData, ValidationError, ValidationHost,
- JOB_TIMEOUT_WALL_CLOCK_FACTOR,
+ PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR,
};
use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult};
use polkadot_primitives::{ExecutorParam, ExecutorParams};
@@ -70,7 +69,7 @@ impl TestHost {
&self,
code: &[u8],
executor_params: ExecutorParams,
- ) -> Result<PrepareStats, PrepareError> {
+ ) -> Result<(), PrepareError> {
let (result_tx, result_rx) = futures::channel::oneshot::channel();
let code = sp_maybe_compressed_blob::decompress(code, 16 * 1024 * 1024)
diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs
index 474887ee8df7..20b6654638e7 100644
--- a/polkadot/node/malus/src/variants/common.rs
+++ b/polkadot/node/malus/src/variants/common.rs
@@ -30,7 +30,7 @@ use polkadot_node_subsystem::{
use polkadot_primitives::{
CandidateCommitments, CandidateDescriptor, CandidateReceipt, PersistedValidationData,
- PvfExecTimeoutKind,
+ PvfExecKind,
};
use futures::channel::oneshot;
@@ -90,10 +90,10 @@ impl FakeCandidateValidation {
}
}
- fn should_misbehave(&self, timeout: PvfExecTimeoutKind) -> bool {
+ fn should_misbehave(&self, timeout: PvfExecKind) -> bool {
match timeout {
- PvfExecTimeoutKind::Backing => self.includes_backing(),
- PvfExecTimeoutKind::Approval => self.includes_approval(),
+ PvfExecKind::Backing => self.includes_backing(),
+ PvfExecKind::Approval => self.includes_approval(),
}
}
}
@@ -279,13 +279,13 @@ where
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
},
} => {
match self.fake_validation {
- x if x.misbehaves_valid() && x.should_misbehave(exec_timeout_kind) => {
+ x if x.misbehaves_valid() && x.should_misbehave(exec_kind) => {
// Behave normally if the `PoV` is not known to be malicious.
if pov.block_data.0.as_slice() != MALICIOUS_POV {
return Some(FromOrchestra::Communication {
@@ -295,7 +295,7 @@ where
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
},
})
@@ -333,14 +333,14 @@ where
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
},
})
},
}
},
- x if x.misbehaves_invalid() && x.should_misbehave(exec_timeout_kind) => {
+ x if x.misbehaves_invalid() && x.should_misbehave(exec_kind) => {
// Set the validation result to invalid with probability `p` and trigger a
// dispute
let behave_maliciously = self.distribution.sample(&mut rand::thread_rng());
@@ -373,7 +373,7 @@ where
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
},
})
@@ -388,7 +388,7 @@ where
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
},
}),
@@ -401,13 +401,13 @@ where
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
..
},
} => {
match self.fake_validation {
- x if x.misbehaves_valid() && x.should_misbehave(exec_timeout_kind) => {
+ x if x.misbehaves_valid() && x.should_misbehave(exec_kind) => {
// Behave normally if the `PoV` is not known to be malicious.
if pov.block_data.0.as_slice() != MALICIOUS_POV {
return Some(FromOrchestra::Communication {
@@ -415,7 +415,7 @@ where
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
},
})
@@ -445,13 +445,13 @@ where
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
},
}),
}
},
- x if x.misbehaves_invalid() && x.should_misbehave(exec_timeout_kind) => {
+ x if x.misbehaves_invalid() && x.should_misbehave(exec_kind) => {
// Maliciously set the validation result to invalid for a valid candidate
// with probability `p`
let behave_maliciously = self.distribution.sample(&mut rand::thread_rng());
@@ -479,7 +479,7 @@ where
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
},
})
@@ -491,7 +491,7 @@ where
candidate_receipt,
pov,
executor_params,
- exec_timeout_kind,
+ exec_kind,
response_sender,
},
}),
diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs
index b2c0ea2f75a8..857cdba673db 100644
--- a/polkadot/node/overseer/examples/minimal-example.rs
+++ b/polkadot/node/overseer/examples/minimal-example.rs
@@ -32,7 +32,7 @@ use polkadot_overseer::{
gen::{FromOrchestra, SpawnedSubsystem},
HeadSupportsParachains, SubsystemError,
};
-use polkadot_primitives::{CandidateReceipt, Hash, PvfExecTimeoutKind};
+use polkadot_primitives::{CandidateReceipt, Hash, PvfExecKind};
struct AlwaysSupportsParachains;
@@ -77,7 +77,7 @@ impl Subsystem1 {
candidate_receipt,
pov: PoV { block_data: BlockData(Vec::new()) }.into(),
executor_params: Default::default(),
- exec_timeout_kind: PvfExecTimeoutKind::Backing,
+ exec_kind: PvfExecKind::Backing,
response_sender: tx,
};
ctx.send_message(msg).await;
diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs
index 254f5fe45120..0494274367d9 100644
--- a/polkadot/node/overseer/src/tests.rs
+++ b/polkadot/node/overseer/src/tests.rs
@@ -30,7 +30,7 @@ use polkadot_node_subsystem_types::messages::{
};
use polkadot_primitives::{
CandidateHash, CandidateReceipt, CollatorPair, Id as ParaId, InvalidDisputeStatementKind,
- PvfExecTimeoutKind, SessionIndex, ValidDisputeStatementKind, ValidatorIndex,
+ PvfExecKind, SessionIndex, ValidDisputeStatementKind, ValidatorIndex,
};
use crate::{
@@ -106,7 +106,7 @@ where
candidate_receipt,
pov: PoV { block_data: BlockData(Vec::new()) }.into(),
executor_params: Default::default(),
- exec_timeout_kind: PvfExecTimeoutKind::Backing,
+ exec_kind: PvfExecKind::Backing,
response_sender: tx,
})
.await;
@@ -804,7 +804,7 @@ fn test_candidate_validation_msg() -> CandidateValidationMessage {
candidate_receipt,
pov,
executor_params: Default::default(),
- exec_timeout_kind: PvfExecTimeoutKind::Backing,
+ exec_kind: PvfExecKind::Backing,
response_sender,
}
}
diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs
index 43456daec302..44c6f27b17cc 100644
--- a/polkadot/node/subsystem-types/src/messages.rs
+++ b/polkadot/node/subsystem-types/src/messages.rs
@@ -47,7 +47,7 @@ use polkadot_primitives::{
CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupIndex,
GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, InboundDownwardMessage,
InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData,
- PvfCheckStatement, PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield,
+ PvfCheckStatement, PvfExecKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield,
SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex,
ValidatorSignature,
};
@@ -150,8 +150,8 @@ pub enum CandidateValidationMessage {
pov: Arc<PoV>,
/// Session's executor parameters
executor_params: ExecutorParams,
- /// Execution timeout kind (backing/approvals)
- exec_timeout_kind: PvfExecTimeoutKind,
+ /// Execution kind, used for timeouts and retries (backing/approvals)
+ exec_kind: PvfExecKind,
/// The sending side of the response channel
response_sender: oneshot::Sender<Result<ValidationResult, ValidationFailed>>,
},
@@ -175,8 +175,8 @@ pub enum CandidateValidationMessage {
pov: Arc<PoV>,
/// Session's executor parameters
executor_params: ExecutorParams,
- /// Execution timeout kind (backing/approvals)
- exec_timeout_kind: PvfExecTimeoutKind,
+ /// Execution kind, used for timeouts and retries (backing/approvals)
+ exec_kind: PvfExecKind,
/// The sending side of the response channel
response_sender: oneshot::Sender<Result<ValidationResult, ValidationFailed>>,
},
diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs
index 4ba8b8b031fc..2570bcadf606 100644
--- a/polkadot/primitives/src/lib.rs
+++ b/polkadot/primitives/src/lib.rs
@@ -48,11 +48,11 @@ pub use v6::{
HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec,
InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce,
OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry,
- PersistedValidationData, PvfCheckStatement, PvfExecTimeoutKind, PvfPrepTimeoutKind,
- RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels,
- RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex,
- SessionInfo, Signature, Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields,
- SignedStatement, SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield,
+ PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, RuntimeMetricLabel,
+ RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp,
+ RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signature,
+ Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, SignedStatement,
+ SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield,
UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead,
UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode,
ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation,
diff --git a/polkadot/primitives/src/v6/executor_params.rs b/polkadot/primitives/src/v6/executor_params.rs
index bb9980f68796..112a529f62b0 100644
--- a/polkadot/primitives/src/v6/executor_params.rs
+++ b/polkadot/primitives/src/v6/executor_params.rs
@@ -21,7 +21,7 @@
//! by the first element of the vector). Decoding to a usable semantics structure is
//! done in `polkadot-node-core-pvf`.
-use crate::{BlakeTwo256, HashT as _, PvfExecTimeoutKind, PvfPrepTimeoutKind};
+use crate::{BlakeTwo256, HashT as _, PvfExecKind, PvfPrepKind};
use parity_scale_codec::{Decode, Encode};
use polkadot_core_primitives::Hash;
use scale_info::TypeInfo;
@@ -45,7 +45,7 @@ pub const PRECHECK_MEM_MAX_LO: u64 = 256 * 1024 * 1024;
pub const PRECHECK_MEM_MAX_HI: u64 = 16 * 1024 * 1024 * 1024;
// Default PVF timeouts. Must never be changed! Use executor environment parameters to adjust them.
-// See also `PvfPrepTimeoutKind` and `PvfExecTimeoutKind` docs.
+// See also `PvfPrepKind` and `PvfExecKind` docs.
/// Default PVF preparation timeout for prechecking requests.
pub const DEFAULT_PRECHECK_PREPARATION_TIMEOUT: Duration = Duration::from_secs(60);
@@ -99,12 +99,12 @@ pub enum ExecutorParam {
/// Always ensure that `precheck_timeout` < `lenient_timeout`.
/// When absent, the default values will be used.
#[codec(index = 5)]
- PvfPrepTimeout(PvfPrepTimeoutKind, u64),
+ PvfPrepTimeout(PvfPrepKind, u64),
/// PVF execution timeouts, in milliseconds.
/// Always ensure that `backing_timeout` < `approval_timeout`.
/// When absent, the default values will be used.
#[codec(index = 6)]
- PvfExecTimeout(PvfExecTimeoutKind, u64),
+ PvfExecTimeout(PvfExecKind, u64),
/// Enables WASM bulk memory proposal
#[codec(index = 7)]
WasmExtBulkMemory,
@@ -174,7 +174,7 @@ impl ExecutorParams {
}
/// Returns a PVF preparation timeout, if any
- pub fn pvf_prep_timeout(&self, kind: PvfPrepTimeoutKind) -> Option<Duration> {
+ pub fn pvf_prep_timeout(&self, kind: PvfPrepKind) -> Option<Duration> {
for param in &self.0 {
if let ExecutorParam::PvfPrepTimeout(k, timeout) = param {
if kind == *k {
@@ -186,7 +186,7 @@ impl ExecutorParams {
}
/// Returns a PVF execution timeout, if any
- pub fn pvf_exec_timeout(&self, kind: PvfExecTimeoutKind) -> Option<Duration> {
+ pub fn pvf_exec_timeout(&self, kind: PvfExecKind) -> Option<Duration> {
for param in &self.0 {
if let ExecutorParam::PvfExecTimeout(k, timeout) = param {
if kind == *k {
@@ -242,12 +242,12 @@ impl ExecutorParams {
StackNativeMax(_) => "StackNativeMax",
PrecheckingMaxMemory(_) => "PrecheckingMaxMemory",
PvfPrepTimeout(kind, _) => match kind {
- PvfPrepTimeoutKind::Precheck => "PvfPrepTimeoutKind::Precheck",
- PvfPrepTimeoutKind::Lenient => "PvfPrepTimeoutKind::Lenient",
+ PvfPrepKind::Precheck => "PvfPrepKind::Precheck",
+ PvfPrepKind::Prepare => "PvfPrepKind::Prepare",
},
PvfExecTimeout(kind, _) => match kind {
- PvfExecTimeoutKind::Backing => "PvfExecTimeoutKind::Backing",
- PvfExecTimeoutKind::Approval => "PvfExecTimeoutKind::Approval",
+ PvfExecKind::Backing => "PvfExecKind::Backing",
+ PvfExecKind::Approval => "PvfExecKind::Approval",
},
WasmExtBulkMemory => "WasmExtBulkMemory",
};
@@ -297,30 +297,23 @@ impl ExecutorParams {
}
if let (Some(precheck), Some(lenient)) = (
- seen.get("PvfPrepTimeoutKind::Precheck")
+ seen.get("PvfPrepKind::Precheck")
.or(Some(&DEFAULT_PRECHECK_PREPARATION_TIMEOUT_MS)),
- seen.get("PvfPrepTimeoutKind::Lenient")
+ seen.get("PvfPrepKind::Prepare")
.or(Some(&DEFAULT_LENIENT_PREPARATION_TIMEOUT_MS)),
) {
if *precheck >= *lenient {
- return Err(IncompatibleValues(
- "PvfPrepTimeoutKind::Precheck",
- "PvfPrepTimeoutKind::Lenient",
- ))
+ return Err(IncompatibleValues("PvfPrepKind::Precheck", "PvfPrepKind::Prepare"))
}
}
if let (Some(backing), Some(approval)) = (
- seen.get("PvfExecTimeoutKind::Backing")
- .or(Some(&DEFAULT_BACKING_EXECUTION_TIMEOUT_MS)),
- seen.get("PvfExecTimeoutKind::Approval")
+ seen.get("PvfExecKind::Backing").or(Some(&DEFAULT_BACKING_EXECUTION_TIMEOUT_MS)),
+ seen.get("PvfExecKind::Approval")
.or(Some(&DEFAULT_APPROVAL_EXECUTION_TIMEOUT_MS)),
) {
if *backing >= *approval {
- return Err(IncompatibleValues(
- "PvfExecTimeoutKind::Backing",
- "PvfExecTimeoutKind::Approval",
- ))
+ return Err(IncompatibleValues("PvfExecKind::Backing", "PvfExecKind::Approval"))
}
}
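To make the lookup above concrete, here is a minimal self-contained sketch of resolving an execution timeout with fallback to the defaults. The enum and structs are local stand-ins, not the real `polkadot-primitives` types, and the 2s/12s defaults mirror the values used in the benchmarks further down this diff.

```rust
use std::time::Duration;

// Local stand-ins for the real `polkadot-primitives` types.
#[derive(Clone, Copy, PartialEq, Eq)]
enum PvfExecKind {
    Backing,
    Approval,
}

enum ExecutorParam {
    PvfExecTimeout(PvfExecKind, u64),
}

struct ExecutorParams(Vec<ExecutorParam>);

impl ExecutorParams {
    // Mirrors `pvf_exec_timeout` above: the first matching override wins.
    fn pvf_exec_timeout(&self, kind: PvfExecKind) -> Option<Duration> {
        self.0.iter().find_map(|param| match param {
            ExecutorParam::PvfExecTimeout(k, ms) if *k == kind =>
                Some(Duration::from_millis(*ms)),
            _ => None,
        })
    }
}

fn effective_exec_timeout(params: &ExecutorParams, kind: PvfExecKind) -> Duration {
    // Fall back to the defaults when the session supplies no override.
    let default_ms = match kind {
        PvfExecKind::Backing => 2_000,   // assumed DEFAULT_BACKING_EXECUTION_TIMEOUT
        PvfExecKind::Approval => 12_000, // assumed DEFAULT_APPROVAL_EXECUTION_TIMEOUT
    };
    params.pvf_exec_timeout(kind).unwrap_or(Duration::from_millis(default_ms))
}
```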
diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs
index 9371b3db406b..83b590dc3203 100644
--- a/polkadot/primitives/src/v6/mod.rs
+++ b/polkadot/primitives/src/v6/mod.rs
@@ -1781,30 +1781,22 @@ impl WellKnownKey {
}
}
-/// Type discriminator for PVF preparation timeouts
+/// Type discriminator for PVF preparation.
#[derive(Encode, Decode, TypeInfo, Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
-pub enum PvfPrepTimeoutKind {
- /// For prechecking requests, the time period after which the preparation worker is considered
- /// unresponsive and will be killed.
+pub enum PvfPrepKind {
+ /// For prechecking requests.
Precheck,
- /// For execution and heads-up requests, the time period after which the preparation worker is
- /// considered unresponsive and will be killed. More lenient than the timeout for prechecking
- /// to prevent honest validators from timing out on valid PVFs.
- Lenient,
+ /// For execution and heads-up requests.
+ Prepare,
}
-/// Type discriminator for PVF execution timeouts
+/// Type discriminator for PVF execution.
#[derive(Encode, Decode, TypeInfo, Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
-pub enum PvfExecTimeoutKind {
- /// The amount of time to spend on execution during backing.
+pub enum PvfExecKind {
+ /// For backing requests.
Backing,
-
- /// The amount of time to spend on execution during approval or disputes.
- ///
- /// This should be much longer than the backing execution timeout to ensure that in the
- /// absence of extremely large disparities between hardware, blocks that pass backing are
- /// considered executable by approval checkers or dispute participants.
+ /// For approval and dispute requests.
Approval,
}
diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md
index 4dbb7980c1be..74d88ba3ad99 100644
--- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md
+++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md
@@ -31,6 +31,10 @@ hopefully resolve. We use a more brief delay here (1 second as opposed to 15
minutes for preparation (see above)), because a successful execution must happen
in a short amount of time.
+If execution fails during the backing phase, we do not retry at all: a candidate that
+fails even once may be nondeterministic, and refusing to retry reduces the chance of
+such candidates getting backed and of honest backers getting slashed.
+
We currently know of the following specific cases that will lead to a retried
execution request:
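As a rough sketch of the no-retry rule added above (hypothetical helper; the real decision lives in the candidate-validation subsystem and is more involved):

```rust
#[derive(Clone, Copy)]
enum PvfExecKind {
    Backing,
    Approval,
}

fn should_retry_execution(kind: PvfExecKind, attempts: u32, max_attempts: u32) -> bool {
    match kind {
        // Never retry while backing: a candidate that failed once may be
        // nondeterministic, and backing it risks slashing honest backers.
        PvfExecKind::Backing => false,
        // Approval and dispute execution may retry transient failures.
        PvfExecKind::Approval => attempts < max_attempts,
    }
}
```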
diff --git a/polkadot/runtime/parachains/src/configuration/benchmarking.rs b/polkadot/runtime/parachains/src/configuration/benchmarking.rs
index 508e0579a09d..67daf1c45988 100644
--- a/polkadot/runtime/parachains/src/configuration/benchmarking.rs
+++ b/polkadot/runtime/parachains/src/configuration/benchmarking.rs
@@ -17,7 +17,7 @@
use crate::configuration::*;
use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult};
use frame_system::RawOrigin;
-use primitives::{ExecutorParam, ExecutorParams, PvfExecTimeoutKind, PvfPrepTimeoutKind};
+use primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind};
use sp_runtime::traits::One;
benchmarks! {
@@ -41,10 +41,10 @@ benchmarks! {
ExecutorParam::StackNativeMax(256 * 1024 * 1024),
ExecutorParam::WasmExtBulkMemory,
ExecutorParam::PrecheckingMaxMemory(2 * 1024 * 1024 * 1024),
- ExecutorParam::PvfPrepTimeout(PvfPrepTimeoutKind::Precheck, 60_000),
- ExecutorParam::PvfPrepTimeout(PvfPrepTimeoutKind::Lenient, 360_000),
- ExecutorParam::PvfExecTimeout(PvfExecTimeoutKind::Backing, 2_000),
- ExecutorParam::PvfExecTimeout(PvfExecTimeoutKind::Approval, 12_000),
+ ExecutorParam::PvfPrepTimeout(PvfPrepKind::Precheck, 60_000),
+ ExecutorParam::PvfPrepTimeout(PvfPrepKind::Prepare, 360_000),
+ ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2_000),
+ ExecutorParam::PvfExecTimeout(PvfExecKind::Approval, 12_000),
][..]))
set_config_with_perbill {}: set_on_demand_fee_variability(RawOrigin::Root, Perbill::from_percent(100))
diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml
index c415527c372c..c4464c5f787d 100644
--- a/substrate/client/cli/Cargo.toml
+++ b/substrate/client/cli/Cargo.toml
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
array-bytes = "6.1"
chrono = "0.4.27"
clap = { version = "4.4.6", features = ["derive", "string", "wrap_help"] }
-fdlimit = "0.2.1"
+fdlimit = "0.3.0"
futures = "0.3.21"
itertools = "0.10.3"
libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"]}
diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs
index 4d218da6aa89..b842df5a690a 100644
--- a/substrate/client/cli/src/config.rs
+++ b/substrate/client/cli/src/config.rs
@@ -605,14 +605,25 @@ pub trait CliConfiguration: Sized {
logger.init()?;
- if let Some(new_limit) = fdlimit::raise_fd_limit() {
- if new_limit < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT {
+ match fdlimit::raise_fd_limit() {
+ Ok(fdlimit::Outcome::LimitRaised { to, .. }) =>
+ if to < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT {
+ warn!(
+ "Low open file descriptor limit configured for the process. \
+ Current value: {:?}, recommended value: {:?}.",
+ to, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT,
+ );
+ },
+ Ok(fdlimit::Outcome::Unsupported) => {
+ // Unsupported platform (non-Linux)
+ },
+ Err(error) => {
warn!(
- "Low open file descriptor limit configured for the process. \
- Current value: {:?}, recommended value: {:?}.",
- new_limit, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT,
+ "Failed to configure file descriptor limit for the process: \
+ {}, recommended value: {:?}.",
+ error, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT,
);
- }
+ },
}
Ok(())
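For reference, a minimal sketch of the fdlimit 0.3 API this hunk migrates to: `raise_fd_limit` now returns `Result<Outcome, Error>` rather than the `Option<u64>` of 0.2.

```rust
use fdlimit::{raise_fd_limit, Outcome};

fn raise_limit_or_warn(recommended: u64) {
    match raise_fd_limit() {
        // The limit was raised, but still sits below the recommended value.
        Ok(Outcome::LimitRaised { from, to }) if to < recommended =>
            eprintln!("fd limit raised from {from} to {to}, below recommended {recommended}"),
        Ok(Outcome::LimitRaised { .. }) => (),
        // Reported on platforms where raising the limit is not supported.
        Ok(Outcome::Unsupported) => (),
        Err(error) => eprintln!("failed to raise fd limit: {error}"),
    }
}
```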
diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs
index 309d8c5135be..966c74103657 100644
--- a/substrate/client/consensus/beefy/src/worker.rs
+++ b/substrate/client/consensus/beefy/src/worker.rs
@@ -456,6 +456,7 @@ where
.filter(|genesis| *genesis == self.persisted_state.pallet_genesis)
.ok_or(Error::ConsensusReset)?;
+ let mut new_session_added = false;
if *header.number() > self.best_grandpa_block() {
// update best GRANDPA finalized block we have seen
self.persisted_state.set_best_grandpa(header.clone());
@@ -475,9 +476,15 @@ where
{
if let Some(new_validator_set) = find_authorities_change::<B>(&header) {
self.init_session_at(new_validator_set, *header.number());
+ new_session_added = true;
}
}
+ if new_session_added {
+ crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state)
+ .map_err(|e| Error::Backend(e.to_string()))?;
+ }
+
// Update gossip validator votes filter.
if let Err(e) = self
.persisted_state
@@ -848,15 +855,10 @@ where
.fuse(),
);
+ self.process_new_state();
let error = loop {
- // Act on changed 'state'.
- self.process_new_state();
-
// Mutable reference used to drive the gossip engine.
let mut gossip_engine = &mut self.comms.gossip_engine;
- // Use temp val and report after async section,
- // to avoid having to Mutex-wrap `gossip_engine`.
- let mut gossip_report: Option<PeerReport> = None;
// Wait for, and handle external events.
// The branches below only change 'state', actual voting happens afterwards,
@@ -884,10 +886,15 @@ where
if let Err(err) = self.triage_incoming_justif(justif) {
debug!(target: LOG_TARGET, "🥩 {}", err);
}
- gossip_report = Some(peer_report);
+ self.comms.gossip_engine.report(peer_report.who, peer_report.cost_benefit);
+ },
+ ResponseInfo::PeerReport(peer_report) => {
+ self.comms.gossip_engine.report(peer_report.who, peer_report.cost_benefit);
+ continue;
+ },
+ ResponseInfo::Pending => {
+ continue;
},
- ResponseInfo::PeerReport(peer_report) => gossip_report = Some(peer_report),
- ResponseInfo::Pending => (),
}
},
justif = block_import_justif.next() => {
@@ -924,12 +931,15 @@ where
},
// Process peer reports.
report = self.comms.gossip_report_stream.next() => {
- gossip_report = report;
+ if let Some(PeerReport { who, cost_benefit }) = report {
+ self.comms.gossip_engine.report(who, cost_benefit);
+ }
+ continue;
},
}
- if let Some(PeerReport { who, cost_benefit }) = gossip_report {
- self.comms.gossip_engine.report(who, cost_benefit);
- }
+
+ // Act on changed 'state'.
+ self.process_new_state();
};
// return error _and_ `comms` that can be reused
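The session-persistence change above follows a simple latch pattern: mutate in-memory state while handling the notification, then write it out once if anything actually changed. A generic sketch with hypothetical types:

```rust
fn process_header<Session, E>(
    active_sessions: &mut Vec<Session>,
    discovered: Vec<Session>,
    persist: impl FnOnce(&[Session]) -> Result<(), E>,
) -> Result<(), E> {
    let mut new_session_added = false;
    for session in discovered {
        active_sessions.push(session);
        new_session_added = true;
    }
    // Mirrors the `write_voter_state` call above: skip the backend write when
    // nothing changed, keeping the per-notification hot path cheap.
    if new_session_added {
        persist(active_sessions)?;
    }
    Ok(())
}
```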
diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml
index 670312e4161a..c6091f97d637 100644
--- a/substrate/client/service/test/Cargo.toml
+++ b/substrate/client/service/test/Cargo.toml
@@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
async-channel = "1.8.0"
array-bytes = "6.1"
-fdlimit = "0.2.1"
+fdlimit = "0.3.0"
futures = "0.3.21"
log = "0.4.17"
parity-scale-codec = "3.6.1"
diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs
index 9700c7643c48..456df73459a3 100644
--- a/substrate/client/service/test/src/lib.rs
+++ b/substrate/client/service/test/src/lib.rs
@@ -285,7 +285,7 @@ where
base_port: u16,
) -> TestNet {
sp_tracing::try_init_simple();
- fdlimit::raise_fd_limit();
+ fdlimit::raise_fd_limit().unwrap();
let runtime = Runtime::new().expect("Error creating tokio runtime");
let mut net = TestNet {
runtime,
diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs
index 909a930e3821..ab4bd51ffc0e 100644
--- a/substrate/frame/nomination-pools/src/lib.rs
+++ b/substrate/frame/nomination-pools/src/lib.rs
@@ -1626,6 +1626,10 @@ pub mod pallet {
#[pallet::constant]
type MaxPointsToBalance: Get<u8>;
+ /// The maximum number of simultaneous unbonding chunks that can exist per member.
+ #[pallet::constant]
+ type MaxUnbonding: Get<u32>;
+
/// Infallible method for converting `Currency::Balance` to `U256`.
type BalanceToU256: Convert<BalanceOf<Self>, U256>;
@@ -1644,9 +1648,6 @@ pub mod pallet {
/// The maximum length, in bytes, that a pool's metadata may be.
type MaxMetadataLen: Get<u32>;
-
- /// The maximum number of simultaneous unbonding chunks that can exist per member.
- type MaxUnbonding: Get<u32>;
}
/// The sum of funds across all pools.
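Besides moving the item, the hunk above marks `MaxUnbonding` as a `#[pallet::constant]`. A minimal, hypothetical pallet skeleton showing what that attribute provides — the configured value is exported as a pallet constant in runtime metadata, readable by wallets and tooling without a runtime call:

```rust
#[frame_support::pallet]
pub mod pallet {
    use frame_support::pallet_prelude::*;

    #[pallet::config]
    pub trait Config: frame_system::Config {
        /// The maximum number of simultaneous unbonding chunks per member.
        ///
        /// Exposed in runtime metadata via `#[pallet::constant]`.
        #[pallet::constant]
        type MaxUnbonding: Get<u32>;
    }

    #[pallet::pallet]
    pub struct Pallet<T>(_);
}
```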
diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs
index 9c36c94b87b4..40f30735258f 100644
--- a/substrate/frame/staking/src/pallet/impls.rs
+++ b/substrate/frame/staking/src/pallet/impls.rs
@@ -794,7 +794,7 @@ impl<T: Config> Pallet<T> {
stash: T::AccountId,
exposure: Exposure<T::AccountId, BalanceOf<T>>,
) {
- <ErasStakers<T>>::insert(&current_era, &stash, &exposure);
+ EraInfo::::set_exposure(current_era, &stash, exposure);
}
#[cfg(feature = "runtime-benchmarks")]
@@ -1745,9 +1745,16 @@ impl<T: Config> StakingInterface for Pallet<T> {
}
fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool {
+ // look in the non paged exposures
+ // FIXME: Can be cleaned up once non paged exposures are cleared (https://github.com/paritytech/polkadot-sdk/issues/433)
ErasStakers::<T>::iter_prefix(era).any(|(validator, exposures)| {
validator == *who || exposures.others.iter().any(|i| i.who == *who)
})
+ ||
+ // look in the paged exposures
+ ErasStakersPaged::<T>::iter_prefix((era,)).any(|((validator, _), exposure_page)| {
+ validator == *who || exposure_page.others.iter().any(|i| i.who == *who)
+ })
}
fn status(
who: &Self::AccountId,
@@ -1812,6 +1819,7 @@ impl<T: Config> Pallet<T> {
Self::check_nominators()?;
Self::check_exposures()?;
+ Self::check_paged_exposures()?;
Self::check_ledgers()?;
Self::check_count()
}
@@ -1860,6 +1868,70 @@ impl<T: Config> Pallet<T> {
.collect::<Result<(), TryRuntimeError>>()
}
+ fn check_paged_exposures() -> Result<(), TryRuntimeError> {
+ use sp_staking::PagedExposureMetadata;
+ use sp_std::collections::btree_map::BTreeMap;
+
+ // Sanity check for the paged exposure of the active era.
+ let mut exposures: BTreeMap<T::AccountId, PagedExposureMetadata<BalanceOf<T>>> =
+ BTreeMap::new();
+ let era = Self::active_era().unwrap().index;
+ let accumulator_default = PagedExposureMetadata {
+ total: Zero::zero(),
+ own: Zero::zero(),
+ nominator_count: 0,
+ page_count: 0,
+ };
+
+ ErasStakersPaged::<T>::iter_prefix((era,))
+ .map(|((validator, _page), expo)| {
+ ensure!(
+ expo.page_total ==
+ expo.others.iter().map(|e| e.value).fold(Zero::zero(), |acc, x| acc + x),
+ "wrong total exposure for the page.",
+ );
+
+ let metadata = exposures.get(&validator).unwrap_or(&accumulator_default);
+ exposures.insert(
+ validator,
+ PagedExposureMetadata {
+ total: metadata.total + expo.page_total,
+ own: metadata.own,
+ nominator_count: metadata.nominator_count + expo.others.len() as u32,
+ page_count: metadata.page_count + 1,
+ },
+ );
+
+ Ok(())
+ })
+ .collect::<Result<(), TryRuntimeError>>()?;
+
+ exposures
+ .iter()
+ .map(|(validator, metadata)| {
+ let actual_overview = ErasStakersOverview::<T>::get(era, validator);
+
+ ensure!(actual_overview.is_some(), "No overview found for a paged exposure");
+ let actual_overview = actual_overview.unwrap();
+
+ ensure!(
+ actual_overview.total == metadata.total + actual_overview.own,
+ "Exposure metadata does not have correct total exposed stake."
+ );
+ ensure!(
+ actual_overview.nominator_count == metadata.nominator_count,
+ "Exposure metadata does not have correct count of nominators."
+ );
+ ensure!(
+ actual_overview.page_count == metadata.page_count,
+ "Exposure metadata does not have correct count of pages."
+ );
+
+ Ok(())
+ })
+ .collect::<Result<(), TryRuntimeError>>()
+ }
+
fn check_nominators() -> Result<(), TryRuntimeError> {
// a check per nominator to ensure their entire stake is correctly distributed. Will only
// kick-in if the nomination was submitted before the current era.
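The per-validator bookkeeping in `check_paged_exposures` above is a plain map-accumulate; the same shape in a standalone sketch, with integer stand-ins for the staking types:

```rust
use std::collections::BTreeMap;

// Integer stand-in for `PagedExposureMetadata`.
#[derive(Clone, Copy, Default)]
struct Meta {
    total: u64,
    nominator_count: u32,
    page_count: u32,
}

// Fold ((validator, page), (page_total, nominator_count)) records into one
// metadata entry per validator, as `check_paged_exposures` does above.
fn accumulate(pages: &[((u64, u32), (u64, u32))]) -> BTreeMap<u64, Meta> {
    let mut out: BTreeMap<u64, Meta> = BTreeMap::new();
    for ((validator, _page), (page_total, nominators)) in pages {
        let meta = out.entry(*validator).or_default();
        meta.total += *page_total;
        meta.nominator_count += *nominators;
        meta.page_count += 1;
    }
    out
}
```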
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs
index ee6f67adf14c..bac2530b19bb 100644
--- a/substrate/frame/staking/src/tests.rs
+++ b/substrate/frame/staking/src/tests.rs
@@ -6637,6 +6637,14 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout(
);
assert_eq!(EraInfo::<Test>::get_page_count(1, &11), 2);
+ // validator is exposed
+ assert!(<Staking as StakingInterface>::is_exposed_in_era(&11, &1));
+ // nominators are exposed
+ for i in 10..15 {
+ let who: AccountId = 1000 + i;
+ assert!(<Staking as StakingInterface>::is_exposed_in_era(&who, &1));
+ }
+
// case 2: exposure exist in ErasStakers and ErasStakersClipped (legacy).
// delete paged storage and add exposure to clipped storage
<ErasStakersPaged<Test>>::remove((1, 11, 0));
@@ -6672,6 +6680,14 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout(
assert_eq!(actual_exposure_full.own, 1000);
assert_eq!(actual_exposure_full.total, total_exposure);
+ // validator is exposed
+ assert!(<Staking as StakingInterface>::is_exposed_in_era(&11, &1));
+ // nominators are exposed
+ for i in 10..15 {
+ let who: AccountId = 1000 + i;
+ assert!(<Staking as StakingInterface>::is_exposed_in_era(&who, &1));
+ }
+
// for pages other than 0, clipped storage returns empty exposure
assert_eq!(EraInfo::<Test>::get_paged_exposure(1, &11, 1), None);
// page size is 1 for clipped storage
diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
index 7375bcd2f16a..b5d108275249 100644
--- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
@@ -6,8 +6,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied
|
= help: the following other types implement trait `WrapperTypeDecode`:
Box<T>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
= note: required for `Bar` to implement `Decode`
= note: required for `Bar` to implement `FullCodec`
@@ -44,8 +44,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied
bytes::bytes::Bytes
Cow<'a, T>
parity_scale_codec::Ref<'a, T, U>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
Vec<T>
and $N others
@@ -81,8 +81,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied
|
= help: the following other types implement trait `WrapperTypeDecode`:
Box<T>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
= note: required for `Bar` to implement `Decode`
= note: required for `Bar` to implement `FullCodec`
@@ -119,8 +119,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied
bytes::bytes::Bytes
Cow<'a, T>
parity_scale_codec::Ref<'a, T, U>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
Vec<T>
and $N others
@@ -137,8 +137,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied
|
= help: the following other types implement trait `WrapperTypeDecode`:
Box<T>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
= note: required for `Bar` to implement `Decode`
= note: required for `Bar` to implement `FullCodec`
@@ -177,8 +177,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied
bytes::bytes::Bytes
Cow<'a, T>
parity_scale_codec::Ref<'a, T, U>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
Vec<T>
and $N others
diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
index 3a0a25712aaf..b58902590b85 100644
--- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
+++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
@@ -6,8 +6,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied
|
= help: the following other types implement trait `WrapperTypeDecode`:
Box<T>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
= note: required for `Bar` to implement `Decode`
= note: required for `Bar` to implement `FullCodec`
@@ -44,8 +44,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied
bytes::bytes::Bytes
Cow<'a, T>
parity_scale_codec::Ref<'a, T, U>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
Vec<T>
and $N others
@@ -81,8 +81,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied
|
= help: the following other types implement trait `WrapperTypeDecode`:
Box<T>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
= note: required for `Bar` to implement `Decode`
= note: required for `Bar` to implement `FullCodec`
@@ -119,8 +119,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied
bytes::bytes::Bytes
Cow<'a, T>
parity_scale_codec::Ref<'a, T, U>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
Vec<T>
and $N others
@@ -137,8 +137,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied
|
= help: the following other types implement trait `WrapperTypeDecode`:
Box<T>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
= note: required for `Bar` to implement `Decode`
= note: required for `Bar` to implement `FullCodec`
@@ -177,8 +177,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied
bytes::bytes::Bytes
Cow<'a, T>
parity_scale_codec::Ref<'a, T, U>
- frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Rc<T>
+ frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes
Arc<T>
Vec<T>
and $N others
diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml
index 79df81e62c66..34485c72ab03 100644
--- a/substrate/primitives/core/Cargo.toml
+++ b/substrate/primitives/core/Cargo.toml
@@ -26,10 +26,8 @@ bs58 = { version = "0.5.0", default-features = false, optional = true }
rand = { version = "0.8.5", features = ["small_rng"], optional = true }
substrate-bip39 = { version = "0.4.4", optional = true }
bip39 = { version = "2.0.0", default-features = false }
-regex = { version = "1.6.0", optional = true }
zeroize = { version = "1.4.3", default-features = false }
secrecy = { version = "0.8.0", default-features = false }
-lazy_static = { version = "1.4.0", default-features = false, optional = true }
parking_lot = { version = "0.12.1", optional = true }
ss58-registry = { version = "1.34.0", default-features = false }
sp-std = { path = "../std", default-features = false}
@@ -51,18 +49,20 @@ blake2 = { version = "0.10.4", default-features = false, optional = true }
libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true }
schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false }
merlin = { version = "2.0", default-features = false }
-secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true }
+secp256k1 = { version = "0.28.0", default-features = false, features = ["recovery", "alloc"], optional = true }
sp-core-hashing = { path = "hashing", default-features = false, optional = true }
sp-runtime-interface = { path = "../runtime-interface", default-features = false}
# bls crypto
w3f-bls = { version = "0.1.3", default-features = false, optional = true}
# bandersnatch crypto
-bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "cbc342e", default-features = false, optional = true }
+bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "3ddc205", default-features = false, optional = true }
[dev-dependencies]
criterion = "0.4.0"
serde_json = "1.0.108"
+lazy_static = "1.4.0"
+regex = "1.6.0"
sp-core-hashing-proc-macro = { path = "hashing/proc-macro" }
[[bench]]
@@ -92,7 +92,6 @@ std = [
"hash256-std-hasher/std",
"impl-serde/std",
"itertools",
- "lazy_static",
"libsecp256k1/std",
"log/std",
"merlin/std",
@@ -102,7 +101,6 @@ std = [
"primitive-types/serde",
"primitive-types/std",
"rand",
- "regex",
"scale-info/std",
"schnorrkel/std",
"secp256k1/global-context",
diff --git a/substrate/primitives/core/fuzz/Cargo.toml b/substrate/primitives/core/fuzz/Cargo.toml
new file mode 100644
index 000000000000..9a094b07d4a1
--- /dev/null
+++ b/substrate/primitives/core/fuzz/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "sp-core-fuzz"
+version = "0.0.0"
+publish = false
+
+[package.metadata]
+cargo-fuzz = true
+
+[dependencies]
+lazy_static = "1.4.0"
+libfuzzer-sys = "0.4"
+regex = "1.10.2"
+
+sp-core = { path = ".." }
+
+[[bin]]
+name = "fuzz_address_uri"
+path = "fuzz_targets/fuzz_address_uri.rs"
+test = false
+doc = false
diff --git a/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs
new file mode 100644
index 000000000000..e2d9e2fc8b08
--- /dev/null
+++ b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs
@@ -0,0 +1,53 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![no_main]
+
+extern crate libfuzzer_sys;
+extern crate regex;
+extern crate sp_core;
+
+use libfuzzer_sys::fuzz_target;
+use regex::Regex;
+use sp_core::crypto::AddressUri;
+
+lazy_static::lazy_static! {
+ static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P<phrase>[a-zA-Z0-9 ]+)?(?P<path>(//?[^/]+)*)(///(?P<password>.*))?$")
+ .expect("constructed from known-good static value; qed");
+}
+
+fuzz_target!(|input: &str| {
+ let regex_result = SECRET_PHRASE_REGEX.captures(input);
+ let manual_result = AddressUri::parse(input);
+ assert_eq!(regex_result.is_some(), manual_result.is_ok());
+ if manual_result.is_err() {
+ let _ = format!("{}", manual_result.as_ref().err().unwrap());
+ }
+ if let (Some(regex_result), Ok(manual_result)) = (regex_result, manual_result) {
+ assert_eq!(regex_result.name("phrase").map(|p| p.as_str()), manual_result.phrase);
+
+ let manual_paths = manual_result
+ .paths
+ .iter()
+ .map(|s| "/".to_string() + s)
+ .collect::<Vec<_>>()
+ .join("");
+
+ assert_eq!(regex_result.name("path").unwrap().as_str().to_string(), manual_paths);
+ assert_eq!(regex_result.name("password").map(|pass| pass.as_str()), manual_result.pass);
+ }
+});
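Assuming the conventional cargo-fuzz layout this crate declares, the target can presumably be run from `substrate/primitives/core` with `cargo fuzz run fuzz_address_uri`; every generated input re-asserts that the hand-written parser and the reference regex agree.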
diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs
new file mode 100644
index 000000000000..862747c9a4b6
--- /dev/null
+++ b/substrate/primitives/core/src/address_uri.rs
@@ -0,0 +1,432 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Little util for parsing an address URI. Replaces regular expressions.
+
+#[cfg(all(not(feature = "std"), any(feature = "serde", feature = "full_crypto")))]
+use sp_std::{
+ alloc::string::{String, ToString},
+ vec::Vec,
+};
+
+/// A container for results of parsing the address URI string.
+///
+/// Intended to be equivalent of:
+/// `Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$")`
+/// which also handles soft and hard derivation paths:
+/// `Regex::new(r"/(/?[^/]+)")`
+///
+/// Example:
+/// ```
+/// use sp_core::crypto::AddressUri;
+/// let manual_result = AddressUri::parse("hello world/s//h///pass");
+/// assert_eq!(
+/// manual_result.unwrap(),
+/// AddressUri { phrase: Some("hello world"), paths: vec!["s", "/h"], pass: Some("pass") }
+/// );
+/// ```
+#[derive(Debug, PartialEq)]
+pub struct AddressUri<'a> {
+ /// Phrase, hexadecimal string, or ss58-compatible string.
+ pub phrase: Option<&'a str>,
+ /// Key derivation paths, ordered as in the input string.
+ pub paths: Vec<&'a str>,
+ /// Password.
+ pub pass: Option<&'a str>,
+}
+
+/// Errors that are possible during parsing the address URI.
+#[allow(missing_docs)]
+#[cfg_attr(feature = "std", derive(thiserror::Error))]
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum Error {
+ #[cfg_attr(feature = "std", error("Invalid character in phrase:\n{0}"))]
+ InvalidCharacterInPhrase(InvalidCharacterInfo),
+ #[cfg_attr(feature = "std", error("Invalid character in password:\n{0}"))]
+ InvalidCharacterInPass(InvalidCharacterInfo),
+ #[cfg_attr(feature = "std", error("Missing character in hard path:\n{0}"))]
+ MissingCharacterInHardPath(InvalidCharacterInfo),
+ #[cfg_attr(feature = "std", error("Missing character in soft path:\n{0}"))]
+ MissingCharacterInSoftPath(InvalidCharacterInfo),
+}
+
+impl Error {
+ /// Creates an instance of `Error::InvalidCharacterInPhrase` using given parameters.
+ pub fn in_phrase(input: &str, pos: usize) -> Self {
+ Self::InvalidCharacterInPhrase(InvalidCharacterInfo::new(input, pos))
+ }
+ /// Creates an instance of `Error::InvalidCharacterInPass` using given parameters.
+ pub fn in_pass(input: &str, pos: usize) -> Self {
+ Self::InvalidCharacterInPass(InvalidCharacterInfo::new(input, pos))
+ }
+ /// Creates an instance of `Error::MissingCharacterInHardPath` using given parameters.
+ pub fn in_hard_path(input: &str, pos: usize) -> Self {
+ Self::MissingCharacterInHardPath(InvalidCharacterInfo::new(input, pos))
+ }
+ /// Creates an instance of `Error::MissingCharacterInSoftPath` using given parameters.
+ pub fn in_soft_path(input: &str, pos: usize) -> Self {
+ Self::MissingCharacterInSoftPath(InvalidCharacterInfo::new(input, pos))
+ }
+}
+
+/// Complementary error information.
+///
+/// The structure carries complementary information about parsing an address URI string.
+/// The `String` holds a copy of the original URI string; the 0-based integer gives the
+/// position of the invalid character.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct InvalidCharacterInfo(String, usize);
+
+impl InvalidCharacterInfo {
+ fn new(info: &str, pos: usize) -> Self {
+ Self(info.to_string(), pos)
+ }
+}
+
+impl sp_std::fmt::Display for InvalidCharacterInfo {
+ fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result {
+ let (s, pos) = escape_string(&self.0, self.1);
+ write!(f, "{s}\n{i}^", i = sp_std::iter::repeat(" ").take(pos).collect::<String>())
+ }
+}
+
+/// Escapes the control characters in given string, and recomputes the position if some characters
+/// were actually escaped.
+fn escape_string(input: &str, pos: usize) -> (String, usize) {
+ let mut out = String::with_capacity(2 * input.len());
+ let mut out_pos = 0;
+ input
+ .chars()
+ .enumerate()
+ .map(|(i, c)| {
+ let esc = |c| (i, Some('\\'), c, 2);
+ match c {
+ '\t' => esc('t'),
+ '\n' => esc('n'),
+ '\r' => esc('r'),
+ '\x07' => esc('a'),
+ '\x08' => esc('b'),
+ '\x0b' => esc('v'),
+ '\x0c' => esc('f'),
+ _ => (i, None, c, 1),
+ }
+ })
+ .for_each(|(i, maybe_escape, c, increment)| {
+ maybe_escape.map(|e| out.push(e));
+ out.push(c);
+ if i < pos {
+ out_pos += increment;
+ }
+ });
+ (out, out_pos)
+}
+
+fn extract_prefix<'a>(input: &mut &'a str, is_allowed: &dyn Fn(char) -> bool) -> Option<&'a str> {
+ let output = input.trim_start_matches(is_allowed);
+ let prefix_len = input.len() - output.len();
+ let prefix = if prefix_len > 0 { Some(&input[..prefix_len]) } else { None };
+ *input = output;
+ prefix
+}
+
+fn strip_prefix(input: &mut &str, prefix: &str) -> bool {
+ if let Some(stripped_input) = input.strip_prefix(prefix) {
+ *input = stripped_input;
+ true
+ } else {
+ false
+ }
+}
+
+impl<'a> AddressUri<'a> {
+ /// Parses the given string.
+ pub fn parse(mut input: &'a str) -> Result<Self, Error> {
+ let initial_input = input;
+ let initial_input_len = input.len();
+ let phrase = extract_prefix(&mut input, &|ch: char| {
+ ch.is_ascii_digit() || ch.is_ascii_alphabetic() || ch == ' '
+ });
+
+ let mut pass = None;
+ let mut paths = Vec::new();
+ while !input.is_empty() {
+ let unstripped_input = input;
+ if strip_prefix(&mut input, "///") {
+ pass = Some(extract_prefix(&mut input, &|ch: char| ch != '\n').unwrap_or(""));
+ } else if strip_prefix(&mut input, "//") {
+ let path = extract_prefix(&mut input, &|ch: char| ch != '/')
+ .ok_or(Error::in_hard_path(initial_input, initial_input_len - input.len()))?;
+ assert!(path.len() > 0);
+ // The hard path must keep its leading '/', so take it from the unstripped input.
+ paths.push(&unstripped_input[1..path.len() + 2]);
+ } else if strip_prefix(&mut input, "/") {
+ paths.push(
+ extract_prefix(&mut input, &|ch: char| ch != '/').ok_or(
+ Error::in_soft_path(initial_input, initial_input_len - input.len()),
+ )?,
+ );
+ } else {
+ return Err(if pass.is_some() {
+ Error::in_pass(initial_input, initial_input_len - input.len())
+ } else {
+ Error::in_phrase(initial_input, initial_input_len - input.len())
+ });
+ }
+ }
+
+ Ok(Self { phrase, paths, pass })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use regex::Regex;
+
+ lazy_static::lazy_static! {
+ static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P<phrase>[a-zA-Z0-9 ]+)?(?P<path>(//?[^/]+)*)(///(?P<password>.*))?$")
+ .expect("constructed from known-good static value; qed");
+ }
+
+ fn check_with_regex(input: &str) {
+ let regex_result = SECRET_PHRASE_REGEX.captures(input);
+ let manual_result = AddressUri::parse(input);
+ assert_eq!(regex_result.is_some(), manual_result.is_ok());
+ if let (Some(regex_result), Ok(manual_result)) = (regex_result, manual_result) {
+ assert_eq!(
+ regex_result.name("phrase").map(|phrase| phrase.as_str()),
+ manual_result.phrase
+ );
+
+ let manual_paths = manual_result
+ .paths
+ .iter()
+ .map(|s| "/".to_string() + s)
+ .collect::<Vec<_>>()
+ .join("");
+
+ assert_eq!(regex_result.name("path").unwrap().as_str().to_string(), manual_paths);
+ assert_eq!(
+ regex_result.name("password").map(|phrase| phrase.as_str()),
+ manual_result.pass
+ );
+ }
+ }
+
+ fn check(input: &str, result: Result<AddressUri, Error>) {
+ let manual_result = AddressUri::parse(input);
+ assert_eq!(manual_result, result);
+ check_with_regex(input);
+ }
+
+ #[test]
+ fn test00() {
+ check("///", Ok(AddressUri { phrase: None, pass: Some(""), paths: vec![] }));
+ }
+
+ #[test]
+ fn test01() {
+ check("////////", Ok(AddressUri { phrase: None, pass: Some("/////"), paths: vec![] }))
+ }
+
+ #[test]
+ fn test02() {
+ check(
+ "sdasd///asda",
+ Ok(AddressUri { phrase: Some("sdasd"), pass: Some("asda"), paths: vec![] }),
+ );
+ }
+
+ #[test]
+ fn test03() {
+ check(
+ "sdasd//asda",
+ Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/asda"] }),
+ );
+ }
+
+ #[test]
+ fn test04() {
+ check("sdasd//a", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/a"] }));
+ }
+
+ #[test]
+ fn test05() {
+ let input = "sdasd//";
+ check(input, Err(Error::in_hard_path(input, 7)));
+ }
+
+ #[test]
+ fn test06() {
+ check(
+ "sdasd/xx//asda",
+ Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["xx", "/asda"] }),
+ );
+ }
+
+ #[test]
+ fn test07() {
+ check(
+ "sdasd/xx//a/b//c///pass",
+ Ok(AddressUri {
+ phrase: Some("sdasd"),
+ pass: Some("pass"),
+ paths: vec!["xx", "/a", "b", "/c"],
+ }),
+ );
+ }
+
+ #[test]
+ fn test08() {
+ check(
+ "sdasd/xx//a",
+ Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["xx", "/a"] }),
+ );
+ }
+
+ #[test]
+ fn test09() {
+ let input = "sdasd/xx//";
+ check(input, Err(Error::in_hard_path(input, 10)));
+ }
+
+ #[test]
+ fn test10() {
+ check(
+ "sdasd/asda",
+ Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asda"] }),
+ );
+ }
+
+ #[test]
+ fn test11() {
+ check(
+ "sdasd/asda//x",
+ Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asda", "/x"] }),
+ );
+ }
+
+ #[test]
+ fn test12() {
+ check("sdasd/a", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["a"] }));
+ }
+
+ #[test]
+ fn test13() {
+ let input = "sdasd/";
+ check(input, Err(Error::in_soft_path(input, 6)));
+ }
+
+ #[test]
+ fn test14() {
+ check("sdasd", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec![] }));
+ }
+
+ #[test]
+ fn test15() {
+ let input = "sdasd.";
+ check(input, Err(Error::in_phrase(input, 5)));
+ }
+
+ #[test]
+ fn test16() {
+ let input = "sd.asd/asd.a";
+ check(input, Err(Error::in_phrase(input, 2)));
+ }
+
+ #[test]
+ fn test17() {
+ let input = "sd.asd//asd.a";
+ check(input, Err(Error::in_phrase(input, 2)));
+ }
+
+ #[test]
+ fn test18() {
+ check(
+ "sdasd/asd.a",
+ Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asd.a"] }),
+ );
+ }
+
+ #[test]
+ fn test19() {
+ check(
+ "sdasd//asd.a",
+ Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/asd.a"] }),
+ );
+ }
+
+ #[test]
+ fn test20() {
+ let input = "///\n";
+ check(input, Err(Error::in_pass(input, 3)));
+ }
+
+ #[test]
+ fn test21() {
+ let input = "///a\n";
+ check(input, Err(Error::in_pass(input, 4)));
+ }
+
+ #[test]
+ fn test22() {
+ let input = "sd asd///asd.a\n";
+ check(input, Err(Error::in_pass(input, 14)));
+ }
+
+ #[test]
+ fn test_invalid_char_info_1() {
+ let expected = "01234\n^";
+ let f = format!("{}", InvalidCharacterInfo::new("01234", 0));
+ assert_eq!(expected, f);
+ }
+
+ #[test]
+ fn test_invalid_char_info_2() {
+ let expected = "01\n ^";
+ let f = format!("{}", InvalidCharacterInfo::new("01", 1));
+ assert_eq!(expected, f);
+ }
+
+ #[test]
+ fn test_invalid_char_info_3() {
+ let expected = "01234\n ^";
+ let f = format!("{}", InvalidCharacterInfo::new("01234", 2));
+ assert_eq!(expected, f);
+ }
+
+ #[test]
+ fn test_invalid_char_info_4() {
+ let expected = "012\\n456\n ^";
+ let f = format!("{}", InvalidCharacterInfo::new("012\n456", 3));
+ assert_eq!(expected, f);
+ }
+
+ #[test]
+ fn test_invalid_char_info_5() {
+ let expected = "012\\n456\n ^";
+ let f = format!("{}", InvalidCharacterInfo::new("012\n456", 5));
+ assert_eq!(expected, f);
+ }
+
+ #[test]
+ fn test_invalid_char_info_6() {
+ let expected = "012\\f456\\t89\n ^";
+ let f = format!("{}", InvalidCharacterInfo::new("012\x0c456\t89", 9));
+ assert_eq!(expected, f);
+ }
+}
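Complementing the doc-test near the top of this file, a small sketch of the error path (assuming the `std` feature, which enables the `thiserror`-derived `Display`):

```rust
use sp_core::crypto::AddressUri;

fn main() {
    // '.' is not a valid phrase character, so parsing fails; the Display impl
    // echoes the input with a caret under the offending character.
    match AddressUri::parse("bad.phrase//hard") {
        Ok(uri) => println!("parsed: {uri:?}"),
        Err(err) => println!("{err}"),
    }
}
```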
diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs
index d369de5a1c01..c9719e344d3e 100644
--- a/substrate/primitives/core/src/crypto.rs
+++ b/substrate/primitives/core/src/crypto.rs
@@ -25,8 +25,6 @@ use codec::{Decode, Encode, MaxEncodedLen};
use itertools::Itertools;
#[cfg(feature = "std")]
use rand::{rngs::OsRng, RngCore};
-#[cfg(feature = "std")]
-use regex::Regex;
use scale_info::TypeInfo;
#[cfg(feature = "std")]
pub use secrecy::{ExposeSecret, SecretString};
@@ -43,6 +41,11 @@ pub use ss58_registry::{from_known_address_format, Ss58AddressFormat, Ss58Addres
/// Trait to zeroize a memory buffer.
pub use zeroize::Zeroize;
+#[cfg(feature = "std")]
+pub use crate::address_uri::AddressUri;
+#[cfg(any(feature = "std", feature = "full_crypto"))]
+pub use crate::address_uri::Error as AddressUriError;
+
/// The root phrase for our publicly known keys.
pub const DEV_PHRASE: &str =
"bottom drive obey lake curtain smoke basket hold race lonely fit walk";
@@ -82,8 +85,8 @@ impl<S, T: UncheckedFrom<S>> UncheckedInto<T> for S {
#[cfg(feature = "full_crypto")]
pub enum SecretStringError {
/// The overall format was invalid (e.g. the seed phrase contained symbols).
- #[cfg_attr(feature = "std", error("Invalid format"))]
- InvalidFormat,
+ #[cfg_attr(feature = "std", error("Invalid format {0}"))]
+ InvalidFormat(AddressUriError),
/// The seed phrase provided is not a valid BIP39 phrase.
#[cfg_attr(feature = "std", error("Invalid phrase"))]
InvalidPhrase,
@@ -101,6 +104,13 @@ pub enum SecretStringError {
InvalidPath,
}
+#[cfg(any(feature = "std", feature = "full_crypto"))]
+impl From<AddressUriError> for SecretStringError {
+ fn from(e: AddressUriError) -> Self {
+ Self::InvalidFormat(e)
+ }
+}
+
/// An error when deriving a key.
#[cfg_attr(feature = "std", derive(thiserror::Error))]
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -208,7 +218,7 @@ impl<T: AsRef<str>> From<T> for DeriveJunction {
/// An error type for SS58 decoding.
#[cfg_attr(feature = "std", derive(thiserror::Error))]
#[cfg_attr(not(feature = "std"), derive(Debug))]
-#[derive(Clone, Copy, Eq, PartialEq)]
+#[derive(Clone, Eq, PartialEq)]
#[allow(missing_docs)]
#[cfg(any(feature = "full_crypto", feature = "serde"))]
pub enum PublicError {
@@ -235,6 +245,11 @@ pub enum PublicError {
InvalidPath,
#[cfg_attr(feature = "std", error("Disallowed SS58 Address Format for this datatype."))]
FormatNotAllowed,
+ #[cfg_attr(feature = "std", error("Password not allowed."))]
+ PasswordNotAllowed,
+ #[cfg(feature = "std")]
+ #[cfg_attr(feature = "std", error("Incorrect URI syntax {0}."))]
+ MalformedUri(#[from] AddressUriError),
}
#[cfg(feature = "std")]
@@ -414,47 +429,40 @@ pub fn set_default_ss58_version(new_default: Ss58AddressFormat) {
DEFAULT_VERSION.store(new_default.into(), core::sync::atomic::Ordering::Relaxed);
}
-#[cfg(feature = "std")]
-lazy_static::lazy_static! {
- static ref SS58_REGEX: Regex = Regex::new(r"^(?P<ss58>[\w\d ]+)?(?P<path>(//?[^/]+)*)$")
- .expect("constructed from known-good static value; qed");
- static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P<phrase>[\d\w ]+)?(?P<path>(//?[^/]+)*)(///(?P<password>.*))?$")
- .expect("constructed from known-good static value; qed");
- static ref JUNCTION_REGEX: Regex = Regex::new(r"/(/?[^/]+)")
- .expect("constructed from known-good static value; qed");
-}
-
#[cfg(feature = "std")]
impl<T: Sized + AsMut<[u8]> + AsRef<[u8]> + Public + Derive> Ss58Codec for T {
fn from_string(s: &str) -> Result<Self, PublicError> {
- let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?;
- let s = cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS);
+ let cap = AddressUri::parse(s)?;
+ if cap.pass.is_some() {
+ return Err(PublicError::PasswordNotAllowed);
+ }
+ let s = cap.phrase.unwrap_or(DEV_ADDRESS);
let addr = if let Some(stripped) = s.strip_prefix("0x") {
let d = array_bytes::hex2bytes(stripped).map_err(|_| PublicError::InvalidFormat)?;
Self::from_slice(&d).map_err(|()| PublicError::BadLength)?
} else {
Self::from_ss58check(s)?
};
- if cap["path"].is_empty() {
+ if cap.paths.is_empty() {
Ok(addr)
} else {
- let path =
- JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1]));
- addr.derive(path).ok_or(PublicError::InvalidPath)
+ addr.derive(cap.paths.iter().map(DeriveJunction::from))
+ .ok_or(PublicError::InvalidPath)
}
}
fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> {
- let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?;
- let (addr, v) = Self::from_ss58check_with_version(
- cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS),
- )?;
- if cap["path"].is_empty() {
+ let cap = AddressUri::parse(s)?;
+ if cap.pass.is_some() {
+ return Err(PublicError::PasswordNotAllowed);
+ }
+ let (addr, v) = Self::from_ss58check_with_version(cap.phrase.unwrap_or(DEV_ADDRESS))?;
+ if cap.paths.is_empty() {
Ok((addr, v))
} else {
- let path =
- JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1]));
- addr.derive(path).ok_or(PublicError::InvalidPath).map(|a| (a, v))
+ addr.derive(cap.paths.iter().map(DeriveJunction::from))
+ .ok_or(PublicError::InvalidPath)
+ .map(|a| (a, v))
}
}
}
@@ -817,22 +825,15 @@ impl sp_std::str::FromStr for SecretUri {
type Err = SecretStringError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
- let cap = SECRET_PHRASE_REGEX.captures(s).ok_or(SecretStringError::InvalidFormat)?;
-
- let junctions = JUNCTION_REGEX
- .captures_iter(&cap["path"])
- .map(|f| DeriveJunction::from(&f[1]))
- .collect::<Vec<_>>();
-
- let phrase = cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE);
- let password = cap.name("password");
+ let cap = AddressUri::parse(s)?;
+ let phrase = cap.phrase.unwrap_or(DEV_PHRASE);
Ok(Self {
phrase: SecretString::from_str(phrase).expect("Returns infallible error; qed"),
- password: password.map(|v| {
- SecretString::from_str(v.as_str()).expect("Returns infallible error; qed")
- }),
- junctions,
+ password: cap
+ .pass
+ .map(|v| SecretString::from_str(v).expect("Returns infallible error; qed")),
+ junctions: cap.paths.iter().map(DeriveJunction::from).collect::<Vec<_>>(),
})
}
}
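End to end, the parsing behavior is meant to be unchanged by the regex removal; a minimal sketch of `SecretUri` usage under that assumption:

```rust
use std::str::FromStr;
use sp_core::crypto::{DeriveJunction, ExposeSecret, SecretUri};

fn main() {
    // phrase, one hard junction, and a password.
    let uri = SecretUri::from_str("hello world//polkadot///secret").expect("valid URI");
    assert_eq!(uri.phrase.expose_secret(), "hello world");
    assert_eq!(uri.junctions, vec![DeriveJunction::hard("polkadot")]);
    assert_eq!(uri.password.expect("password was given").expose_secret(), "secret");
}
```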
diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs
index 603fa515a30e..471714582a6b 100644
--- a/substrate/primitives/core/src/ecdsa.rs
+++ b/substrate/primitives/core/src/ecdsa.rs
@@ -336,7 +336,7 @@ impl Signature {
pub fn recover_prehashed(&self, message: &[u8; 32]) -> Option {
let rid = RecoveryId::from_i32(self.0[64] as i32).ok()?;
let sig = RecoverableSignature::from_compact(&self.0[..64], rid).ok()?;
- let message = Message::from_slice(message).expect("Message is 32 bytes; qed");
+ let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed");
#[cfg(feature = "std")]
let context = SECP256K1;
@@ -458,7 +458,7 @@ impl Pair {
/// Sign a pre-hashed message
pub fn sign_prehashed(&self, message: &[u8; 32]) -> Signature {
- let message = Message::from_slice(message).expect("Message is 32 bytes; qed");
+ let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed");
#[cfg(feature = "std")]
let context = SECP256K1;
@@ -508,12 +508,7 @@ impl Pair {
#[cfg(feature = "full_crypto")]
impl Drop for Pair {
fn drop(&mut self) {
- let ptr = self.secret.as_mut_ptr();
- for off in 0..self.secret.len() {
- unsafe {
- core::ptr::write_volatile(ptr.add(off), 0);
- }
- }
+ self.secret.non_secure_erase()
}
}
@@ -760,7 +755,7 @@ mod test {
let msg = [0u8; 32];
let sig1 = pair.sign_prehashed(&msg);
let sig2: Signature = {
- let message = Message::from_slice(&msg).unwrap();
+ let message = Message::from_digest_slice(&msg).unwrap();
SECP256K1.sign_ecdsa_recoverable(&message, &pair.secret).into()
};
assert_eq!(sig1, sig2);
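The 0.24 → 0.28 secp256k1 bump renames the digest constructor; a standalone sketch against the new crate API (assuming the `recovery` and `std` features):

```rust
use secp256k1::{ecdsa::RecoverableSignature, Message, Secp256k1, SecretKey};

// `Message::from_digest_slice` replaces `Message::from_slice` for 32-byte
// digests in secp256k1 0.28.
fn sign_prehashed(secret: &SecretKey, digest: &[u8; 32]) -> RecoverableSignature {
    let message = Message::from_digest_slice(digest).expect("digest is 32 bytes; qed");
    Secp256k1::new().sign_ecdsa_recoverable(&message, secret)
}
```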
diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs
index ec0641c54668..4873d1a21127 100644
--- a/substrate/primitives/core/src/lib.rs
+++ b/substrate/primitives/core/src/lib.rs
@@ -55,6 +55,8 @@ pub mod crypto;
pub mod hexdisplay;
pub use paste;
+#[cfg(any(feature = "full_crypto", feature = "std"))]
+mod address_uri;
#[cfg(feature = "bandersnatch-experimental")]
pub mod bandersnatch;
#[cfg(feature = "bls-experimental")]
diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml
index 445104b736e0..59df8895bb7f 100644
--- a/substrate/primitives/io/Cargo.toml
+++ b/substrate/primitives/io/Cargo.toml
@@ -28,7 +28,7 @@ sp-trie = { path = "../trie", default-features = false, optional = true}
sp-externalities = { path = "../externalities", default-features = false}
sp-tracing = { path = "../tracing", default-features = false}
log = { version = "0.4.17", optional = true }
-secp256k1 = { version = "0.24.0", features = ["recovery", "global-context"], optional = true }
+secp256k1 = { version = "0.28.0", features = ["recovery", "global-context"], optional = true }
tracing = { version = "0.1.29", default-features = false }
tracing-core = { version = "0.1.28", default-features = false}
diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs
index c4182d6ab3a0..a300152ee66d 100644
--- a/substrate/primitives/io/src/lib.rs
+++ b/substrate/primitives/io/src/lib.rs
@@ -1139,7 +1139,7 @@ pub trait Crypto {
.map_err(|_| EcdsaVerifyError::BadV)?;
let sig = RecoverableSignature::from_compact(&sig[..64], rid)
.map_err(|_| EcdsaVerifyError::BadRS)?;
- let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed");
+ let msg = Message::from_digest_slice(msg).expect("Message is 32 bytes; qed");
let pubkey = SECP256K1
.recover_ecdsa(&msg, &sig)
.map_err(|_| EcdsaVerifyError::BadSignature)?;
@@ -1185,7 +1185,7 @@ pub trait Crypto {
.map_err(|_| EcdsaVerifyError::BadV)?;
let sig = RecoverableSignature::from_compact(&sig[..64], rid)
.map_err(|_| EcdsaVerifyError::BadRS)?;
- let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed");
+ let msg = Message::from_digest_slice(msg).expect("Message is 32 bytes; qed");
let pubkey = SECP256K1
.recover_ecdsa(&msg, &sig)
.map_err(|_| EcdsaVerifyError::BadSignature)?;
diff --git a/substrate/utils/build-script-utils/src/version.rs b/substrate/utils/build-script-utils/src/version.rs
index f6a9ff9554ab..549e499b1102 100644
--- a/substrate/utils/build-script-utils/src/version.rs
+++ b/substrate/utils/build-script-utils/src/version.rs
@@ -59,3 +59,34 @@ fn get_version(impl_commit: &str) -> String {
impl_commit
)
}
+
+/// Generate `SUBSTRATE_WASMTIME_VERSION`
+pub fn generate_wasmtime_version() {
+ generate_dependency_version("wasmtime", "SUBSTRATE_WASMTIME_VERSION");
+}
+
+fn generate_dependency_version(dep: &str, env_var: &str) {
+ // we only care about the root
+ match std::process::Command::new("cargo")
+ .args(["tree", "--depth=0", "--locked", "--package", dep])
+ .output()
+ {
+ Ok(output) if output.status.success() => {
+ let version = String::from_utf8_lossy(&output.stdout);
+
+ // vX.X.X
+ if let Some(ver) = version.strip_prefix(&format!("{} v", dep)) {
+ println!("cargo:rustc-env={}={}", env_var, ver);
+ } else {
+ println!("cargo:warning=Unexpected result {}", version);
+ }
+ },
+
+ // command errors out when it could not find the given dependency
+ // or when having multiple versions of it
+ Ok(output) =>
+ println!("cargo:warning=`cargo tree` {}", String::from_utf8_lossy(&output.stderr)),
+
+ Err(err) => println!("cargo:warning=Could not run `cargo tree`: {}", err),
+ }
+}
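A hypothetical consumer of the new helper, wired into a crate's `build.rs`:

```rust
// build.rs of a hypothetical node crate.
fn main() {
    // The helper shells out to `cargo tree --locked`, so re-run when the
    // lockfile changes.
    println!("cargo:rerun-if-changed=Cargo.lock");
    substrate_build_script_utils::generate_wasmtime_version();
}
```

At compile time the crate can then read the value with `option_env!("SUBSTRATE_WASMTIME_VERSION")`; `option_env!` is the safer choice here, since the variable is only set when `cargo tree` succeeds.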