From 9d760a9f569cf58bf6f6c19bac93d0d33f54a454 Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Thu, 2 Jan 2025 13:13:45 +0100
Subject: [PATCH 01/10] [CI] Skip SemVer on R0-silent and update docs (#6285)

Changes:
- Make `R0-silent` skip the semver check again. Originally I thought it
  would be good to have a bullet-proof check, but it now often triggers
  when CI or unrelated files are changed. In the end, the developer has
  to make the right choice here - and always will need to. So bringing
  back the R0 label gives more power to the devs and should increase
  dev velocity. We still need to ensure that every use of this label is
  well understood, and that it is not used out of laziness.
- Fix `/cmd prdoc` bump levels
- Update docs

---------

Signed-off-by: Oliver Tale-Yazdi
---
 .github/scripts/generate-prdoc.py   |  18 +++-
 .github/workflows/check-semver.yml  |  10 ++-
 .github/workflows/command-prdoc.yml |   2 +-
 docs/contributor/prdoc.md           | 124 ++++++++++++++++------------
 4 files changed, 97 insertions(+), 57 deletions(-)

diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py
index 780fa0012976..9154f185e64b 100644
--- a/.github/scripts/generate-prdoc.py
+++ b/.github/scripts/generate-prdoc.py
@@ -36,6 +36,21 @@ def from_pr_number(n, audience, bump, force):
 
 	create_prdoc(n, audience, pr.title, pr.body, patch, bump, force)
 
+def translate_audience(audience):
+	aliases = {
+		'runtime_dev': 'Runtime Dev',
+		'runtime_user': 'Runtime Operator',
+		'node_dev': 'Node Dev',
+		'node_user': 'Node User',
+	}
+
+	if audience in aliases:
+		to = aliases[audience]
+		print(f"Translated audience '{audience}' to '{to}'")
+		audience = to
+
+	return audience
+
 def create_prdoc(pr, audience, title, description, patch, bump, force):
 	path = f"prdoc/pr_{pr}.prdoc"
 
@@ -49,6 +64,7 @@ def create_prdoc(pr, audience, title, description, patch, bump, force):
 		print(f"No preexisting PrDoc for PR {pr}")
 		prdoc = { "title": title, "doc": [{}], "crates": [] }
 
+	audience = translate_audience(audience)
 	prdoc["doc"][0]["audience"] = audience
 	prdoc["doc"][0]["description"] = description
 
@@ -117,7 +133,7 @@ def setup_parser(parser=None, pr_required=True):
 		parser = argparse.ArgumentParser()
 	parser.add_argument("--pr", type=int, required=pr_required, help="The PR number to generate the PrDoc for.")
 	parser.add_argument("--audience", type=str, nargs='*', choices=allowed_audiences, default=["todo"], help="The audience of whom the changes may concern. Example: --audience runtime_dev node_dev")
-	parser.add_argument("--bump", type=str, default="major", choices=["patch", "minor", "major", "silent", "ignore", "no_change"], help="A default bump level for all crates. Example: --bump patch")
+	parser.add_argument("--bump", type=str, default="major", choices=["patch", "minor", "major", "silent", "ignore", "none"], help="A default bump level for all crates. Example: --bump patch")
 	parser.add_argument("--force", action="store_true", help="Whether to overwrite any existing PrDoc.")
 	return parser
 
diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml
index 16028c8de770..0da3e54ef60b 100644
--- a/.github/workflows/check-semver.yml
+++ b/.github/workflows/check-semver.yml
@@ -2,7 +2,7 @@ name: Check semver
 
 on:
   pull_request:
-    types: [opened, synchronize, reopened, ready_for_review]
+    types: [opened, synchronize, reopened, ready_for_review, labeled, unlabeled]
   workflow_dispatch:
   merge_group:
 
@@ -62,21 +62,29 @@ jobs:
 
           echo "PRDOC_EXTRA_ARGS=--max-bump minor" >> $GITHUB_ENV
 
+      - name: Echo Skip
+        if: ${{ contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
+        run: echo "Skipping this PR because it is labeled as R0-silent."
+
       - name: Rust Cache
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
        uses: Swatinem/rust-cache@82a92a6e8fbeee089604da2575dc567ae9ddeaab # v2.7.5
         with:
           cache-on-failure: true
 
       - name: Rust compilation prerequisites
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         run: |
           rustup default $TOOLCHAIN
           rustup component add rust-src --toolchain $TOOLCHAIN
 
       - name: install parity-publish
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         # Set the target dir to cache the build.
         run: CARGO_TARGET_DIR=./target/ cargo install parity-publish@0.10.3 --locked -q
 
       - name: check semver
+        if: ${{ !contains(github.event.pull_request.labels.*.name, 'R0-silent') }}
         run: |
           if [ -z "$PR" ]; then
             echo "Skipping master/merge queue"
diff --git a/.github/workflows/command-prdoc.yml b/.github/workflows/command-prdoc.yml
index 7022e8e0e006..71dbcfbd2289 100644
--- a/.github/workflows/command-prdoc.yml
+++ b/.github/workflows/command-prdoc.yml
@@ -14,7 +14,7 @@ on:
         required: true
         options:
           - "TODO"
-          - "no_change"
+          - "none"
           - "patch"
           - "minor"
           - "major"
diff --git a/docs/contributor/prdoc.md b/docs/contributor/prdoc.md
index 4a1a3c1f0688..1f6252425e69 100644
--- a/docs/contributor/prdoc.md
+++ b/docs/contributor/prdoc.md
@@ -1,73 +1,88 @@
 # PRDoc
 
-A [prdoc](https://github.com/paritytech/prdoc) is like a changelog but for a Pull Request. We use this approach to
-record changes on a crate level. This information is then processed by the release team to apply the correct crate
-version bumps and to generate the CHANGELOG of the next release.
+A [prdoc](https://github.com/paritytech/prdoc) is like a changelog but for a Pull Request. We use
+this approach to record changes on a crate level. This information is then processed by the release
+team to apply the correct crate version bumps and to generate the CHANGELOG of the next release.
 
 ## Requirements
 
-When creating a PR, the author needs to decide with the `R0-silent` label whether the PR has to contain a prdoc. The
-`R0` label should only be placed for No-OP changes like correcting a typo in a comment or CI stuff. If unsure, ping
-the [CODEOWNERS](../../.github/CODEOWNERS) for advice.
+When creating a PR, the author needs to decide with the `R0-silent` label whether the PR has to
+contain a prdoc. The `R0` label should only be placed for No-OP changes like correcting a typo in a
+comment or CI stuff. If unsure, ping the [CODEOWNERS](../../.github/CODEOWNERS) for advice.
 
-## PRDoc How-To
+## Auto Generation
 
-A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps to generate one:
-
-1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install parity-prdoc`.
-1. Open a Pull Request and get the PR number.
-1. Generate the file with `prdoc generate <PR_NUMBER>`. The output filename will be printed.
-1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example
-   [VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas).
-1. Edit your `.prdoc` file according to the [Audience](#pick-an-audience) and [SemVer](#record-semver-changes) sections.
-1. Check your prdoc with `prdoc check -n <PR_NUMBER>`. This is optional since the CI will also check it.
-
-> **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct file:
-> `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc`
-
-Alternatively you can call the prdoc from PR via `/cmd prdoc` (see args with `/cmd prdoc --help`)
-in a comment to PR to trigger it from CI.
+You can create a PrDoc by using the `/cmd prdoc` command (see args with `/cmd prdoc --help`) in a
+comment on your PR.
 
 Options:
 
-- `pr`: The PR number to generate the PrDoc for.
-- `audience`: The audience of whom the changes may concern.
-- `bump`: A default bump level for all crates.
-  The PrDoc will likely need to be edited to reflect the actual changes after generation.
-- `force`: Whether to overwrite any existing PrDoc.
+- `audience`: The audience of whom the changes may concern.
+  - `runtime_dev`: Anyone building a runtime themselves. For example parachain teams, or people
+    providing template runtimes. Also devs using pallets, FRAME etc directly. These are people who
+    care about the protocol (WASM), not the meta-protocol (client).
+  - `runtime_user`: Anyone using the runtime. Can be front-end devs reading the state, exchanges
+    listening for events, libraries that have hard-coded pallet indices etc. Anything that would
+    result in an observable change to the runtime behaviour must be marked with this.
+  - `node_dev`: Those who build around the client side code. Alternative client builders, SMOLDOT,
+    those who consume RPCs. These are people who are oblivious to the runtime changes. They only care
+    about the meta-protocol, not the protocol itself.
+  - `node_operator`: People who run the node. Think of validators, exchanges, indexer services, CI
+    actions. Anything that modifies how the binary behaves (its arguments, default arguments, error
+    messages, etc) must be marked with this.
+- `bump`: The default bump level for all crates. The PrDoc will likely need to be edited to reflect
+  the actual changes after generation. More details in the section below.
+  - `none`: There is no observable change. That is: if someone were handed the old and the new
+    version of our software, it would be impossible to figure out which version is which.
+  - `patch`: Fixes that will never cause compilation errors if someone updates to this version. No
+    functionality has been changed. Should be limited to fixing bugs or No-OP implementation
+    changes.
+  - `minor`: Additions that will never cause compilation errors if someone updates to this version.
+    No functionality has been changed. Should be limited to adding new features.
+  - `major`: Anything goes.
+- `force: true|false`: Whether to overwrite any existing PrDoc file.
 
-## Pick An Audience
+### Example
 
-While describing a PR, the author needs to consider which audience(s) need to be addressed.
-The list of valid audiences is described and documented in the JSON schema as follow:
+```bash
+/cmd prdoc --audience runtime_dev --bump patch
+```
 
-- `Node Dev`: Those who build around the client side code. Alternative client builders, SMOLDOT, those who consume RPCs.
-  These are people who are oblivious to the runtime changes. They only care about the meta-protocol, not the protocol
-  itself.
+## Local Generation
 
-- `Runtime Dev`: All of those who rely on the runtime. A parachain team that is using a pallet. A DApp that is using a
-  pallet. These are people who care about the protocol (WASM), not the meta-protocol (client).
+A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). Please follow these steps
+to generate one:
 
-- `Node Operator`: Those who don't write any code and only run code.
+1. Install the [`prdoc` CLI](https://github.com/paritytech/prdoc) by running `cargo install
+   parity-prdoc`.
+1. Open a Pull Request and get the PR number.
+1. Generate the file with `prdoc generate <PR_NUMBER>`. The output filename will be printed.
+1. Optional: Install the `prdoc/schema_user.json` schema in your editor, for example
+   [VsCode](https://github.com/paritytech/prdoc?tab=readme-ov-file#schemas).
+1. Edit your `.prdoc` file according to the [Audience](#pick-an-audience) and
+   [SemVer](#record-semver-changes) sections.
+1. Check your prdoc with `prdoc check -n <PR_NUMBER>`. This is optional since the CI will also check
+   it.
 
-- `Runtime User`: Anyone using the runtime. This can be a token holder or a dev writing a front end for a chain.
-
-If you have a change that affects multiple audiences, you can either list them all, or write multiple sections and
-re-phrase the changes for each audience.
+> **Tip:** GitHub CLI and jq can be used to provide the number of your PR to generate the correct
+> file:
+> `prdoc generate $(gh pr view --json number | jq '.number') -o prdoc`
 
 ## Record SemVer Changes
 
-All published crates that got modified need to have an entry in the `crates` section of your `PRDoc`. This entry tells
-the release team how to bump the crate version prior to the next release. It is very important that this information is
-correct, otherwise it could break the code of downstream teams.
+All published crates that got modified need to have an entry in the `crates` section of your
+`PRDoc`. This entry tells the release team how to bump the crate version prior to the next release.
+It is very important that this information is correct, otherwise it could break the code of
+downstream teams.
 
 The bump can either be `major`, `minor`, `patch` or `none`. The first three options are defined by
-[rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html), whereas `None` should be picked if no other
-applies. The `None` option is equivalent to the `R0-silent` label, but on a crate level. Experimental and private APIs
-are exempt from bumping and can be broken at any time. Please read the [Crate Section](../RELEASE.md) of the RELEASE doc
-about them.
+[rust-lang.org](https://doc.rust-lang.org/cargo/reference/semver.html), whereas `none` should be
+picked if none of the others apply. The `none` option is equivalent to the `R0-silent` label, but on
+a crate level. Experimental and private APIs are exempt from bumping and can be broken at any time.
+Please read the [Crate Section](../RELEASE.md) of the RELEASE doc about them.
 
-> **Note**: There is currently no CI in place to sanity check this information, but should be added soon.
+> **Note**: There is currently no CI in place to sanity check this information, but one should be
+> added soon.
 
 ### Example
 
@@ -81,12 +96,13 @@ crates:
   bump: minor
 ```
 
-It means that downstream code using `frame-example-pallet` is still guaranteed to work as before, while code using
-`frame-example` might break.
+It means that downstream code using `frame-example-pallet` is still guaranteed to work as before,
+while code using `frame-example` might break.
 
 ### Dependencies
 
-A crate that depends on another crate will automatically inherit its `major` bumps. This means that you do not need to
-bump a crate that had a SemVer breaking change only from re-exporting another crate with a breaking change.
-`minor` an `patch` bumps do not need to be inherited, since `cargo` will automatically update them to the latest
-compatible version.
+A crate that depends on another crate will automatically inherit its `major` bumps. This means that
+you do not need to bump a crate that had a SemVer breaking change only from re-exporting another
+crate with a breaking change.
+`minor` and `patch` bumps do not need to be inherited, since `cargo` will automatically update them
+to the latest compatible version.

From fcbc0ef2d109c9c96c6821959c9899a3d3dd20a1 Mon Sep 17 00:00:00 2001
From: Andrei Eres
Date: Thu, 2 Jan 2025 17:54:03 +0100
Subject: [PATCH 02/10] Add workflow for networking benchmarks (#7029)

# Description

Adds charts for networking benchmarks
---
 .github/workflows/networking-benchmarks.yml | 107 ++++++++++++++++++++
 1 file changed, 107 insertions(+)
 create mode 100644 .github/workflows/networking-benchmarks.yml

diff --git a/.github/workflows/networking-benchmarks.yml b/.github/workflows/networking-benchmarks.yml
new file mode 100644
index 000000000000..e45ae601105d
--- /dev/null
+++ b/.github/workflows/networking-benchmarks.yml
@@ -0,0 +1,107 @@
+name: Networking Benchmarks
+
+on:
+  push:
+    branches:
+      - master
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  preflight:
+    uses: ./.github/workflows/reusable-preflight.yml
+
+  build:
+    timeout-minutes: 80
+    needs: [preflight]
+    runs-on: ${{ needs.preflight.outputs.RUNNER_BENCHMARK }}
+    container:
+      image: ${{ needs.preflight.outputs.IMAGE }}
+    strategy:
+      fail-fast: false
+      matrix:
+        features:
+          [
+            {
+              bench: "notifications_protocol",
+            },
+            {
+              bench: "request_response_protocol",
+            },
+          ]
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Run Benchmarks
+        id: run-benchmarks
+        run: |
+          mkdir -p ./charts
+          forklift cargo bench -p sc-network --bench ${{ matrix.features.bench }} -- --output-format bencher | grep "^test" | tee ./charts/networking-bench.txt || echo "Benchmarks failed"
+          ls -lsa ./charts
+
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4.3.6
+        with:
+          name: ${{ matrix.features.bench }}-${{ github.sha }}
+          path: ./charts
+
+  publish-benchmarks:
+    timeout-minutes: 60
+    needs: [build]
+    if: github.ref == 'refs/heads/master'
+    environment: subsystem-benchmarks
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          ref: gh-pages
+          fetch-depth: 0
+
+      - run: git checkout master --
+
+      - name: Download artifacts
+        uses: actions/download-artifact@v4.1.8
+        with:
+          name: networking-bench-${{ github.sha }}
+          path: ./charts
+
+      - name: Setup git
+        run: |
+          # Fixes "detected dubious ownership" error in the ci
+          git config --global --add safe.directory '*'
+          ls -lsR 
./charts + + - uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: ${{ secrets.POLKADOTSDK_GHPAGES_APP_ID }} + private-key: ${{ secrets.POLKADOTSDK_GHPAGES_APP_KEY }} + + - name: Generate ${{ env.BENCH }} + env: + BENCH: notifications_protocol + uses: benchmark-action/github-action-benchmark@v1 + with: + tool: "cargo" + output-file-path: ./charts/${{ env.BENCH }}.txt + benchmark-data-dir-path: ./bench/${{ env.BENCH }} + github-token: ${{ steps.app-token.outputs.token }} + auto-push: true + + - name: Generate ${{ env.BENCH }} + env: + BENCH: request_response_protocol + uses: benchmark-action/github-action-benchmark@v1 + with: + tool: "cargo" + output-file-path: ./charts/${{ env.BENCH }}.txt + benchmark-data-dir-path: ./bench/${{ env.BENCH }} + github-token: ${{ steps.app-token.outputs.token }} + auto-push: true From 20513d6fec617acf783fef8db872beb0584b6a9b Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Thu, 2 Jan 2025 19:36:45 +0100 Subject: [PATCH 03/10] [pallet-revive] fix file case (#6981) fix https://github.com/paritytech/polkadot-sdk/issues/6970 --------- Co-authored-by: command-bot <> --- prdoc/pr_6981.prdoc | 7 ++ .../js/abi/{ErrorTester.json => Errors.json} | 0 .../js/abi/{ErrorTester.ts => Errors.ts} | 2 +- .../revive/rpc/examples/js/abi/errorTester.ts | 106 ------------------ .../contracts/{ErrorTester.sol => Errors.sol} | 2 +- .../{ErrorTester.polkavm => Errors.polkavm} | Bin .../rpc/examples/js/src/geth-diff.test.ts | 54 ++++----- substrate/frame/revive/rpc/src/tests.rs | 2 +- 8 files changed, 37 insertions(+), 136 deletions(-) create mode 100644 prdoc/pr_6981.prdoc rename substrate/frame/revive/rpc/examples/js/abi/{ErrorTester.json => Errors.json} (100%) rename substrate/frame/revive/rpc/examples/js/abi/{ErrorTester.ts => Errors.ts} (98%) delete mode 100644 substrate/frame/revive/rpc/examples/js/abi/errorTester.ts rename substrate/frame/revive/rpc/examples/js/contracts/{ErrorTester.sol => Errors.sol} (98%) rename substrate/frame/revive/rpc/examples/js/pvm/{ErrorTester.polkavm => Errors.polkavm} (100%) diff --git a/prdoc/pr_6981.prdoc b/prdoc/pr_6981.prdoc new file mode 100644 index 000000000000..8ed70e51ef45 --- /dev/null +++ b/prdoc/pr_6981.prdoc @@ -0,0 +1,7 @@ +title: '[pallet-revive] fix file case' +doc: +- audience: Runtime Dev + description: "fix https://github.com/paritytech/polkadot-sdk/issues/6970\r\n" +crates: +- name: pallet-revive-eth-rpc + bump: minor diff --git a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json b/substrate/frame/revive/rpc/examples/js/abi/Errors.json similarity index 100% rename from substrate/frame/revive/rpc/examples/js/abi/ErrorTester.json rename to substrate/frame/revive/rpc/examples/js/abi/Errors.json diff --git a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/Errors.ts similarity index 98% rename from substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts rename to substrate/frame/revive/rpc/examples/js/abi/Errors.ts index f3776e498fd5..b39567531c6d 100644 --- a/substrate/frame/revive/rpc/examples/js/abi/ErrorTester.ts +++ b/substrate/frame/revive/rpc/examples/js/abi/Errors.ts @@ -1,4 +1,4 @@ -export const ErrorTesterAbi = [ +export const ErrorsAbi = [ { inputs: [ { diff --git a/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts b/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts deleted file mode 100644 index f3776e498fd5..000000000000 --- a/substrate/frame/revive/rpc/examples/js/abi/errorTester.ts +++ /dev/null @@ -1,106 +0,0 @@ 
-export const ErrorTesterAbi = [ - { - inputs: [ - { - internalType: "string", - name: "message", - type: "string", - }, - ], - name: "CustomError", - type: "error", - }, - { - inputs: [ - { - internalType: "bool", - name: "newState", - type: "bool", - }, - ], - name: "setState", - outputs: [], - stateMutability: "nonpayable", - type: "function", - }, - { - inputs: [], - name: "state", - outputs: [ - { - internalType: "bool", - name: "", - type: "bool", - }, - ], - stateMutability: "view", - type: "function", - }, - { - inputs: [], - name: "triggerAssertError", - outputs: [], - stateMutability: "pure", - type: "function", - }, - { - inputs: [], - name: "triggerCustomError", - outputs: [], - stateMutability: "pure", - type: "function", - }, - { - inputs: [], - name: "triggerDivisionByZero", - outputs: [ - { - internalType: "uint256", - name: "", - type: "uint256", - }, - ], - stateMutability: "pure", - type: "function", - }, - { - inputs: [], - name: "triggerOutOfBoundsError", - outputs: [ - { - internalType: "uint256", - name: "", - type: "uint256", - }, - ], - stateMutability: "pure", - type: "function", - }, - { - inputs: [], - name: "triggerRequireError", - outputs: [], - stateMutability: "pure", - type: "function", - }, - { - inputs: [], - name: "triggerRevertError", - outputs: [], - stateMutability: "pure", - type: "function", - }, - { - inputs: [ - { - internalType: "uint256", - name: "value", - type: "uint256", - }, - ], - name: "valueMatch", - outputs: [], - stateMutability: "payable", - type: "function", - }, -] as const; diff --git a/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol b/substrate/frame/revive/rpc/examples/js/contracts/Errors.sol similarity index 98% rename from substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol rename to substrate/frame/revive/rpc/examples/js/contracts/Errors.sol index f1fdd219624a..abbdba8d32eb 100644 --- a/substrate/frame/revive/rpc/examples/js/contracts/ErrorTester.sol +++ b/substrate/frame/revive/rpc/examples/js/contracts/Errors.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -contract ErrorTester { +contract Errors { bool public state; // Payable function that can be used to test insufficient funds errors diff --git a/substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm b/substrate/frame/revive/rpc/examples/js/pvm/Errors.polkavm similarity index 100% rename from substrate/frame/revive/rpc/examples/js/pvm/ErrorTester.polkavm rename to substrate/frame/revive/rpc/examples/js/pvm/Errors.polkavm diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts index 37ebbc9ea3b3..b9ee877927bb 100644 --- a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts +++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts @@ -1,7 +1,7 @@ import { jsonRpcErrors, procs, createEnv, getByteCode } from './geth-diff-setup.ts' import { afterAll, afterEach, beforeAll, describe, expect, test } from 'bun:test' import { encodeFunctionData, Hex, parseEther } from 'viem' -import { ErrorTesterAbi } from '../abi/ErrorTester' +import { ErrorsAbi } from '../abi/Errors' import { FlipperCallerAbi } from '../abi/FlipperCaller' import { FlipperAbi } from '../abi/Flipper' @@ -17,19 +17,19 @@ const envs = await Promise.all([createEnv('geth'), createEnv('kitchensink')]) for (const env of envs) { describe(env.serverWallet.chain.name, () => { - let errorTesterAddr: Hex = '0x' + let errorsAddr: Hex = '0x' let flipperAddr: Hex = 
'0x' let flipperCallerAddr: Hex = '0x' beforeAll(async () => { { const hash = await env.serverWallet.deployContract({ - abi: ErrorTesterAbi, - bytecode: getByteCode('errorTester', env.evm), + abi: ErrorsAbi, + bytecode: getByteCode('errors', env.evm), }) const deployReceipt = await env.serverWallet.waitForTransactionReceipt({ hash }) if (!deployReceipt.contractAddress) throw new Error('Contract address should be set') - errorTesterAddr = deployReceipt.contractAddress + errorsAddr = deployReceipt.contractAddress } { @@ -60,8 +60,8 @@ for (const env of envs) { expect.assertions(3) try { await env.accountWallet.readContract({ - address: errorTesterAddr, - abi: ErrorTesterAbi, + address: errorsAddr, + abi: ErrorsAbi, functionName: 'triggerAssertError', }) } catch (err) { @@ -78,8 +78,8 @@ for (const env of envs) { expect.assertions(3) try { await env.accountWallet.readContract({ - address: errorTesterAddr, - abi: ErrorTesterAbi, + address: errorsAddr, + abi: ErrorsAbi, functionName: 'triggerRevertError', }) } catch (err) { @@ -96,8 +96,8 @@ for (const env of envs) { expect.assertions(3) try { await env.accountWallet.readContract({ - address: errorTesterAddr, - abi: ErrorTesterAbi, + address: errorsAddr, + abi: ErrorsAbi, functionName: 'triggerDivisionByZero', }) } catch (err) { @@ -116,8 +116,8 @@ for (const env of envs) { expect.assertions(3) try { await env.accountWallet.readContract({ - address: errorTesterAddr, - abi: ErrorTesterAbi, + address: errorsAddr, + abi: ErrorsAbi, functionName: 'triggerOutOfBoundsError', }) } catch (err) { @@ -136,8 +136,8 @@ for (const env of envs) { expect.assertions(3) try { await env.accountWallet.readContract({ - address: errorTesterAddr, - abi: ErrorTesterAbi, + address: errorsAddr, + abi: ErrorsAbi, functionName: 'triggerCustomError', }) } catch (err) { @@ -154,8 +154,8 @@ for (const env of envs) { expect.assertions(3) try { await env.accountWallet.simulateContract({ - address: errorTesterAddr, - abi: ErrorTesterAbi, + address: errorsAddr, + abi: ErrorsAbi, functionName: 'valueMatch', value: parseEther('10'), args: [parseEther('10')], @@ -187,8 +187,8 @@ for (const env of envs) { expect.assertions(3) try { await env.accountWallet.estimateContractGas({ - address: errorTesterAddr, - abi: ErrorTesterAbi, + address: errorsAddr, + abi: ErrorsAbi, functionName: 'valueMatch', value: parseEther('10'), args: [parseEther('10')], @@ -205,8 +205,8 @@ for (const env of envs) { expect.assertions(3) try { await env.accountWallet.estimateContractGas({ - address: errorTesterAddr, - abi: ErrorTesterAbi, + address: errorsAddr, + abi: ErrorsAbi, functionName: 'valueMatch', value: parseEther('10'), args: [parseEther('10')], @@ -223,8 +223,8 @@ for (const env of envs) { expect.assertions(3) try { await env.serverWallet.estimateContractGas({ - address: errorTesterAddr, - abi: ErrorTesterAbi, + address: errorsAddr, + abi: ErrorsAbi, functionName: 'valueMatch', value: parseEther('11'), args: [parseEther('10')], @@ -255,8 +255,8 @@ for (const env of envs) { expect(balance).toBe(0n) await env.accountWallet.estimateContractGas({ - address: errorTesterAddr, - abi: ErrorTesterAbi, + address: errorsAddr, + abi: ErrorsAbi, functionName: 'setState', args: [true], }) @@ -273,7 +273,7 @@ for (const env of envs) { expect(balance).toBe(0n) const data = encodeFunctionData({ - abi: ErrorTesterAbi, + abi: ErrorsAbi, functionName: 'setState', args: [true], }) @@ -284,7 +284,7 @@ for (const env of envs) { { data, from: env.accountWallet.account.address, - to: errorTesterAddr, + to: errorsAddr, }, 
], }) diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs index 43b600c33d78..e64e16d45b2a 100644 --- a/substrate/frame/revive/rpc/src/tests.rs +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -222,7 +222,7 @@ async fn deploy_and_call() -> anyhow::Result<()> { async fn revert_call() -> anyhow::Result<()> { let _lock = SHARED_RESOURCES.write(); let client = SharedResources::client().await; - let (bytecode, contract) = get_contract("ErrorTester")?; + let (bytecode, contract) = get_contract("Errors")?; let receipt = TransactionBuilder::default() .input(bytecode) .send_and_wait_for_receipt(&client) From bdd11933dd9399f39d9eb74915117e6c94a905f1 Mon Sep 17 00:00:00 2001 From: 0xLucca <95830307+0xLucca@users.noreply.github.com> Date: Thu, 2 Jan 2025 16:14:21 -0300 Subject: [PATCH 04/10] Remove warning log from frame-omni-bencher CLI (#7020) # Description This PR removes the outdated warning message from the `frame-omni-bencher` CLI that states the tool is "not yet battle tested". Fixes #7019 ## Integration No integration steps are required. ## Review Notes The functionality of the tool remains unchanged. Removes the warning message from the CLI output. --------- Co-authored-by: Oliver Tale-Yazdi Co-authored-by: command-bot <> --- prdoc/pr_7020.prdoc | 18 ++++++++++++++++++ substrate/utils/frame/omni-bencher/src/main.rs | 2 -- 2 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_7020.prdoc diff --git a/prdoc/pr_7020.prdoc b/prdoc/pr_7020.prdoc new file mode 100644 index 000000000000..5bbdb44c45a0 --- /dev/null +++ b/prdoc/pr_7020.prdoc @@ -0,0 +1,18 @@ +title: Remove warning log from frame-omni-bencher CLI +doc: +- audience: Node Operator + description: |- + # Description + + This PR removes the outdated warning message from the `frame-omni-bencher` CLI that states the tool is "not yet battle tested". Fixes #7019 + + ## Integration + + No integration steps are required. + + ## Review Notes + + The functionality of the tool remains unchanged. Removes the warning message from the CLI output. +crates: +- name: frame-omni-bencher + bump: patch diff --git a/substrate/utils/frame/omni-bencher/src/main.rs b/substrate/utils/frame/omni-bencher/src/main.rs index 7d8aa891dc4a..f0f9ab753b07 100644 --- a/substrate/utils/frame/omni-bencher/src/main.rs +++ b/substrate/utils/frame/omni-bencher/src/main.rs @@ -24,8 +24,6 @@ use tracing_subscriber::EnvFilter; fn main() -> Result<()> { setup_logger(); - log::warn!("The FRAME omni-bencher is not yet battle tested - double check the results.",); - command::Command::parse().run() } From 472945703925a1beb094439fd7e43149c44960d5 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Fri, 3 Jan 2025 04:29:44 +0900 Subject: [PATCH 05/10] Fix polkadot sdk doc. (#7022) If you see the doc https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html The runtime part introduction is missing. Co-authored-by: Oliver Tale-Yazdi --- docs/sdk/src/polkadot_sdk/frame_runtime.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/sdk/src/polkadot_sdk/frame_runtime.rs b/docs/sdk/src/polkadot_sdk/frame_runtime.rs index 8acf19f76413..24595e445fdd 100644 --- a/docs/sdk/src/polkadot_sdk/frame_runtime.rs +++ b/docs/sdk/src/polkadot_sdk/frame_runtime.rs @@ -57,6 +57,7 @@ //! The following example showcases a minimal pallet. #![doc = docify::embed!("src/polkadot_sdk/frame_runtime.rs", pallet)] //! +//! ## Runtime //! //! 
A runtime is a collection of pallets that are amalgamated together. Each pallet typically has //! some configurations (exposed as a `trait Config`) that needs to be *specified* in the runtime. From b7e2695163e97fcacd8264a4291375ce66a95afc Mon Sep 17 00:00:00 2001 From: Xavier Lau Date: Fri, 3 Jan 2025 05:18:18 +0800 Subject: [PATCH 06/10] Improve remote externalities logging (#7021) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changes: - Automatically detect if current env is tty. If not disable the spinner logging. - Add `Result` type. - Format log style. Originally reported from: - https://github.com/hack-ink/polkadot-runtime-releaser/blob/4811d2b419649a73edd5bd1f748a858b846eb139/action/try-runtime/action.yml#L75-L91 - https://github.com/hack-ink/polkadot-runtime-releaser-workshop/pull/3#issuecomment-2563883943 Closes #7010. --- Polkadot address: 156HGo9setPcU2qhFMVWLkcmtCEGySLwNqa3DaEiYSWtte4Y --------- Signed-off-by: Xavier Lau Co-authored-by: command-bot <> Co-authored-by: Bastian Köcher Co-authored-by: Oliver Tale-Yazdi --- prdoc/pr_7021.prdoc | 8 + .../frame/remote-externalities/src/lib.rs | 219 +++++++++--------- .../frame/remote-externalities/src/logging.rs | 86 +++++++ 3 files changed, 205 insertions(+), 108 deletions(-) create mode 100644 prdoc/pr_7021.prdoc create mode 100644 substrate/utils/frame/remote-externalities/src/logging.rs diff --git a/prdoc/pr_7021.prdoc b/prdoc/pr_7021.prdoc new file mode 100644 index 000000000000..5443579bbd92 --- /dev/null +++ b/prdoc/pr_7021.prdoc @@ -0,0 +1,8 @@ +title: Improve remote externalities logging +doc: +- audience: Node Dev + description: |- + Automatically detect if current env is tty. If not disable the spinner logging. +crates: +- name: frame-remote-externalities + bump: patch diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 75a2ac2aef41..4c49663260bb 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -20,6 +20,8 @@ //! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate //! based chain, or a local state snapshot file. +mod logging; + use codec::{Compact, Decode, Encode}; use indicatif::{ProgressBar, ProgressStyle}; use jsonrpsee::{core::params::ArrayParams, http_client::HttpClient}; @@ -37,7 +39,6 @@ use sp_runtime::{ StateVersion, }; use sp_state_machine::TestExternalities; -use spinners::{Spinner, Spinners}; use std::{ cmp::{max, min}, fs, @@ -49,6 +50,8 @@ use std::{ use substrate_rpc_client::{rpc_params, BatchRequestBuilder, ChainApi, ClientT, StateApi}; use tokio_retry::{strategy::FixedInterval, Retry}; +type Result = std::result::Result; + type KeyValue = (StorageKey, StorageData); type TopKeyValues = Vec; type ChildKeyValues = Vec<(ChildInfo, Vec)>; @@ -87,7 +90,7 @@ impl Snapshot { } } - fn load(path: &PathBuf) -> Result, &'static str> { + fn load(path: &PathBuf) -> Result> { let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; // The first item in the SCALE encoded struct bytes is the snapshot version. We decode and // check that first, before proceeding to decode the rest of the snapshot. @@ -168,9 +171,9 @@ impl Transport { } // Build an HttpClient from a URI. 
- async fn init(&mut self) -> Result<(), &'static str> { + async fn init(&mut self) -> Result<()> { if let Self::Uri(uri) = self { - log::debug!(target: LOG_TARGET, "initializing remote client to {:?}", uri); + debug!(target: LOG_TARGET, "initializing remote client to {uri:?}"); // If we have a ws uri, try to convert it to an http uri. // We use an HTTP client rather than WS because WS starts to choke with "accumulated @@ -178,11 +181,11 @@ impl Transport { // from a node running a default configuration. let uri = if uri.starts_with("ws://") { let uri = uri.replace("ws://", "http://"); - log::info!(target: LOG_TARGET, "replacing ws:// in uri with http://: {:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)", uri); + info!(target: LOG_TARGET, "replacing ws:// in uri with http://: {uri:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)"); uri } else if uri.starts_with("wss://") { let uri = uri.replace("wss://", "https://"); - log::info!(target: LOG_TARGET, "replacing wss:// in uri with https://: {:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)", uri); + info!(target: LOG_TARGET, "replacing wss:// in uri with https://: {uri:?} (ws is currently unstable for fetching remote storage, for more see https://github.com/paritytech/jsonrpsee/issues/1086)"); uri } else { uri.clone() @@ -193,7 +196,7 @@ impl Transport { .request_timeout(std::time::Duration::from_secs(60 * 5)) .build(uri) .map_err(|e| { - log::error!(target: LOG_TARGET, "error: {:?}", e); + error!(target: LOG_TARGET, "error: {e:?}"); "failed to build http client" })?; @@ -364,23 +367,23 @@ where &self, key: StorageKey, maybe_at: Option, - ) -> Result, &'static str> { + ) -> Result> { trace!(target: LOG_TARGET, "rpc: get_storage"); self.as_online().rpc_client().storage(key, maybe_at).await.map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); + error!(target: LOG_TARGET, "Error = {e:?}"); "rpc get_storage failed." }) } /// Get the latest finalized head. - async fn rpc_get_head(&self) -> Result { + async fn rpc_get_head(&self) -> Result { trace!(target: LOG_TARGET, "rpc: finalized_head"); // sadly this pretty much unreadable... ChainApi::<(), _, B::Header, ()>::finalized_head(self.as_online().rpc_client()) .await .map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); + error!(target: LOG_TARGET, "Error = {e:?}"); "rpc finalized_head failed." }) } @@ -390,13 +393,13 @@ where prefix: Option, start_key: Option, at: B::Hash, - ) -> Result, &'static str> { + ) -> Result> { self.as_online() .rpc_client() .storage_keys_paged(prefix, Self::DEFAULT_KEY_DOWNLOAD_PAGE, start_key, Some(at)) .await .map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); + error!(target: LOG_TARGET, "Error = {e:?}"); "rpc get_keys failed" }) } @@ -407,7 +410,7 @@ where prefix: &StorageKey, block: B::Hash, parallel: usize, - ) -> Result, &'static str> { + ) -> Result> { /// Divide the workload and return the start key of each chunks. Guaranteed to return a /// non-empty list. 
fn gen_start_keys(prefix: &StorageKey) -> Vec { @@ -491,7 +494,7 @@ where block: B::Hash, start_key: Option<&StorageKey>, end_key: Option<&StorageKey>, - ) -> Result, &'static str> { + ) -> Result> { let mut last_key: Option<&StorageKey> = start_key; let mut keys: Vec = vec![]; @@ -518,11 +521,11 @@ where // scraping out of range or no more matches, // we are done either way if page_len < Self::DEFAULT_KEY_DOWNLOAD_PAGE as usize { - log::debug!(target: LOG_TARGET, "last page received: {}", page_len); + debug!(target: LOG_TARGET, "last page received: {page_len}"); break } - log::debug!( + debug!( target: LOG_TARGET, "new total = {}, full page received: {}", keys.len(), @@ -589,11 +592,10 @@ where let total_payloads = payloads.len(); while start_index < total_payloads { - log::debug!( + debug!( target: LOG_TARGET, - "Remaining payloads: {} Batch request size: {}", + "Remaining payloads: {} Batch request size: {batch_size}", total_payloads - start_index, - batch_size, ); let end_index = usize::min(start_index + batch_size, total_payloads); @@ -620,18 +622,16 @@ where retries += 1; let failure_log = format!( - "Batch request failed ({}/{} retries). Error: {}", - retries, - Self::MAX_RETRIES, - e + "Batch request failed ({retries}/{} retries). Error: {e}", + Self::MAX_RETRIES ); // after 2 subsequent failures something very wrong is happening. log a warning // and reset the batch size down to 1. if retries >= 2 { - log::warn!("{}", failure_log); + warn!("{failure_log}"); batch_size = 1; } else { - log::debug!("{}", failure_log); + debug!("{failure_log}"); // Decrease batch size by DECREASE_FACTOR batch_size = (batch_size as f32 * Self::BATCH_SIZE_DECREASE_FACTOR) as usize; @@ -655,13 +655,11 @@ where ) }; - log::debug!( + debug!( target: LOG_TARGET, - "Request duration: {:?} Target duration: {:?} Last batch size: {} Next batch size: {}", - request_duration, + "Request duration: {request_duration:?} Target duration: {:?} Last batch size: {} Next batch size: {batch_size}", Self::REQUEST_DURATION_TARGET, end_index - start_index, - batch_size ); let batch_response_len = batch_response.len(); @@ -689,21 +687,24 @@ where prefix: StorageKey, at: B::Hash, pending_ext: &mut TestExternalities>, - ) -> Result, &'static str> { - let start = Instant::now(); - let mut sp = Spinner::with_timer(Spinners::Dots, "Scraping keys...".into()); - // TODO We could start downloading when having collected the first batch of keys - // https://github.com/paritytech/polkadot-sdk/issues/2494 - let keys = self - .rpc_get_keys_parallel(&prefix, at, Self::PARALLEL_REQUESTS) - .await? - .into_iter() - .collect::>(); - sp.stop_with_message(format!( - "✅ Found {} keys ({:.2}s)", - keys.len(), - start.elapsed().as_secs_f32() - )); + ) -> Result> { + let keys = logging::with_elapsed_async( + || async { + // TODO: We could start downloading when having collected the first batch of keys. + // https://github.com/paritytech/polkadot-sdk/issues/2494 + let keys = self + .rpc_get_keys_parallel(&prefix, at, Self::PARALLEL_REQUESTS) + .await? 
+ .into_iter() + .collect::>(); + + Ok(keys) + }, + "Scraping keys...", + |keys| format!("Found {} keys", keys.len()), + ) + .await?; + if keys.is_empty() { return Ok(Default::default()) } @@ -735,7 +736,7 @@ where let storage_data = match storage_data_result { Ok(storage_data) => storage_data.into_iter().flatten().collect::>(), Err(e) => { - log::error!(target: LOG_TARGET, "Error while getting storage data: {}", e); + error!(target: LOG_TARGET, "Error while getting storage data: {e}"); return Err("Error while getting storage data") }, }; @@ -751,27 +752,31 @@ where .map(|(key, maybe_value)| match maybe_value { Some(data) => (key.clone(), data), None => { - log::warn!(target: LOG_TARGET, "key {:?} had none corresponding value.", &key); + warn!(target: LOG_TARGET, "key {key:?} had none corresponding value."); let data = StorageData(vec![]); (key.clone(), data) }, }) .collect::>(); - let mut sp = Spinner::with_timer(Spinners::Dots, "Inserting keys into DB...".into()); - let start = Instant::now(); - pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| { - // Don't insert the child keys here, they need to be inserted separately with all their - // data in the load_child_remote function. - match is_default_child_storage_key(&k.0) { - true => None, - false => Some((k.0, v.0)), - } - })); - sp.stop_with_message(format!( - "✅ Inserted keys into DB ({:.2}s)", - start.elapsed().as_secs_f32() - )); + logging::with_elapsed( + || { + pending_ext.batch_insert(key_values.clone().into_iter().filter_map(|(k, v)| { + // Don't insert the child keys here, they need to be inserted separately with + // all their data in the load_child_remote function. + match is_default_child_storage_key(&k.0) { + true => None, + false => Some((k.0, v.0)), + } + })); + + Ok(()) + }, + "Inserting keys into DB...", + |_| "Inserted keys into DB".into(), + ) + .expect("must succeed; qed"); + Ok(key_values) } @@ -781,7 +786,7 @@ where prefixed_top_key: &StorageKey, child_keys: Vec, at: B::Hash, - ) -> Result, &'static str> { + ) -> Result> { let child_keys_len = child_keys.len(); let payloads = child_keys @@ -803,7 +808,7 @@ where match Self::get_storage_data_dynamic_batch_size(client, payloads, &bar).await { Ok(storage_data) => storage_data, Err(e) => { - log::error!(target: LOG_TARGET, "batch processing failed: {:?}", e); + error!(target: LOG_TARGET, "batch processing failed: {e:?}"); return Err("batch processing failed") }, }; @@ -816,7 +821,7 @@ where .map(|(key, maybe_value)| match maybe_value { Some(v) => (key.clone(), v), None => { - log::warn!(target: LOG_TARGET, "key {:?} had no corresponding value.", &key); + warn!(target: LOG_TARGET, "key {key:?} had no corresponding value."); (key.clone(), StorageData(vec![])) }, }) @@ -828,7 +833,7 @@ where prefixed_top_key: &StorageKey, child_prefix: StorageKey, at: B::Hash, - ) -> Result, &'static str> { + ) -> Result> { let retry_strategy = FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); let mut all_child_keys = Vec::new(); @@ -850,7 +855,7 @@ where let child_keys = Retry::spawn(retry_strategy.clone(), get_child_keys_closure) .await .map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); + error!(target: LOG_TARGET, "Error = {e:?}"); "rpc child_get_keys failed." 
})?; @@ -896,7 +901,7 @@ where &self, top_kv: &[KeyValue], pending_ext: &mut TestExternalities>, - ) -> Result { + ) -> Result { let child_roots = top_kv .iter() .filter(|(k, _)| is_default_child_storage_key(k.as_ref())) @@ -904,7 +909,7 @@ where .collect::>(); if child_roots.is_empty() { - info!(target: LOG_TARGET, "👩‍👦 no child roots found to scrape",); + info!(target: LOG_TARGET, "👩‍👦 no child roots found to scrape"); return Ok(Default::default()) } @@ -930,7 +935,7 @@ where let un_prefixed = match ChildType::from_prefixed_key(&prefixed_top_key) { Some((ChildType::ParentKeyId, storage_key)) => storage_key, None => { - log::error!(target: LOG_TARGET, "invalid key: {:?}", prefixed_top_key); + error!(target: LOG_TARGET, "invalid key: {prefixed_top_key:?}"); return Err("Invalid child key") }, }; @@ -954,13 +959,13 @@ where async fn load_top_remote( &self, pending_ext: &mut TestExternalities>, - ) -> Result { + ) -> Result { let config = self.as_online(); let at = self .as_online() .at .expect("online config must be initialized by this point; qed."); - log::info!(target: LOG_TARGET, "scraping key-pairs from remote at block height {:?}", at); + info!(target: LOG_TARGET, "scraping key-pairs from remote at block height {at:?}"); let mut keys_and_values = Vec::new(); for prefix in &config.hashed_prefixes { @@ -968,7 +973,7 @@ where let additional_key_values = self.rpc_get_pairs(StorageKey(prefix.to_vec()), at, pending_ext).await?; let elapsed = now.elapsed(); - log::info!( + info!( target: LOG_TARGET, "adding data for hashed prefix: {:?}, took {:.2}s", HexDisplay::from(prefix), @@ -979,7 +984,7 @@ where for key in &config.hashed_keys { let key = StorageKey(key.to_vec()); - log::info!( + info!( target: LOG_TARGET, "adding data for hashed key: {:?}", HexDisplay::from(&key) @@ -990,7 +995,7 @@ where keys_and_values.push((key, value)); }, None => { - log::warn!( + warn!( target: LOG_TARGET, "no data found for hashed key: {:?}", HexDisplay::from(&key) @@ -1005,17 +1010,16 @@ where /// The entry point of execution, if `mode` is online. /// /// initializes the remote client in `transport`, and sets the `at` field, if not specified. - async fn init_remote_client(&mut self) -> Result<(), &'static str> { + async fn init_remote_client(&mut self) -> Result<()> { // First, initialize the http client. self.as_online_mut().transport.init().await?; // Then, if `at` is not set, set it. if self.as_online().at.is_none() { let at = self.rpc_get_head().await?; - log::info!( + info!( target: LOG_TARGET, - "since no at is provided, setting it to latest finalized head, {:?}", - at + "since no at is provided, setting it to latest finalized head, {at:?}", ); self.as_online_mut().at = Some(at); } @@ -1040,7 +1044,7 @@ where .filter(|p| *p != DEFAULT_CHILD_STORAGE_KEY_PREFIX) .count() == 0 { - log::info!( + info!( target: LOG_TARGET, "since no prefix is filtered, the data for all pallets will be downloaded" ); @@ -1050,7 +1054,7 @@ where Ok(()) } - async fn load_header(&self) -> Result { + async fn load_header(&self) -> Result { let retry_strategy = FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); let get_header_closure = || { @@ -1069,14 +1073,12 @@ where /// `load_child_remote`. /// /// Must be called after `init_remote_client`. 
- async fn load_remote_and_maybe_save( - &mut self, - ) -> Result>, &'static str> { + async fn load_remote_and_maybe_save(&mut self) -> Result>> { let state_version = StateApi::::runtime_version(self.as_online().rpc_client(), None) .await .map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); + error!(target: LOG_TARGET, "Error = {e:?}"); "rpc runtime_version failed." }) .map(|v| v.state_version())?; @@ -1100,11 +1102,10 @@ where self.load_header().await?, ); let encoded = snapshot.encode(); - log::info!( + info!( target: LOG_TARGET, - "writing snapshot of {} bytes to {:?}", + "writing snapshot of {} bytes to {path:?}", encoded.len(), - path ); std::fs::write(path, encoded).map_err(|_| "fs::write failed")?; @@ -1119,33 +1120,35 @@ where Ok(pending_ext) } - async fn do_load_remote(&mut self) -> Result, &'static str> { + async fn do_load_remote(&mut self) -> Result> { self.init_remote_client().await?; let inner_ext = self.load_remote_and_maybe_save().await?; Ok(RemoteExternalities { header: self.load_header().await?, inner_ext }) } - fn do_load_offline( - &mut self, - config: OfflineConfig, - ) -> Result, &'static str> { - let mut sp = Spinner::with_timer(Spinners::Dots, "Loading snapshot...".into()); - let start = Instant::now(); - info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path); - let Snapshot { snapshot_version: _, header, state_version, raw_storage, storage_root } = - Snapshot::::load(&config.state_snapshot.path)?; - - let inner_ext = TestExternalities::from_raw_snapshot( - raw_storage, - storage_root, - self.overwrite_state_version.unwrap_or(state_version), - ); - sp.stop_with_message(format!("✅ Loaded snapshot ({:.2}s)", start.elapsed().as_secs_f32())); + fn do_load_offline(&mut self, config: OfflineConfig) -> Result> { + let (header, inner_ext) = logging::with_elapsed( + || { + info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path); + + let Snapshot { header, state_version, raw_storage, storage_root, .. } = + Snapshot::::load(&config.state_snapshot.path)?; + let inner_ext = TestExternalities::from_raw_snapshot( + raw_storage, + storage_root, + self.overwrite_state_version.unwrap_or(state_version), + ); + + Ok((header, inner_ext)) + }, + "Loading snapshot...", + |_| "Loaded snapshot".into(), + )?; Ok(RemoteExternalities { inner_ext, header }) } - pub(crate) async fn pre_build(mut self) -> Result, &'static str> { + pub(crate) async fn pre_build(mut self) -> Result> { let mut ext = match self.mode.clone() { Mode::Offline(config) => self.do_load_offline(config)?, Mode::Online(_) => self.do_load_remote().await?, @@ -1159,7 +1162,7 @@ where // inject manual key values. if !self.hashed_key_values.is_empty() { - log::info!( + info!( target: LOG_TARGET, "extending externalities with {} manually injected key-values", self.hashed_key_values.len() @@ -1169,7 +1172,7 @@ where // exclude manual key values. 
if !self.hashed_blacklist.is_empty() { - log::info!( + info!( target: LOG_TARGET, "excluding externalities from {} keys", self.hashed_blacklist.len() @@ -1221,7 +1224,7 @@ where self } - pub async fn build(self) -> Result, &'static str> { + pub async fn build(self) -> Result> { let mut ext = self.pre_build().await?; ext.commit_all().unwrap(); diff --git a/substrate/utils/frame/remote-externalities/src/logging.rs b/substrate/utils/frame/remote-externalities/src/logging.rs new file mode 100644 index 000000000000..7ab901c004de --- /dev/null +++ b/substrate/utils/frame/remote-externalities/src/logging.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{ + future::Future, + io::{self, IsTerminal}, + time::Instant, +}; + +use spinners::{Spinner, Spinners}; + +use super::Result; + +// A simple helper to time a operation with a nice spinner, start message, and end message. +// +// The spinner is only displayed when stdout is a terminal. +pub(super) fn with_elapsed(f: F, start_msg: &str, end_msg: EndMsg) -> Result +where + F: FnOnce() -> Result, + EndMsg: FnOnce(&R) -> String, +{ + let timer = Instant::now(); + let mut maybe_sp = start(start_msg); + + Ok(end(f()?, timer, maybe_sp.as_mut(), end_msg)) +} + +// A simple helper to time an async operation with a nice spinner, start message, and end message. +// +// The spinner is only displayed when stdout is a terminal. +pub(super) async fn with_elapsed_async( + f: F, + start_msg: &str, + end_msg: EndMsg, +) -> Result +where + F: FnOnce() -> Fut, + Fut: Future>, + EndMsg: FnOnce(&R) -> String, +{ + let timer = Instant::now(); + let mut maybe_sp = start(start_msg); + + Ok(end(f().await?, timer, maybe_sp.as_mut(), end_msg)) +} + +fn start(start_msg: &str) -> Option { + let msg = format!("⏳ {start_msg}"); + + if io::stdout().is_terminal() { + Some(Spinner::new(Spinners::Dots, msg)) + } else { + println!("{msg}"); + + None + } +} + +fn end(val: T, timer: Instant, maybe_sp: Option<&mut Spinner>, end_msg: EndMsg) -> T +where + EndMsg: FnOnce(&T) -> String, +{ + let msg = format!("✅ {} in {:.2}s", end_msg(&val), timer.elapsed().as_secs_f32()); + + if let Some(sp) = maybe_sp { + sp.stop_with_message(msg); + } else { + println!("{msg}"); + } + + val +} From f3ab3854e1df9e0498599f01ba4f9f152426432a Mon Sep 17 00:00:00 2001 From: Utkarsh Bhardwaj Date: Fri, 3 Jan 2025 10:39:39 +0000 Subject: [PATCH 07/10] migrate pallet-mixnet to umbrella crate (#6986) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Migrate pallet-mixnet to use umbrella crate whilst adding a few types and traits in the frame prelude that are used by other pallets as well. ## Review Notes * This PR migrates `pallet-mixnet` to use the umbrella crate. 
* Note that some imports like `use sp_application_crypto::RuntimeAppPublic;`
  and imports from `sp_mixnet::types::` have not been migrated to the umbrella
  crate as they are not used in many other places and are relevant only to
  `pallet-mixnet`.
* Transaction related helpers to submit transactions from `frame-system` have
  been added to the main `prelude` as they have usage across various pallets.

```Rust
pub use frame_system::offchain::*;
```

* Exporting the `arithmetic` module in the main `prelude` since this is used a
  lot throughout various pallets.
* Nightly formatting has been applied using `cargo fmt`.
* Benchmarking dependencies have been removed from `pallet-mixnet` as there is
  no benchmarking.rs present for `pallet-mixnet`. For the same reason,
  `"pallet-mixnet?/runtime-benchmarks"` has been removed from
  `umbrella/Cargo.toml`.

---------

Co-authored-by: Dónal Murray
---
 Cargo.lock                        |  7 +---
 prdoc/pr_6986.prdoc               | 18 ++++++++++
 substrate/frame/mixnet/Cargo.toml | 24 ++-----------
 substrate/frame/mixnet/src/lib.rs | 60 ++++++++++++++-----------------
 substrate/frame/src/lib.rs        | 19 ++++++++--
 umbrella/Cargo.toml               |  1 -
 6 files changed, 64 insertions(+), 65 deletions(-)
 create mode 100644 prdoc/pr_6986.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 6151ed33c5b6..3c55a14256c5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -14029,18 +14029,13 @@ dependencies = [
 name = "pallet-mixnet"
 version = "0.4.0"
 dependencies = [
- "frame-benchmarking 28.0.0",
- "frame-support 28.0.0",
- "frame-system 28.0.0",
 "log",
 "parity-scale-codec",
+ "polkadot-sdk-frame 0.1.0",
 "scale-info",
 "serde",
 "sp-application-crypto 30.0.0",
- "sp-arithmetic 23.0.0",
- "sp-io 30.0.0",
 "sp-mixnet 0.4.0",
- "sp-runtime 31.0.1",
 ]
 
 [[package]]

diff --git a/prdoc/pr_6986.prdoc b/prdoc/pr_6986.prdoc
new file mode 100644
index 000000000000..8deb6b04bd1c
--- /dev/null
+++ b/prdoc/pr_6986.prdoc
@@ -0,0 +1,18 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: '[pallet-mixnet] Migrate to using frame umbrella crate'
+
+doc:
+  - audience: Runtime Dev
+    description: This PR migrates the pallet-mixnet to use the frame umbrella crate. This
+      is part of the ongoing effort to migrate all pallets to use the frame umbrella crate.
+      The effort is tracked [here](https://github.com/paritytech/polkadot-sdk/issues/6504). 
+ +crates: + - name: pallet-mixnet + bump: minor + - name: polkadot-sdk-frame + bump: minor + - name: polkadot-sdk + bump: none \ No newline at end of file diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index bb5e84864566..0ae3b3938c60 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -17,42 +17,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { features = ["derive", "max-encoded-len"], workspace = true } -frame-benchmarking = { optional = true, workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } +frame = { workspace = true, features = ["experimental", "runtime"] } log = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { features = ["derive"], workspace = true } sp-application-crypto = { workspace = true } -sp-arithmetic = { workspace = true } -sp-io = { workspace = true } sp-mixnet = { workspace = true } -sp-runtime = { workspace = true } [features] default = ["std"] std = [ "codec/std", - "frame-benchmarking?/std", - "frame-support/std", - "frame-system/std", + "frame/std", "log/std", "scale-info/std", "serde/std", "sp-application-crypto/std", - "sp-arithmetic/std", - "sp-io/std", "sp-mixnet/std", - "sp-runtime/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", ] try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "sp-runtime/try-runtime", + "frame/try-runtime", ] diff --git a/substrate/frame/mixnet/src/lib.rs b/substrate/frame/mixnet/src/lib.rs index 6579ed678ae7..984981817676 100644 --- a/substrate/frame/mixnet/src/lib.rs +++ b/substrate/frame/mixnet/src/lib.rs @@ -23,28 +23,23 @@ extern crate alloc; +pub use pallet::*; + use alloc::vec::Vec; -use codec::{Decode, Encode, MaxEncodedLen}; use core::cmp::Ordering; -use frame_support::{ - traits::{EstimateNextSessionRotation, Get, OneSessionHandler}, - BoundedVec, +use frame::{ + deps::{ + sp_io::{self, MultiRemovalResults}, + sp_runtime, + }, + prelude::*, }; -use frame_system::{ - offchain::{CreateInherent, SubmitTransaction}, - pallet_prelude::BlockNumberFor, -}; -pub use pallet::*; -use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; use sp_application_crypto::RuntimeAppPublic; -use sp_arithmetic::traits::{CheckedSub, Saturating, UniqueSaturatedInto, Zero}; -use sp_io::MultiRemovalResults; use sp_mixnet::types::{ AuthorityId, AuthoritySignature, KxPublic, Mixnode, MixnodesErr, PeerId, SessionIndex, SessionPhase, SessionStatus, KX_PUBLIC_SIZE, }; -use sp_runtime::RuntimeDebug; const LOG_TARGET: &str = "runtime::mixnet"; @@ -168,12 +163,9 @@ fn twox>( // The pallet //////////////////////////////////////////////////////////////////////////////// -#[frame_support::pallet(dev_mode)] +#[frame::pallet(dev_mode)] pub mod pallet { use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - #[pallet::pallet] pub struct Pallet(_); @@ -254,7 +246,7 @@ pub mod pallet { StorageDoubleMap<_, Identity, SessionIndex, Identity, AuthorityIndex, BoundedMixnodeFor>; #[pallet::genesis_config] - #[derive(frame_support::DefaultNoBound)] + #[derive(DefaultNoBound)] pub struct GenesisConfig { /// The mixnode set for the very first session. 
pub mixnodes: BoundedVec, T::MaxAuthorities>, @@ -308,7 +300,7 @@ pub mod pallet { fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { let Self::Call::register { registration, signature } = call else { - return InvalidTransaction::Call.into() + return InvalidTransaction::Call.into(); }; // Check session index matches @@ -320,16 +312,16 @@ pub mod pallet { // Check authority index is valid if registration.authority_index >= T::MaxAuthorities::get() { - return InvalidTransaction::BadProof.into() + return InvalidTransaction::BadProof.into(); } let Some(authority_id) = NextAuthorityIds::::get(registration.authority_index) else { - return InvalidTransaction::BadProof.into() + return InvalidTransaction::BadProof.into(); }; // Check the authority hasn't registered a mixnode yet if Self::already_registered(registration.session_index, registration.authority_index) { - return InvalidTransaction::Stale.into() + return InvalidTransaction::Stale.into(); } // Check signature. Note that we don't use regular signed transactions for registration @@ -339,7 +331,7 @@ pub mod pallet { authority_id.verify(&encoded_registration, signature) }); if !signature_ok { - return InvalidTransaction::BadProof.into() + return InvalidTransaction::BadProof.into(); } ValidTransaction::with_tag_prefix("MixnetRegistration") @@ -368,12 +360,12 @@ impl Pallet { .saturating_sub(CurrentSessionStartBlock::::get()); let Some(block_in_phase) = block_in_phase.checked_sub(&T::NumCoverToCurrentBlocks::get()) else { - return SessionPhase::CoverToCurrent + return SessionPhase::CoverToCurrent; }; let Some(block_in_phase) = block_in_phase.checked_sub(&T::NumRequestsToCurrentBlocks::get()) else { - return SessionPhase::RequestsToCurrent + return SessionPhase::RequestsToCurrent; }; if block_in_phase < T::NumCoverToPrevBlocks::get() { SessionPhase::CoverToPrev @@ -411,7 +403,7 @@ impl Pallet { return Err(MixnodesErr::InsufficientRegistrations { num: 0, min: T::MinMixnodes::get(), - }) + }); }; Self::mixnodes(prev_session_index) } @@ -430,7 +422,7 @@ impl Pallet { // registering let block_in_session = block_number.saturating_sub(CurrentSessionStartBlock::::get()); if block_in_session < T::NumRegisterStartSlackBlocks::get() { - return false + return false; } let (Some(end_block), _weight) = @@ -438,7 +430,7 @@ impl Pallet { else { // Things aren't going to work terribly well in this case as all the authorities will // just pile in after the slack period... - return true + return true; }; let remaining_blocks = end_block @@ -447,7 +439,7 @@ impl Pallet { if remaining_blocks.is_zero() { // Into the slack time at the end of the session. Not necessarily too late; // registrations are accepted right up until the session ends. 
- return true + return true; } // Want uniform distribution over the remaining blocks, so pick this block with probability @@ -496,7 +488,7 @@ impl Pallet { "Session {session_index} registration attempted, \ but current session is {current_session_index}", ); - return false + return false; } let block_number = frame_system::Pallet::::block_number(); @@ -505,7 +497,7 @@ impl Pallet { target: LOG_TARGET, "Waiting for the session to progress further before registering", ); - return false + return false; } let Some((authority_index, authority_id)) = Self::next_local_authority() else { @@ -513,7 +505,7 @@ impl Pallet { target: LOG_TARGET, "Not an authority in the next session; cannot register a mixnode", ); - return false + return false; }; if Self::already_registered(session_index, authority_index) { @@ -521,14 +513,14 @@ impl Pallet { target: LOG_TARGET, "Already registered a mixnode for the next session", ); - return false + return false; } let registration = Registration { block_number, session_index, authority_index, mixnode: mixnode.into() }; let Some(signature) = authority_id.sign(®istration.encode()) else { log::debug!(target: LOG_TARGET, "Failed to sign registration"); - return false + return false; }; let call = Call::register { registration, signature }; let xt = T::create_inherent(call.into()); diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index 8031ddf96e6a..b3e340cbcbff 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -203,12 +203,18 @@ pub mod prelude { /// Dispatch types from `frame-support`, other fundamental traits #[doc(no_inline)] pub use frame_support::dispatch::{GetDispatchInfo, PostDispatchInfo}; - pub use frame_support::traits::{Contains, IsSubType, OnRuntimeUpgrade}; + pub use frame_support::traits::{ + Contains, EstimateNextSessionRotation, IsSubType, OnRuntimeUpgrade, OneSessionHandler, + }; /// Pallet prelude of `frame-system`. #[doc(no_inline)] pub use frame_system::pallet_prelude::*; + /// Transaction related helpers to submit transactions. + #[doc(no_inline)] + pub use frame_system::offchain::*; + /// All FRAME-relevant derive macros. #[doc(no_inline)] pub use super::derive::*; @@ -216,6 +222,9 @@ pub mod prelude { /// All hashing related things pub use super::hashing::*; + /// All arithmetic types and traits used for safe math. + pub use super::arithmetic::*; + /// Runtime traits #[doc(no_inline)] pub use sp_runtime::traits::{ @@ -223,9 +232,11 @@ pub mod prelude { Saturating, StaticLookup, TrailingZeroInput, }; - /// Other error/result types for runtime + /// Other runtime types and traits #[doc(no_inline)] - pub use sp_runtime::{DispatchErrorWithPostInfo, DispatchResultWithInfo, TokenError}; + pub use sp_runtime::{ + BoundToRuntimeAppPublic, DispatchErrorWithPostInfo, DispatchResultWithInfo, TokenError, + }; } #[cfg(any(feature = "try-runtime", test))] @@ -509,6 +520,8 @@ pub mod traits { } /// The arithmetic types used for safe math. +/// +/// This is already part of the [`prelude`]. 
pub mod arithmetic { pub use sp_arithmetic::{traits::*, *}; } diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index f36d39d63f6a..d2a47ade7f87 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -290,7 +290,6 @@ runtime-benchmarks = [ "pallet-membership?/runtime-benchmarks", "pallet-message-queue?/runtime-benchmarks", "pallet-migrations?/runtime-benchmarks", - "pallet-mixnet?/runtime-benchmarks", "pallet-mmr?/runtime-benchmarks", "pallet-multisig?/runtime-benchmarks", "pallet-nft-fractionalization?/runtime-benchmarks", From 659f4848a7564c45d8d3a3d13c7596801050da82 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 3 Jan 2025 13:29:29 +0100 Subject: [PATCH 08/10] [docs] Fix release naming (#7032) - **[docs] Fix release naming** - **Remove outdated and unmaintained file** Closes https://github.com/paritytech/polkadot-sdk/issues/6998 --------- Signed-off-by: Oliver Tale-Yazdi --- README.md | 4 +- cumulus/docs/release.md | 135 ---------------------------------------- docs/RELEASE.md | 6 +- 3 files changed, 7 insertions(+), 138 deletions(-) delete mode 100644 cumulus/docs/release.md diff --git a/README.md b/README.md index 6c0dfbb2e7e4..24352cc28a1a 100644 --- a/README.md +++ b/README.md @@ -40,9 +40,9 @@ curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/paritytec ![Current Stable Release](https://raw.githubusercontent.com/paritytech/release-registry/main/badges/polkadot-sdk-latest.svg)  ![Next Stable Release](https://raw.githubusercontent.com/paritytech/release-registry/main/badges/polkadot-sdk-next.svg) -The Polkadot SDK is released every three months as a `stableYYMMDD` release. They are supported for +The Polkadot SDK is released every three months as a `stableYYMM` release. They are supported for one year with patches. See the next upcoming versions in the [Release -Registry](https://github.com/paritytech/release-registry/). +Registry](https://github.com/paritytech/release-registry/) and more docs in [RELEASE.md](./docs/RELEASE.md). You can use [`psvm`](https://github.com/paritytech/psvm) to update all dependencies to a specific version without needing to manually select the correct version for each crate. diff --git a/cumulus/docs/release.md b/cumulus/docs/release.md deleted file mode 100644 index 8302b7b9b7fc..000000000000 --- a/cumulus/docs/release.md +++ /dev/null @@ -1,135 +0,0 @@ -# Releases - -## Versioning - -### Example #1 - -``` -| Polkadot | v 0. 9.22 | -| Client | v 0. 9.22 0 | -| Runtime | v 9 22 0 | => 9220 -| semver | 0. 9.22 0 | -``` - -### Example #2 - -``` -| Polkadot | v 0.10.42 | -| Client | v 0.10.42 0 | -| Runtime | v 10.42 0 | => 10420 -| semver | 0.10.42 0 | -``` - -### Example #3 - -``` -| Polkadot | v 1. 2.18 | -| Client | v 1. 2.18 0 | -| Runtime | v 1 2 18 0 | => 102180 -| semver | 1. 2.18 0 | -``` - - -This document contains information related to the releasing process and describes a few of the steps and checks that are -performed during the release process. - -## Client - -### Burn In - -Ensure that Parity DevOps has run the new release on Westend and Kusama Asset Hub collators for 12h prior to publishing -the release. - -### Build Artifacts - -Add any necessary assets to the release. They should include: - -- Linux binaries - - GPG signature - - SHA256 checksum -- WASM binaries of the runtimes -- Source code - - -## Runtimes - -### Spec Version - -A new runtime release must bump the `spec_version`. This may follow a pattern with the client release (e.g. runtime -v9220 corresponds to v0.9.22). 
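The versioning tables above map client versions to runtime `spec_version`s. As a purely illustrative sketch of where these numbers live, a runtime's version constant looks roughly like the following; all names and values are hypothetical, and the exact field types vary slightly between SDK releases:

```rust
// Illustrative only: how the versioning rules in this document show up in a
// runtime's version constant. Not taken from any runtime in this repository.
extern crate alloc; // runtimes are no_std, so Cow comes from alloc
use alloc::borrow::Cow;
use sp_version::RuntimeVersion;

pub const VERSION: RuntimeVersion = RuntimeVersion {
	spec_name: Cow::Borrowed("example-parachain"),
	impl_name: Cow::Borrowed("example-parachain"),
	authoring_version: 1,
	// Bumped for every deployed runtime release, e.g. 9220 alongside client v0.9.22.
	spec_version: 9220,
	// Bumped between release candidates instead of `spec_version`, as described below.
	impl_version: 0,
	// Empty in this sketch; `impl_runtime_apis!` generates the real list.
	apis: sp_version::create_apis_vec!([]),
	// Bumped when call indices or extrinsic signatures change, as described below.
	transaction_version: 1,
	state_version: 1,
};
```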
- -### Runtime version bump between RCs - -The clients need to be aware of runtime changes. However, we do not want to bump the `spec_version` for every single -release candidate. Instead, we can bump the `impl` field of the version to signal the change to the client. This applies -only to runtimes that have been deployed. - -### Old Migrations Removed - -Previous `on_runtime_upgrade` functions from old upgrades should be removed. - -### New Migrations - -Ensure that any migrations that are required due to storage or logic changes are included in the `on_runtime_upgrade` -function of the appropriate pallets. - -### Extrinsic Ordering & Storage - -Offline signing libraries depend on a consistent ordering of call indices and functions. Compare the metadata of the -current and new runtimes and ensure that the `module index, call index` tuples map to the same set of functions. It also -checks if there have been any changes in `storage`. In case of a breaking change, increase `transaction_version`. - -To verify the order has not changed, manually start the following -[Github Action](https://github.com/paritytech/polkadot-sdk/cumulus/.github/workflows/release-20_extrinsic-ordering-check-from-bin.yml). -It takes around a minute to run and will produce the report as artifact you need to manually check. - -To run it, in the _Run Workflow_ dropdown: -1. **Use workflow from**: to ignore, leave `master` as default -2. **The WebSocket url of the reference node**: - Asset Hub Polkadot: `wss://statemint-rpc.polkadot.io` - - Asset Hub Kusama: `wss://statemine-rpc.polkadot.io` - - Asset Hub Westend: `wss://westmint-rpc.polkadot.io` -3. **A url to a Linux binary for the node containing the runtime to test**: Paste the URL of the latest - release-candidate binary from the draft-release on Github. The binary has to previously be uploaded to S3 (Github url - link to the binary is constantly changing) - - E.g: https://releases.parity.io/cumulus/v0.9.270-rc3/polkadot-parachain -4. **The name of the chain under test. Usually, you would pass a local chain**: - Asset Hub Polkadot: - `asset-hub-polkadot-local` - - Asset Hub Kusama: `asset-hub-kusama-local` - - Asset Hub Westend: `asset-hub-westend-local` -5. Click **Run workflow** - -When the workflow is done, click on it and download the zip artifact, inside you'll find an `output.txt` file. The -things to look for in the output are lines like: - -- `[Identity] idx 28 -> 25 (calls 15)` - indicates the index for Identity has changed -- `[+] Society, Recovery` - indicates the new version includes 2 additional modules/pallets. -- If no indices have changed, every modules line should look something like `[Identity] idx 25 (calls 15)` - -**Note**: Adding new functions to the runtime does not constitute a breaking change as long as the indexes did not -change. - -**Note**: Extrinsic function signatures changes (adding/removing & ordering arguments) are not caught by the job, so -those changes should be reviewed "manually" - -### Benchmarks - -The Benchmarks can now be started from the CI. First find the CI pipeline from -[here](https://gitlab.parity.io/parity/mirrors/cumulus/-/pipelines?page=1&scope=all&ref=release-parachains-v9220) and -pick the latest. [Guide](https://github.com/paritytech/ci_cd/wiki/Benchmarks:-cumulus) - -### Integration Tests - -Until https://github.com/paritytech/ci_cd/issues/499 is done, tests will have to be run manually. -1. Go to https://github.com/paritytech/parachains-integration-tests and check out the release branch. E.g. 
-https://github.com/paritytech/parachains-integration-tests/tree/release-v9270-v0.9.27 for `release-parachains-v0.9.270` -2. Clone `release-parachains-` branch from Cumulus -3. `cargo build --release` -4. Copy `./target/polkadot-parachain` to `./bin` -5. Clone `it/release--fast-sudo` from Polkadot In case the branch does not exists (it is a manual process): - cherry pick `paritytech/polkadot@791c8b8` and run: - `find . -type f -name "*.toml" -print0 | xargs -0 sed -i '' -e 's/polkadot-vX.X.X/polkadot-v/g'` -6. `cargo build --release --features fast-runtime` -7. Copy `./target/polkadot` into `./bin` (in Cumulus) -8. Run the tests: - - Asset Hub Polkadot: `yarn zombienet-test -c ./examples/statemint/config.toml -t ./examples/statemint` - - Asset Hub Kusama: `yarn zombienet-test -c ./examples/statemine/config.toml -t ./examples/statemine` diff --git a/docs/RELEASE.md b/docs/RELEASE.md index bea367411359..677cb5465b67 100644 --- a/docs/RELEASE.md +++ b/docs/RELEASE.md @@ -14,7 +14,11 @@ Merging to it is restricted to [Backports](#backports). We are releasing multiple different things from this repository in one release, but we don't want to use the same version for everything. Thus, in the following we explain the versioning story for the crates, node and Westend & -Rococo. To easily refer to a release, it shall be named by its date in the form `stableYYMMDD`. +Rococo. + +To easily refer to a release, it shall be named by its date in the form `stableYYMM`. Patches to stable releases are +tagged in the form of `stableYYMM-PATCH`, with `PATCH` ranging from 1 to 99. For example, the fourth patch to +`stable2409` would be `stable2409-4`. ## Crate From 721f6d97613b0ece9c8414e8ec8ba31d2f67d40c Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Fri, 3 Jan 2025 14:19:18 +0100 Subject: [PATCH 09/10] [WIP] Fix networking-benchmarks (#7036) cc https://github.com/paritytech/ci_cd/issues/1094 --- ...nchmarks.yml => benchmarks-networking.yml} | 20 ++++++++++--------- ...enchmarks.yml => benchmarks-subsystem.yml} | 0 2 files changed, 11 insertions(+), 9 deletions(-) rename .github/workflows/{networking-benchmarks.yml => benchmarks-networking.yml} (86%) rename .github/workflows/{subsystem-benchmarks.yml => benchmarks-subsystem.yml} (100%) diff --git a/.github/workflows/networking-benchmarks.yml b/.github/workflows/benchmarks-networking.yml similarity index 86% rename from .github/workflows/networking-benchmarks.yml rename to .github/workflows/benchmarks-networking.yml index e45ae601105d..79494b9a015c 100644 --- a/.github/workflows/networking-benchmarks.yml +++ b/.github/workflows/benchmarks-networking.yml @@ -17,7 +17,7 @@ jobs: uses: ./.github/workflows/reusable-preflight.yml build: - timeout-minutes: 80 + timeout-minutes: 50 needs: [preflight] runs-on: ${{ needs.preflight.outputs.RUNNER_BENCHMARK }} container: @@ -27,12 +27,8 @@ jobs: matrix: features: [ - { - bench: "notifications_protocol", - }, - { - bench: "request_response_protocol", - }, + { bench: "notifications_protocol" }, + { bench: "request_response_protocol" }, ] steps: - name: Checkout @@ -42,7 +38,7 @@ jobs: id: run-benchmarks run: | mkdir -p ./charts - forklift cargo bench -p sc-network --bench ${{ matrix.features.bench }} -- --output-format bencher | grep "^test" | tee ./charts/networking-bench.txt || echo "Benchmarks failed" + forklift cargo bench -p sc-network --bench ${{ matrix.features.bench }} -- --output-format bencher | grep "^test" | tee ./charts/${{ matrix.features.bench }}.txt || 
echo "Benchmarks failed" ls -lsa ./charts - name: Upload artifacts @@ -69,7 +65,13 @@ jobs: - name: Download artifacts uses: actions/download-artifact@v4.1.8 with: - name: networking-bench-${{ github.sha }} + name: notifications_protocol-${{ github.sha }} + path: ./charts + + - name: Download artifacts + uses: actions/download-artifact@v4.1.8 + with: + name: request_response_protocol-${{ github.sha }} path: ./charts - name: Setup git diff --git a/.github/workflows/subsystem-benchmarks.yml b/.github/workflows/benchmarks-subsystem.yml similarity index 100% rename from .github/workflows/subsystem-benchmarks.yml rename to .github/workflows/benchmarks-subsystem.yml From 0b4f131b000e01f1aca3f023937a36dcc281d5e2 Mon Sep 17 00:00:00 2001 From: Qiwei Yang Date: Sat, 4 Jan 2025 06:22:12 +0800 Subject: [PATCH 10/10] Replace duplicated whitelist with whitelisted_storage_keys (#7024) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit related issue: #7018 replaced duplicated whitelists with `AllPalletsWithSystem::whitelisted_storage_keys();` in this PR --------- Co-authored-by: Guillaume Thiolliere Co-authored-by: Bastian Köcher --- .../runtimes/assets/asset-hub-rococo/src/lib.rs | 16 ++-------------- .../runtimes/assets/asset-hub-westend/src/lib.rs | 16 ++-------------- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 14 ++------------ .../bridge-hubs/bridge-hub-westend/src/lib.rs | 14 ++------------ .../collectives/collectives-westend/src/lib.rs | 14 ++------------ .../contracts/contracts-rococo/src/lib.rs | 14 ++------------ .../runtimes/coretime/coretime-rococo/src/lib.rs | 14 ++------------ .../coretime/coretime-westend/src/lib.rs | 14 ++------------ .../runtimes/people/people-rococo/src/lib.rs | 14 ++------------ .../runtimes/people/people-westend/src/lib.rs | 14 ++------------ .../runtimes/testing/penpal/src/lib.rs | 14 ++------------ 11 files changed, 22 insertions(+), 136 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index dd1535826152..8f4ae4670acd 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1854,20 +1854,8 @@ impl_runtime_apis! 
{ type ToWestend = XcmBridgeHubRouterBench; - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - //TODO: use from relay_well_known_keys::ACTIVE_CONFIG - hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 707d1c52f743..26ef3219a1e9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -2030,20 +2030,8 @@ impl_runtime_apis! { type ToRococo = XcmBridgeHubRouterBench; - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - //TODO: use from relay_well_known_keys::ACTIVE_CONFIG - hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 492b731610ce..88146cecb9ef 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -1498,18 +1498,8 @@ impl_runtime_apis! 
{ } } - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index edf79ea0c315..1ca709f0d8cb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -1315,18 +1315,8 @@ impl_runtime_apis! { } } - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 5c2ba2e24c22..d3cd285ba67a 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -1139,18 +1139,8 @@ impl_runtime_apis! 
{ } } - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 594c9b26f57e..be369565dba9 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -849,18 +849,8 @@ impl_runtime_apis! { } } - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index e8f6e6659e13..c4d43e4361fa 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -1140,18 +1140,8 @@ impl_runtime_apis! 
{ type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index ce965f0ad1ba..431bfc8a63ba 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -1135,18 +1135,8 @@ impl_runtime_apis! { type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index b8db687da625..ef3c90ace826 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -1055,18 +1055,8 @@ impl_runtime_apis! 
{ type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 620ec41c071c..ebf8fcb33bd8 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -1055,18 +1055,8 @@ impl_runtime_apis! { type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index b51670c792d6..51dc95bf2c71 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -1132,18 +1132,8 @@ impl_runtime_apis! 
{ use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist);
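The keys returned by `whitelisted_storage_keys()` come from pallets tagging their hot storage items. A minimal sketch of that mechanism, using a hypothetical pallet and storage item:

```rust
// Hypothetical pallet showing how a storage key enters the whitelist that
// `AllPalletsWithSystem::whitelisted_storage_keys()` aggregates above.
#[frame_support::pallet]
pub mod pallet {
	use frame_support::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	/// Accessed every block, so benchmarks should not charge for its reads.
	/// `whitelist_storage` adds this key to the pallet's `WhitelistedStorageKeys`
	/// implementation, replacing the hand-maintained hex lists deleted above.
	#[pallet::storage]
	#[pallet::whitelist_storage]
	pub type HotCounter<T> = StorageValue<_, u32, ValueQuery>;
}
```

Deriving the list from the pallets themselves keeps it in sync as pallets are added or removed, which the hard-coded hex keys (including the `//TODO: use from relay_well_known_keys::ACTIVE_CONFIG` entry) could not guarantee.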