diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 23820df4..730062b8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,7 +15,7 @@ concurrency: env: DASEL_VERSION: https://github.com/TomWright/dasel/releases/download/v1.24.3/dasel_linux_amd64 RUST_VERSION: 1.79 - FUEL_CORE_VERSION: 0.31.0 + FUEL_CORE_VERSION: 0.36.0 IMAGE_NAME: ${{ github.repository }} REPO_NAME: ${{ github.event.repository.name }} AWS_ROLE_ARN: arn:aws:iam::024848458133:role/github_oidc_FuelLabs_fuel-block-committer @@ -61,12 +61,16 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} - name: Check for typos uses: crate-ci/typos@v1.23.6 - - uses: dtolnay/rust-toolchain@master + - name: Setup Rust + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.RUST_VERSION }} components: clippy,rustfmt - name: Install Foundry uses: foundry-rs/foundry-toolchain@v1 + with: + # The last version of Foundry that didn't experience this issue: https://github.com/alloy-rs/alloy/issues/1371 + version: nightly-cb9dfae298fe0b5a5cdef2536955f50b8c7f0bf5 - name: Build cache uses: buildjet/cache@v4 with: @@ -89,34 +93,59 @@ jobs: tar -xvf fuel-core.tar.gz chmod +x fuel-core-${{ env.FUEL_CORE_VERSION }}-x86_64-unknown-linux-gnu/fuel-core mv fuel-core-${{ env.FUEL_CORE_VERSION }}-x86_64-unknown-linux-gnu/fuel-core /usr/local/bin/fuel-core - - name: Run tests - run: ./run_tests.sh + - name: Unit/integration tests + run: cargo test --workspace --exclude e2e - publish-crates-check: + e2e-release-build: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - - - uses: dtolnay/rust-toolchain@master + - name: Setup Rust + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.RUST_VERSION }} - - - name: Publish crate check - uses: katyo/publish-crates@v2 + - name: Build release binary + run: cargo build --release --bin fuel-block-committer + - name: Upload release binary + uses: actions/upload-artifact@v4 with: - dry-run: true - check-repo: false - ignore-unpublished-changes: true + name: fuel-block-committer-binary + path: target/release/fuel-block-committer - publish-crates: - needs: - - cargo-verifications - - publish-crates-check - # Only do this job if publishing a release - if: github.event_name == 'release' && github.event.action == 'published' + e2e-tests: + needs: e2e-release-build runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + token: ${{ secrets.GITHUB_TOKEN }} + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + with: + # The last version of Foundry that didn't experience this issue: https://github.com/alloy-rs/alloy/issues/1371 + version: nightly-cb9dfae298fe0b5a5cdef2536955f50b8c7f0bf5 + - name: Download release binary + uses: actions/download-artifact@v4 + with: + name: fuel-block-committer-binary + path: ./target/release + - name: Ensure Binary is Executable + run: chmod +x ./target/release/fuel-block-committer + - name: Install Fuel Core + run: | + curl -sSLf https://github.com/FuelLabs/fuel-core/releases/download/v${{ env.FUEL_CORE_VERSION }}/fuel-core-${{ env.FUEL_CORE_VERSION }}-x86_64-unknown-linux-gnu.tar.gz -L -o fuel-core.tar.gz + tar -xvf fuel-core.tar.gz + chmod +x fuel-core-${{ env.FUEL_CORE_VERSION }}-x86_64-unknown-linux-gnu/fuel-core + mv fuel-core-${{ env.FUEL_CORE_VERSION }}-x86_64-unknown-linux-gnu/fuel-core /usr/local/bin/fuel-core + - name: Run E2E tests + run: | + ./target/release/fuel-block-committer --version + PATH="$(pwd)/target/release:$PATH" cargo test --package e2e 
--jobs 1 + publish-crates-check: + runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 @@ -125,26 +154,24 @@ jobs: with: toolchain: ${{ env.RUST_VERSION }} - - name: Verify tag version - run: | - curl -sSLf "$DASEL_VERSION" -L -o dasel && chmod +x dasel - mv ./dasel /usr/local/bin/dasel - ./.github/scripts/verify_tag.sh ${{ github.ref_name }} Cargo.toml - - name: Publish crate + - name: Publish crate check uses: katyo/publish-crates@v2 with: - publish-delay: 30000 - registry-token: ${{ secrets.CARGO_REGISTRY_TOKEN }} + dry-run: true + check-repo: false + ignore-unpublished-changes: true build-docker-images: - needs: - - cargo-verifications + # needs: + # - cargo-verifications + # - e2e-tests strategy: matrix: arch: [ # build on native runners instead of using emulation {platform: linux/amd64, runner: buildjet-8vcpu-ubuntu-2204}, - {platform: linux/arm64, runner: buildjet-8vcpu-ubuntu-2204-arm} + # don't currently need arm builds + # {platform: linux/arm64, runner: buildjet-8vcpu-ubuntu-2204-arm} ] runs-on: ${{ matrix.arch.runner }} permissions: diff --git a/.gitignore b/.gitignore index d82a6e99..534b8a12 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ # Generated by Cargo # will have compiled files and executables -/target/ +**/target/ # These are backup files generated by rustfmt **/*.rs.bk diff --git a/.sqlx/query-050da178612b738321d948e01675dd48b98ec38be553251f532ae666a34da9a0.json b/.sqlx/query-050da178612b738321d948e01675dd48b98ec38be553251f532ae666a34da9a0.json new file mode 100644 index 00000000..9a121daf --- /dev/null +++ b/.sqlx/query-050da178612b738321d948e01675dd48b98ec38be553251f532ae666a34da9a0.json @@ -0,0 +1,50 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT * FROM l1_fragments ORDER BY idx ASC", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "idx", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "data", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "total_bytes", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "unused_bytes", + "type_info": "Int8" + }, + { + "ordinal": 5, + "name": "bundle_id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false + ] + }, + "hash": "050da178612b738321d948e01675dd48b98ec38be553251f532ae666a34da9a0" +} diff --git a/.sqlx/query-1fe55a6d422b1619a6b27ae3ab04682873b73b0ab53d8df9db87744cc2367676.json b/.sqlx/query-1fe55a6d422b1619a6b27ae3ab04682873b73b0ab53d8df9db87744cc2367676.json new file mode 100644 index 00000000..67051297 --- /dev/null +++ b/.sqlx/query-1fe55a6d422b1619a6b27ae3ab04682873b73b0ab53d8df9db87744cc2367676.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE l1_transactions SET state = $1, finalized_at = $2 WHERE hash = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int2", + "Timestamptz", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "1fe55a6d422b1619a6b27ae3ab04682873b73b0ab53d8df9db87744cc2367676" +} diff --git a/.sqlx/query-c6cffaf0718065ed45442c123f7aed85456bbbb9588ab0ed2be2d685ea09364e.json b/.sqlx/query-337f06aa3fddcddc7854094ccc542c6947ddd5430a5d7fbe77a4069c43002667.json similarity index 55% rename from .sqlx/query-c6cffaf0718065ed45442c123f7aed85456bbbb9588ab0ed2be2d685ea09364e.json rename to .sqlx/query-337f06aa3fddcddc7854094ccc542c6947ddd5430a5d7fbe77a4069c43002667.json index 27e7b399..a4a5d39d 100644 --- 
a/.sqlx/query-c6cffaf0718065ed45442c123f7aed85456bbbb9588ab0ed2be2d685ea09364e.json +++ b/.sqlx/query-337f06aa3fddcddc7854094ccc542c6947ddd5430a5d7fbe77a4069c43002667.json @@ -1,22 +1,22 @@ { "db_name": "PostgreSQL", - "query": "SELECT * FROM l1_submissions ORDER BY fuel_block_height DESC LIMIT 1", + "query": "SELECT * FROM fuel_blocks ORDER BY height ASC", "describe": { "columns": [ { "ordinal": 0, - "name": "id", - "type_info": "Int4" + "name": "hash", + "type_info": "Bytea" }, { "ordinal": 1, - "name": "fuel_block_hash", - "type_info": "Bytea" + "name": "height", + "type_info": "Int8" }, { "ordinal": 2, - "name": "fuel_block_height", - "type_info": "Int8" + "name": "data", + "type_info": "Bytea" } ], "parameters": { @@ -28,5 +28,5 @@ false ] }, - "hash": "c6cffaf0718065ed45442c123f7aed85456bbbb9588ab0ed2be2d685ea09364e" + "hash": "337f06aa3fddcddc7854094ccc542c6947ddd5430a5d7fbe77a4069c43002667" } diff --git a/.sqlx/query-60d064e8f937e592a8080182c2a4d21f65a6ae874ab21c46f5b6290d7e1b2cf1.json b/.sqlx/query-60d064e8f937e592a8080182c2a4d21f65a6ae874ab21c46f5b6290d7e1b2cf1.json new file mode 100644 index 00000000..93f464b2 --- /dev/null +++ b/.sqlx/query-60d064e8f937e592a8080182c2a4d21f65a6ae874ab21c46f5b6290d7e1b2cf1.json @@ -0,0 +1,35 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT fb.*\n FROM fuel_blocks fb WHERE fb.height >= $1\n AND NOT EXISTS (\n SELECT 1\n FROM bundles b\n WHERE fb.height BETWEEN b.start_height AND b.end_height\n )\n ORDER BY fb.height LIMIT $2", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "height", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "data", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "60d064e8f937e592a8080182c2a4d21f65a6ae874ab21c46f5b6290d7e1b2cf1" +} diff --git a/.sqlx/query-898642b7e806eae6feb93d89300757d96d8ff0963c426c38f962e9254b01736c.json b/.sqlx/query-898642b7e806eae6feb93d89300757d96d8ff0963c426c38f962e9254b01736c.json new file mode 100644 index 00000000..ccf64cce --- /dev/null +++ b/.sqlx/query-898642b7e806eae6feb93d89300757d96d8ff0963c426c38f962e9254b01736c.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n MAX(l1_transactions.finalized_at) AS last_fragment_time\n FROM\n l1_transaction_fragments\n JOIN\n l1_transactions ON l1_transactions.id = l1_transaction_fragments.transaction_id\n WHERE\n l1_transactions.state = $1;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_fragment_time", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Int2" + ] + }, + "nullable": [ + null + ] + }, + "hash": "898642b7e806eae6feb93d89300757d96d8ff0963c426c38f962e9254b01736c" +} diff --git a/.sqlx/query-9be45d22c0bb43deb53da5a8fe19f5554f2a901cabc730792d51baadd2460e44.json b/.sqlx/query-9be45d22c0bb43deb53da5a8fe19f5554f2a901cabc730792d51baadd2460e44.json index 777bc248..473ede0a 100644 --- a/.sqlx/query-9be45d22c0bb43deb53da5a8fe19f5554f2a901cabc730792d51baadd2460e44.json +++ b/.sqlx/query-9be45d22c0bb43deb53da5a8fe19f5554f2a901cabc730792d51baadd2460e44.json @@ -17,6 +17,11 @@ "ordinal": 2, "name": "state", "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "finalized_at", + "type_info": "Timestamptz" } ], "parameters": { @@ -27,7 +32,8 @@ "nullable": [ false, false, - false + false, + true ] }, "hash": "9be45d22c0bb43deb53da5a8fe19f5554f2a901cabc730792d51baadd2460e44" diff --git 
a/.sqlx/query-a0a9a31c75e25328bc90ad6f90003cd79be3de844e42b818ffb2168461aab0e5.json b/.sqlx/query-a0a9a31c75e25328bc90ad6f90003cd79be3de844e42b818ffb2168461aab0e5.json new file mode 100644 index 00000000..d2c07aba --- /dev/null +++ b/.sqlx/query-a0a9a31c75e25328bc90ad6f90003cd79be3de844e42b818ffb2168461aab0e5.json @@ -0,0 +1,54 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT f.*\n FROM l1_fragments f\n LEFT JOIN l1_transaction_fragments tf ON tf.fragment_id = f.id\n LEFT JOIN l1_transactions t ON t.id = tf.transaction_id\n JOIN bundles b ON b.id = f.bundle_id\n WHERE (t.id IS NULL OR t.state = $1) \n AND b.end_height >= $2 -- Exclude bundles ending before starting_height\n ORDER BY b.start_height ASC, f.idx ASC\n LIMIT $3;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "idx", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "data", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "total_bytes", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "unused_bytes", + "type_info": "Int8" + }, + { + "ordinal": 5, + "name": "bundle_id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int2", + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false + ] + }, + "hash": "a0a9a31c75e25328bc90ad6f90003cd79be3de844e42b818ffb2168461aab0e5" +} diff --git a/.sqlx/query-b3e422ba5518d62297afe5fc97440249be2af4c93243b961f68b028232185992.json b/.sqlx/query-b3e422ba5518d62297afe5fc97440249be2af4c93243b961f68b028232185992.json deleted file mode 100644 index 3191a907..00000000 --- a/.sqlx/query-b3e422ba5518d62297afe5fc97440249be2af4c93243b961f68b028232185992.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "UPDATE l1_transactions SET state = $1 WHERE hash = $2", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int2", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "b3e422ba5518d62297afe5fc97440249be2af4c93243b961f68b028232185992" -} diff --git a/.sqlx/query-bce910f42b45949e8ab08355c5b6d7679c267ef5946d7c360a24abfdefa1abe2.json b/.sqlx/query-bce910f42b45949e8ab08355c5b6d7679c267ef5946d7c360a24abfdefa1abe2.json deleted file mode 100644 index 28d12388..00000000 --- a/.sqlx/query-bce910f42b45949e8ab08355c5b6d7679c267ef5946d7c360a24abfdefa1abe2.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "INSERT INTO l1_fragments (fragment_idx, submission_id, data, created_at) VALUES ($1, $2, $3, $4)", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Bytea", - "Timestamptz" - ] - }, - "nullable": [] - }, - "hash": "bce910f42b45949e8ab08355c5b6d7679c267ef5946d7c360a24abfdefa1abe2" -} diff --git a/.sqlx/query-daa42cdb26e7b8e6d1d586367cbe42d1defc42b001b71e53a86e47f91c521c69.json b/.sqlx/query-bf323ca1f8864ccce576910302b297e3313dd9ee63c1f1edd812a2d3095c720d.json similarity index 56% rename from .sqlx/query-daa42cdb26e7b8e6d1d586367cbe42d1defc42b001b71e53a86e47f91c521c69.json rename to .sqlx/query-bf323ca1f8864ccce576910302b297e3313dd9ee63c1f1edd812a2d3095c720d.json index 51c7304f..606daf15 100644 --- a/.sqlx/query-daa42cdb26e7b8e6d1d586367cbe42d1defc42b001b71e53a86e47f91c521c69.json +++ b/.sqlx/query-bf323ca1f8864ccce576910302b297e3313dd9ee63c1f1edd812a2d3095c720d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "INSERT INTO l1_submissions (fuel_block_hash, fuel_block_height) VALUES ($1, $2) RETURNING id", + "query": "INSERT INTO bundles(start_height, 
end_height) VALUES ($1, $2) RETURNING id", "describe": { "columns": [ { @@ -11,7 +11,7 @@ ], "parameters": { "Left": [ - "Bytea", + "Int8", "Int8" ] }, @@ -19,5 +19,5 @@ false ] }, - "hash": "daa42cdb26e7b8e6d1d586367cbe42d1defc42b001b71e53a86e47f91c521c69" + "hash": "bf323ca1f8864ccce576910302b297e3313dd9ee63c1f1edd812a2d3095c720d" } diff --git a/.sqlx/query-f258b9822f1b060c13cd895fdbe61020fa605fdba844cb8c0071111f78342b5e.json b/.sqlx/query-f258b9822f1b060c13cd895fdbe61020fa605fdba844cb8c0071111f78342b5e.json deleted file mode 100644 index ec52bb46..00000000 --- a/.sqlx/query-f258b9822f1b060c13cd895fdbe61020fa605fdba844cb8c0071111f78342b5e.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "SELECT l1_fragments.*\n FROM l1_fragments\n WHERE l1_fragments.id NOT IN (\n SELECT l1_fragments.id\n FROM l1_fragments\n JOIN l1_transaction_fragments ON l1_fragments.id = l1_transaction_fragments.fragment_id\n JOIN l1_transactions ON l1_transaction_fragments.transaction_id = l1_transactions.id\n WHERE l1_transactions.state IN ($1, $2)\n )\n ORDER BY l1_fragments.created_at\n LIMIT $3;", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "fragment_idx", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "submission_id", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "data", - "type_info": "Bytea" - }, - { - "ordinal": 4, - "name": "created_at", - "type_info": "Timestamptz" - } - ], - "parameters": { - "Left": [ - "Int2", - "Int2", - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false - ] - }, - "hash": "f258b9822f1b060c13cd895fdbe61020fa605fdba844cb8c0071111f78342b5e" -} diff --git a/.sqlx/query-f62bc595ae70229a6e37d7f1459ffe569d9ae2701cee893f2d2ef77a5e20e6f1.json b/.sqlx/query-f62bc595ae70229a6e37d7f1459ffe569d9ae2701cee893f2d2ef77a5e20e6f1.json new file mode 100644 index 00000000..9fb24a77 --- /dev/null +++ b/.sqlx/query-f62bc595ae70229a6e37d7f1459ffe569d9ae2701cee893f2d2ef77a5e20e6f1.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "WITH all_heights AS (SELECT generate_series($1::BIGINT, $2::BIGINT) AS height)\n SELECT ah.height\n FROM all_heights ah\n LEFT JOIN fuel_blocks fb ON fb.height = ah.height\n WHERE fb.height IS NULL\n ORDER BY ah.height;", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "height", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "f62bc595ae70229a6e37d7f1459ffe569d9ae2701cee893f2d2ef77a5e20e6f1" +} diff --git a/Cargo.lock b/Cargo.lock index 3427a5b3..9702c1e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -44,7 +44,7 @@ dependencies = [ "bitflags 2.6.0", "bytes", "bytestring", - "derive_more", + "derive_more 0.99.18", "encoding_rs", "futures-core", "http 0.2.12", @@ -71,7 +71,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -155,7 +155,7 @@ dependencies = [ "bytes", "bytestring", "cfg-if", - "derive_more", + "derive_more 0.99.18", "encoding_rs", "futures-core", "futures-util", @@ -185,24 +185,18 @@ dependencies = [ "actix-router", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -239,9 +233,9 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f4a4aaae80afd4be443a6aecd92a6b255dcdd000f97996928efb33d8a71e100" +checksum = "8367891bf380210abb0d6aa30c5f85a9080cb4a066c4d5c5acadad630823751b" dependencies = [ "alloy-consensus", "alloy-contract", @@ -263,9 +257,9 @@ dependencies = [ [[package]] name = "alloy-chains" -version = "0.1.29" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb07629a5d0645d29f68d2fb6f4d0cf15c89ec0965be915f303967180929743f" +checksum = "805f7a974de5804f5c053edc6ca43b20883bdd3a733b3691200ae3a4b454a2db" dependencies = [ "num_enum", "strum 0.26.3", @@ -273,9 +267,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c309895995eaa4bfcc345f5515a39c7df9447798645cc8bf462b6c5bf1dc96" +checksum = "629b62e38d471cc15fea534eb7283d2f8a4e8bdb1811bcc5d66dda6cfce6fae1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -287,9 +281,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f4e0ef72b0876ae3068b2ed7dfae9ae1779ce13cfaec2ee1f08f5bd0348dc57" +checksum = "0eefe64fd344cffa9cf9e3435ec4e93e6e9c3481bc37269af988bf497faf4a6a" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -308,9 +302,9 @@ dependencies = [ [[package]] name = "alloy-core" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "529fc6310dc1126c8de51c376cbc59c79c7f662bd742be7dc67055d5421a81b4" +checksum = "5ce854562e7cafd5049189d0268d6e5cba05fe6c9cb7c6f8126a79b94800629c" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -320,9 +314,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413902aa18a97569e60f679c23f46a18db1656d87ab4d4e49d0e1e52042f66df" +checksum = "0b499852e1d0e9b8c6db0f24c48998e647c0d5762a01090f955106a7700e4611" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -335,17 +329,41 @@ dependencies = [ "winnow", ] +[[package]] +name = "alloy-eip2930" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0069cf0642457f87a01a014f6dc29d5d893cd4fd8fddf0c3cdfad1bb3ebafc41" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + +[[package]] +name = "alloy-eip7702" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37d319bb544ca6caeab58c39cea8921c55d924d4f68f2c60f24f914673f9a74a" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "serde", +] + [[package]] name = "alloy-eips" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d9431c99a3b3fe606ede4b3d4043bdfbcb780c45b8d8d226c3804e2b75cfbe68" +checksum = "f923dd5fca5f67a43d81ed3ebad0880bd41f6dd0ada930030353ac356c54cd0f" dependencies = [ + "alloy-eip2930", + "alloy-eip7702", "alloy-primitives", "alloy-rlp", "alloy-serde", "c-kzg", - "derive_more", + "derive_more 1.0.0", "once_cell", "serde", "sha2", @@ -353,9 +371,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc05b04ac331a9f07e3a4036ef7926e49a8bf84a99a1ccfc7e2ab55a5fcbb372" +checksum = "a438d4486b5d525df3b3004188f9d5cd1d65cd30ecc41e5a3ccef6f6342e8af9" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -365,9 +383,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e2865c4c3bb4cdad3f0d9ec1ab5c0c657ba69a375651bd35e32fb6c180ccc2" +checksum = "d3c717b5298fad078cd3a418335b266eba91b511383ca9bd497f742d5975d5ab" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -379,9 +397,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e701fc87ef9a3139154b0b4ccb935b565d27ffd9de020fe541bf2dec5ae4ede" +checksum = "fb3705ce7d8602132bcf5ac7a1dd293a42adc2f183abf5907c30ac535ceca049" dependencies = [ "alloy-consensus", "alloy-eips", @@ -400,10 +418,11 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec9d5a0f9170b10988b6774498a022845e13eda94318440d17709d50687f67f9" +checksum = "94ad40869867ed2d9cd3842b1e800889e5b49e6b92da346e93862b4a741bedf3" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-serde", "serde", @@ -411,31 +430,36 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb3ead547f4532bc8af961649942f0b9c16ee9226e26caa3f38420651cc0bf4" +checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" dependencies = [ "alloy-rlp", "bytes", "cfg-if", "const-hex", - "derive_more", + "derive_more 1.0.0", + "hashbrown 0.14.5", "hex-literal", + "indexmap 2.5.0", "itoa", "k256", "keccak-asm", + "paste", "proptest", "rand", "ruint", + "rustc-hash", "serde", + "sha3", "tiny-keccak", ] [[package]] name = "alloy-provider" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9c0ab10b93de601a6396fc7ff2ea10d3b28c46f079338fa562107ebf9857c8" +checksum = "927f708dd457ed63420400ee5f06945df9632d5d101851952056840426a10dc5" dependencies = [ "alloy-chains", "alloy-consensus", @@ -461,6 +485,7 @@ dependencies = [ "reqwest 0.12.7", "serde", "serde_json", + "thiserror", "tokio", "tracing", "url", @@ -468,9 +493,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f5da2c55cbaf229bad3c5f8b00b5ab66c74ef093e5f3a753d874cfecf7d2281" +checksum = "2d05f63677e210d758cd5d6d1ce10f20c980c3560ccfbe79ba1997791862a04f" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -481,7 +506,7 @@ dependencies = [ "serde_json", "tokio", "tokio-stream", - "tower", + "tower 0.5.1", "tracing", ] @@ -504,14 +529,14 @@ checksum = 
"4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "alloy-rpc-client" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b38e3ffdb285df5d9f60cb988d336d9b8e3505acb78750c3bc60336a7af41d3" +checksum = "7d82952dca71173813d4e5733e2c986d8b04aea9e0f3b0a576664c232ad050a5" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -526,16 +551,16 @@ dependencies = [ "serde_json", "tokio", "tokio-stream", - "tower", + "tower 0.5.1", "tracing", "url", ] [[package]] name = "alloy-rpc-types" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c31a3750b8f5a350d17354e46a52b0f2f19ec5f2006d816935af599dedc521" +checksum = "64333d639f2a0cf73491813c629a405744e16343a4bc5640931be707c345ecc5" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -545,27 +570,22 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff63f51b2fb2f547df5218527fd0653afb1947bf7fead5b3ce58c75d170b30f7" +checksum = "1464c4dd646e1bdfde86ae65ce5ba168dbb29180b478011fe87117ae46b1629b" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types-eth", - "alloy-serde", - "jsonwebtoken", - "rand", - "serde", - "thiserror", + "derive_more 1.0.0", ] [[package]] name = "alloy-rpc-types-eth" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81e18424d962d7700a882fe423714bd5b9dde74c7a7589d4255ea64068773aef" +checksum = "83aa984386deda02482660aa31cb8ca1e63d533f1c31a52d7d181ac5ec68e9b8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -574,17 +594,19 @@ dependencies = [ "alloy-rlp", "alloy-serde", "alloy-sol-types", + "cfg-if", + "derive_more 1.0.0", + "hashbrown 0.14.5", "itertools 0.13.0", "serde", "serde_json", - "thiserror", ] [[package]] name = "alloy-serde" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33feda6a53e6079895aed1d08dcb98a1377b000d80d16370fbbdb8155d547ef" +checksum = "731f75ec5d383107fd745d781619bd9cedf145836c51ecb991623d41278e71fa" dependencies = [ "alloy-primitives", "serde", @@ -593,9 +615,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740a25b92e849ed7b0fa013951fe2f64be9af1ad5abe805037b44fb7770c5c47" +checksum = "307324cca94354cd654d6713629f0383ec037e1ff9e3e3d547212471209860c0" dependencies = [ "alloy-primitives", "async-trait", @@ -607,9 +629,9 @@ dependencies = [ [[package]] name = "alloy-signer-aws" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1a47bd8487fb2d715f8a203c3bfe7de0b7443eeacb00bd96d8d4eb0d67e184" +checksum = "076be69aa26a4c500919f1ad3847662aa6d1e9bc2995e263ed826b1546d1b990" dependencies = [ "alloy-consensus", "alloy-network", @@ -625,17 +647,17 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b0707d4f63e4356a110b30ef3add8732ab6d181dd7be4607bf79b8777105cee" +checksum = "9fabe917ab1778e760b4701628d1cae8e028ee9d52ac6307de4e1e9286ab6b5f" dependencies 
= [ "alloy-consensus", "alloy-network", "alloy-primitives", "alloy-signer", "async-trait", - "coins-bip32 0.11.1", - "coins-bip39 0.11.1", + "coins-bip32 0.12.0", + "coins-bip39 0.12.0", "k256", "rand", "thiserror", @@ -643,42 +665,42 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b40397ddcdcc266f59f959770f601ce1280e699a91fc1862f29cef91707cd09" +checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", - "proc-macro-error", + "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "867a5469d61480fea08c7333ffeca52d5b621f5ca2e44f271b117ec1fc9a0525" +checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck 0.5.0", - "indexmap 2.4.0", - "proc-macro-error", + "indexmap 2.5.0", + "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e482dc33a32b6fadbc0f599adea520bd3aaa585c141a80b404d0a3e3fa72528" +checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" dependencies = [ "alloy-json-abi", "const-hex", @@ -687,15 +709,15 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.76", + "syn 2.0.77", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbcba3ca07cf7975f15d871b721fb18031eec8bce51103907f6dcce00b255d98" +checksum = "bc85178909a49c8827ffccfc9103a7ce1767ae66a801b69bdc326913870bf8e6" dependencies = [ "serde", "winnow", @@ -703,9 +725,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91ca40fa20793ae9c3841b83e74569d1cc9af29a2f5237314fd3452d51e38c7" +checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -716,9 +738,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d0590afbdacf2f8cca49d025a2466f3b6584a016a8b28f532f29f8da1007bae" +checksum = "33616b2edf7454302a1d48084db185e52c309f73f6c10be99b0fe39354b3f1e9" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -728,37 +750,37 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 0.5.1", "tracing", "url", ] [[package]] name = "alloy-transport-http" -version = "0.2.1" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2437d145d80ea1aecde8574d2058cceb8b3c9cba05f6aea8e67907c660d46698" +checksum = "a944f5310c690b62bbb3e7e5ce34527cbd36b2d18532a797af123271ce595a49" dependencies = [ "alloy-json-rpc", "alloy-transport", "reqwest 0.12.7", "serde_json", - "tower", + "tower 0.5.1", "tracing", "url", ] [[package]] name = "alloy-transport-ws" -version = "0.2.1" +version = "0.3.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "af855163e7df008799941aa6dd324a43ef2bf264b08ba4b22d44aad6ced65300" +checksum = "a9704761f6297fe482276bee7f77a93cb42bd541c2bd6c1c560b6f3a9ece672e" dependencies = [ "alloy-pubsub", "alloy-transport", "futures", "http 1.1.0", - "rustls 0.23.12", + "rustls 0.23.13", "serde_json", "tokio", "tokio-tungstenite", @@ -832,9 +854,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arbitrary" @@ -1000,18 +1022,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1057,7 +1079,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1068,9 +1090,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "aws-config" -version = "1.5.5" +version = "1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e95816a168520d72c0e7680c405a5a8c1fb6a035b4bc4b9d7b0de8e1a941697" +checksum = "848d7b9b605720989929279fa644ce8f244d0ce3146fcca5b70e4eb7b3c020fc" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1098,9 +1120,9 @@ dependencies = [ [[package]] name = "aws-credential-types" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16838e6c9e12125face1c1eff1343c75e3ff540de98ff7ebd61874a89bcfeb9" +checksum = "60e8f6b615cb5fc60a98132268508ad104310f0cfb25a1c22eee76efdf9154da" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -1110,14 +1132,15 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.4.0" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f42c2d4218de4dcd890a109461e2f799a1a2ba3bcd2cde9af88360f5df9266c6" +checksum = "a10d5c055aa540164d9561a0e2e74ad30f0dcf7393c3a92f6733ddf9c5762468" dependencies = [ "aws-credential-types", "aws-sigv4", "aws-smithy-async", "aws-smithy-http", + "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", @@ -1134,9 +1157,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.40.0" +version = "1.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ebbbc319551583b9233a74b359ede7349102e779fc12371d2478e80b50d218" +checksum = "c6550445e0913c9383375f4a5a2f550817567a19a178107fce1e1afd767f802a" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1156,9 +1179,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.39.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11822090cf501c316c6f75711d77b96fba30658e3867a7762e5e2f5d32d31e81" +checksum = 
"70a9d27ed1c12b1140c47daf1bc541606c43fdafd918c4797d520db0043ceef2" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1178,9 +1201,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.40.0" +version = "1.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78a2a06ff89176123945d1bbe865603c4d7101bea216a550bb4d2e4e9ba74d74" +checksum = "44514a6ca967686cde1e2a1b81df6ef1883d0e3e570da8d8bc5c491dcb6fc29b" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1200,9 +1223,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.39.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20a91795850826a6f456f4a48eff1dfa59a0e69bdbf5b8c50518fd372106574" +checksum = "cd7a4d279762a35b9df97209f6808b95d4fe78547fe2316b4d200a0283960c5a" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1223,9 +1246,9 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.2.3" +version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5df1b0fa6be58efe9d4ccc257df0a53b89cd8909e86591a13ca54817c87517be" +checksum = "cc8db6904450bafe7473c6ca9123f88cc11089e41a025408f992db4e22d3be68" dependencies = [ "aws-credential-types", "aws-smithy-http", @@ -1257,9 +1280,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.60.10" +version = "0.60.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01dbcb6e2588fd64cfb6d7529661b06466419e4c54ed1c62d6510d2d0350a728" +checksum = "5c8bc3e8fdc6b8d07d976e301c02fe553f72a39b7a9fea820e023268467d7ab6" dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", @@ -1340,9 +1363,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.2.4" +version = "1.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "273dcdfd762fae3e1650b8024624e7cd50e484e37abdab73a7a706188ad34543" +checksum = "03701449087215b5369c7ea17fef0dd5d24cb93439ec5af0c7615f58c3f22605" dependencies = [ "base64-simd", "bytes", @@ -1366,9 +1389,9 @@ dependencies = [ [[package]] name = "aws-smithy-xml" -version = "0.60.8" +version = "0.60.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d123fbc2a4adc3c301652ba8e149bf4bc1d1725affb9784eb20c953ace06bf55" +checksum = "ab0b0166827aa700d3dc519f72f8b3a91c35d0b8d042dc5d643a91e6f80648fc" dependencies = [ "xmlparser", ] @@ -1389,18 +1412,18 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object", "rustc-demangle", "serde", + "windows-targets 0.52.6", ] [[package]] @@ -1592,9 +1615,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] @@ -1609,6 +1632,12 @@ dependencies = [ "either", ] +[[package]] +name = "bytesize" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" + [[package]] name = "bytestring" version = "1.3.1" @@ -1635,9 +1664,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.15" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "shlex", ] @@ -1663,9 +1692,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.16" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" +checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ "clap_builder", "clap_derive", @@ -1673,9 +1702,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = [ "anstream", "anstyle", @@ -1685,14 +1714,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -1701,6 +1730,15 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +[[package]] +name = "clock" +version = "0.9.0" +dependencies = [ + "clock", + "ports", + "tokio", +] + [[package]] name = "cobs" version = "0.2.3" @@ -1725,12 +1763,12 @@ dependencies = [ [[package]] name = "coins-bip32" -version = "0.11.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c43ff7fd9ff522219058808a259e61423335767b1071d5b346de60d9219657" +checksum = "2073678591747aed4000dd468b97b14d7007f7936851d3f2f01846899f5ebf08" dependencies = [ "bs58", - "coins-core 0.11.1", + "coins-core 0.12.0", "digest 0.10.7", "hmac", "k256", @@ -1757,12 +1795,12 @@ dependencies = [ [[package]] name = "coins-bip39" -version = "0.11.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4587c0b4064da887ed39a6522f577267d57e58bdd583178cd877d721b56a2e" +checksum = "74b169b26623ff17e9db37a539fe4f15342080df39f129ef7631df7683d6d9d4" dependencies = [ "bitvec", - "coins-bip32 0.11.1", + "coins-bip32 0.12.0", "hmac", "once_cell", "pbkdf2", @@ -1793,9 +1831,9 @@ dependencies = [ [[package]] name = "coins-core" -version = "0.11.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b3aeeec621f4daec552e9d28befd58020a78cfc364827d06a753e8bc13c6c4b" +checksum = "62b962ad8545e43a28e14e87377812ba9ae748dd4fd963f4c10e9fcc6d13475b" dependencies = [ "base64 0.21.7", "bech32", @@ -1938,9 +1976,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +checksum = 
"608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -1975,6 +2013,25 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f64009896348fc5af4222e9cf7d7d82a95a256c634ebcf61c53e4ea461422242" +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-queue" version = "0.3.11" @@ -2041,7 +2098,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2129,7 +2186,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2151,16 +2208,17 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "dashmap" -version = "5.5.3" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ "cfg-if", + "crossbeam-utils", "hashbrown 0.14.5", "lock_api", "once_cell", @@ -2173,6 +2231,17 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +[[package]] +name = "delegate" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5060bb0febb73fa907273f8a7ed17ab4bf831d585eac835b28ec24a1e2460956" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "der" version = "0.7.9" @@ -2213,7 +2282,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2226,9 +2295,36 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.76", + "syn 2.0.77", +] + +[[package]] +name = "derive_more" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl", ] +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", + "unicode-xid", +] + +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.9.0" @@ -2279,7 +2375,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2313,7 +2409,7 @@ checksum = 
"92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] name = "e2e" -version = "0.6.0" +version = "0.9.0" dependencies = [ "alloy", "anyhow", @@ -2324,14 +2420,14 @@ dependencies = [ "fuel", "fuel-core-chain-config", "fuel-core-types", - "futures-util", + "futures", "hex", "itertools 0.13.0", "portpicker", "ports", "rand", "reqwest 0.12.7", - "secp256k1 0.29.0", + "secp256k1", "serde", "serde_json", "storage", @@ -2339,7 +2435,6 @@ dependencies = [ "testcontainers", "tokio", "url", - "validator", "walkdir", "zip", ] @@ -2430,14 +2525,14 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2457,7 +2552,7 @@ checksum = "a1ab991c1362ac86c61ab6f556cff143daa22e5a15e4e189df818b2fd19fe65b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -2489,17 +2584,24 @@ dependencies = [ [[package]] name = "eth" -version = "0.6.0" +version = "0.9.0" dependencies = [ "alloy", "async-trait", "aws-config", "aws-sdk-kms", "c-kzg", + "delegate", "futures", + "itertools 0.13.0", "metrics", "mockall", "ports", + "pretty_assertions", + "rand", + "rayon", + "serde", + "test-case", "thiserror", "tokio", "tracing", @@ -2586,7 +2688,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -2644,22 +2746,25 @@ checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" [[package]] name = "fuel" -version = "0.6.0" +version = "0.9.0" dependencies = [ - "async-trait", + "cynic", + "delegate", "fuel-core-client", "fuel-core-types", + "futures", "metrics", "ports", "tokio", + "trait-variant", "url", ] [[package]] name = "fuel-asm" -version = "0.55.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "491f1777538b0e1d479609d0d75bca5242c7fd3394f2ddd4ea55b8c96bcc8387" +checksum = "b29ea55a794c00d0dfaad06f11720a05fa928603f812dca1c38163f2b240860a" dependencies = [ "bitflags 2.6.0", "fuel-types", @@ -2669,16 +2774,18 @@ dependencies = [ [[package]] name = "fuel-block-committer" -version = "0.6.0" +version = "0.9.0" dependencies = [ "actix-web", "anyhow", "clap", + "clock", "config", "eth", "fuel", "humantime", "metrics", + "num_cpus", "ports", "serde", "serde_json", @@ -2690,14 +2797,13 @@ dependencies = [ "tracing", "tracing-subscriber", "url", - "validator", ] [[package]] name = "fuel-core-chain-config" -version = "0.31.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05c13f888fb9b705b64bbcb56d022345cf85a86535d646bf53e20771eb4b986a" +checksum = "7a4c5a71702426b8354bff2010131c0abb4a4f0b608cc7a6dfd72f9e785ba478" dependencies = [ "anyhow", "bech32", @@ -2715,13 +2821,13 @@ dependencies = [ [[package]] name = "fuel-core-client" -version = "0.31.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd1910fce3eebe33b5acba656e092e5ede267acb4b1c3f17c122a0477270091" +checksum = "5770dbda6220e641eb57ee204dd5914fa15170afe3009473f57cdf15e2339fd8" dependencies = [ 
"anyhow", "cynic", - "derive_more", + "derive_more 0.99.18", "eventsource-client", "fuel-core-types", "futures", @@ -2739,12 +2845,12 @@ dependencies = [ [[package]] name = "fuel-core-storage" -version = "0.31.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a3ee3b462cc9b7e62b3ae04d5e3b792e6742c479bd75d6bc0987443a92b5299" +checksum = "1daa7422e48120b1623b53fe1a1152d11314f30fb290a73dc80f7e128c1f9014" dependencies = [ "anyhow", - "derive_more", + "derive_more 0.99.18", "enum-iterator", "fuel-core-types", "fuel-vm", @@ -2761,28 +2867,27 @@ dependencies = [ [[package]] name = "fuel-core-types" -version = "0.31.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "615783f63b40075d1bf64a42b4fd4edce076458c94b0fab2278a570b2b7a8e0e" +checksum = "7aa1c54f09cc7c29a11ca1129f73105745f8374a192e3e24040c10822871d83f" dependencies = [ "anyhow", "bs58", "derivative", - "derive_more", + "derive_more 0.99.18", "fuel-vm", "rand", "secrecy", "serde", "tai64", - "thiserror", "zeroize", ] [[package]] name = "fuel-crypto" -version = "0.55.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f74f03ba9b27f375a0482b1afe20d5b8cfd032fedba683a584cdbd6d10147439" +checksum = "2661b2a6c43e811be4892250513a6f4c46a69cc7092a1e5b240f49697f08292e" dependencies = [ "coins-bip32 0.8.7", "coins-bip39 0.8.7", @@ -2793,7 +2898,7 @@ dependencies = [ "lazy_static", "p256", "rand", - "secp256k1 0.26.0", + "secp256k1", "serde", "sha2", "zeroize", @@ -2801,23 +2906,23 @@ dependencies = [ [[package]] name = "fuel-derive" -version = "0.55.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ad30ad1a11e5a811ae67b6b0cb6785ce21bcd5ef0afd442fd963d5be95d09d" +checksum = "03509567813a351ca60d8507b2ac476b06c1590f2e9edbe72bc205bb04e0af12" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "synstructure", ] [[package]] name = "fuel-merkle" -version = "0.55.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5433c41ffbf531eed1380148cd68e37f9dd7e25966a9c59518f6b09e346e80e2" +checksum = "24938ee8a5e9efe71994203527dffb4c81872aa2953de0c347ad38696527b58a" dependencies = [ - "derive_more", + "derive_more 0.99.18", "digest 0.10.7", "fuel-storage", "hashbrown 0.13.2", @@ -2828,19 +2933,19 @@ dependencies = [ [[package]] name = "fuel-storage" -version = "0.55.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce3fc3cd96fe312442cdf35966b96d66becd02582b505f856f74953f57adf020" +checksum = "4283f9cabc26a1154a31268e79de1e0f317d57231b4dc8d7282efb22e49d2ed3" [[package]] name = "fuel-tx" -version = "0.55.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e00cc42ae3121b1881a6ae8306696d1bea73adca424216d9f676ee91d3927c74" +checksum = "572f9e8fdda6abfe83cf1456a11eabf1de66d682176fb097f2f950704cc50c26" dependencies = [ "bitflags 2.6.0", "derivative", - "derive_more", + "derive_more 0.99.18", "fuel-asm", "fuel-crypto", "fuel-merkle", @@ -2857,9 +2962,9 @@ dependencies = [ [[package]] name = "fuel-types" -version = "0.55.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae98e143dec4e6cb114a92435e314f1d4815e17e8fded24332fb285319d60167" +checksum = "7f196060a10db0293cdfca455f7e2f3a7914f46f25e0fbc2d28cf0a11e835a86" dependencies = [ "fuel-derive", "hex", @@ -2869,16 
+2974,16 @@ dependencies = [ [[package]] name = "fuel-vm" -version = "0.55.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "641a2ee5a3398633fa243fba3343cbe2225ae335a09141f6b94041720cfc3520" +checksum = "a6f4e0cc4ae65d00df6f3dcae90b81dd21135b45b932a79e368f35d255df12a1" dependencies = [ "anyhow", "async-trait", "backtrace", "bitflags 2.6.0", "derivative", - "derive_more", + "derive_more 0.99.18", "ethnum", "fuel-asm", "fuel-crypto", @@ -2974,7 +3079,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3031,17 +3136,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", - "js-sys", "libc", "wasi", - "wasm-bindgen", ] [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "glob" @@ -3082,7 +3185,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.4.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -3101,7 +3204,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.4.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -3456,21 +3559,21 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.12", - "rustls-native-certs 0.7.3", + "rustls 0.23.13", + "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", "tower-service", - "webpki-roots 0.26.3", + "webpki-roots 0.26.6", ] [[package]] @@ -3503,9 +3606,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -3516,7 +3619,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -3538,9 +3641,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -3619,7 +3722,7 @@ dependencies = [ "autocfg", "impl-tools-lib", "proc-macro-error", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3631,7 +3734,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -3658,9 +3761,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown 0.14.5", @@ -3681,9 +3784,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "is_terminal_polyfill" @@ -3733,26 +3836,11 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "jsonwebtoken" -version = "9.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" -dependencies = [ - "base64 0.21.7", - "js-sys", - "pem", - "ring", - "serde", - "serde_json", - "simple_asn1", -] - [[package]] name = "k256" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa", @@ -3773,9 +3861,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "422fbc7ff2f2f5bdffeb07718e5a5324dca72b0c9293d50df4026652385e3314" +checksum = "505d1856a39b200489082f90d897c3f07c455563880bc5952e38eabf731c83b6" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -3922,7 +4010,7 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "metrics" -version = "0.6.0" +version = "0.9.0" dependencies = [ "prometheus", ] @@ -3939,15 +4027,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -3994,7 +4073,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -4024,6 +4103,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nonempty" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "303e8749c804ccd6ca3b428de7fe0d86cb86bc7606bc15291f100fd487960bb8" + [[package]] name = "num-bigint" version = "0.4.6" @@ -4115,14 +4200,14 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "object" -version = "0.36.3" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] @@ -4156,7 +4241,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -4245,7 +4330,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.3", + 
"redox_syscall", "smallvec", "windows-targets 0.52.6", ] @@ -4272,7 +4357,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -4297,16 +4382,6 @@ dependencies = [ "hmac", ] -[[package]] -name = "pem" -version = "3.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" -dependencies = [ - "base64 0.22.1", - "serde", -] - [[package]] name = "pem-rfc7468" version = "0.7.0" @@ -4324,9 +4399,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.11" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ "memchr", "thiserror", @@ -4360,7 +4435,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -4398,9 +4473,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "portpicker" @@ -4413,19 +4488,20 @@ dependencies = [ [[package]] name = "ports" -version = "0.6.0" +version = "0.9.0" dependencies = [ "alloy", "async-trait", + "delegate", "fuel-core-client", "futures", - "impl-tools", + "itertools 0.13.0", "mockall", + "nonempty", "rand", - "serde", "sqlx", "thiserror", - "validator", + "trait-variant", ] [[package]] @@ -4482,6 +4558,16 @@ dependencies = [ "termtree", ] +[[package]] +name = "pretty_assertions" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" +dependencies = [ + "diff", + "yansi", +] + [[package]] name = "primeorder" version = "0.13.6" @@ -4520,7 +4606,6 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn 1.0.109", "version_check", ] @@ -4535,6 +4620,28 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "proc-macro2" version = "1.0.86" @@ -4602,16 +4709,16 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quinn" -version = "0.11.3" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b22d8e7369034b9a7132bc2008cac12f2013c8132b45e0554e6e20e2617f2156" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.12", + "rustls 0.23.13", "socket2", "thiserror", "tokio", 
@@ -4620,15 +4727,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba92fb39ec7ad06ca2582c0ca834dfeadcaf06ddfc8e635c80aa7e1c05315fdd" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand", "ring", "rustc-hash", - "rustls 0.23.12", + "rustls 0.23.13", "slab", "thiserror", "tinyvec", @@ -4637,15 +4744,15 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" +checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4672,6 +4779,7 @@ dependencies = [ "libc", "rand_chacha", "rand_core", + "serde", ] [[package]] @@ -4703,19 +4811,30 @@ dependencies = [ ] [[package]] -name = "redox_syscall" -version = "0.4.1" +name = "rayon" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ - "bitflags 1.3.2", + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", ] [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -4826,7 +4945,7 @@ dependencies = [ "http-body 1.0.1", "http-body-util", "hyper 1.4.1", - "hyper-rustls 0.27.2", + "hyper-rustls 0.27.3", "hyper-tls", "hyper-util", "ipnet", @@ -4838,7 +4957,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-native-certs 0.7.3", "rustls-pemfile 2.1.3", "rustls-pki-types", @@ -4854,7 +4973,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots 0.26.3", + "webpki-roots 0.26.6", "windows-registry", ] @@ -5000,9 +5119,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.35" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a85d50532239da68e9addb745ba38ff4612a242c1c7ceea689c4bc7c2f43c36f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -5032,21 +5151,21 @@ dependencies = [ "log", "ring", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.7", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -5076,6 
+5195,19 @@ dependencies = [ "security-framework", ] +[[package]] +name = "rustls-native-certs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.1.3", + "rustls-pki-types", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "1.0.4" @@ -5113,9 +5245,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring", "rustls-pki-types", @@ -5157,11 +5289,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5222,38 +5354,19 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.26.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4124a35fe33ae14259c490fd70fa199a32b9ce9502f2ee6bc4f81ec06fa65894" +checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" dependencies = [ "rand", - "secp256k1-sys 0.8.1", -] - -[[package]] -name = "secp256k1" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0cc0f1cf93f4969faf3ea1c7d8a9faed25918d96affa959720823dfe86d4f3" -dependencies = [ - "rand", - "secp256k1-sys 0.10.0", -] - -[[package]] -name = "secp256k1-sys" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a129b9e9efbfb223753b9163c4ab3b13cff7fd9c7f010fbac25ab4099fa07e" -dependencies = [ - "cc", + "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1433bd67156263443f14d603720b082dd3121779323fce20cba2aa07b874bc1b" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" dependencies = [ "cc", ] @@ -5282,9 +5395,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -5322,29 +5435,29 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = 
"243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -5360,7 +5473,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -5394,7 +5507,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.4.0", + "indexmap 2.5.0", "serde", "serde_derive", "serde_json", @@ -5411,14 +5524,18 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] name = "services" -version = "0.6.0" +version = "0.9.0" dependencies = [ - "async-trait", + "bytesize", + "clock", + "delegate", + "eth", + "flate2", "fuel-crypto", "futures", "hex", @@ -5426,15 +5543,18 @@ dependencies = [ "metrics", "mockall", "ports", + "pretty_assertions", "rand", + "rayon", "serde", + "services", "storage", "tai64", "thiserror", "tokio", "tokio-util", "tracing", - "validator", + "trait-variant", ] [[package]] @@ -5471,9 +5591,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d79b758b7cb2085612b11a235055e485605a5103faccdd633f35bd7aee69dd" +checksum = "c28efc5e327c837aa837c59eae585fc250715ef939ac32881bcc11677cd02d46" dependencies = [ "cc", "cfg-if", @@ -5529,18 +5649,6 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" -[[package]] -name = "simple_asn1" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" -dependencies = [ - "num-bigint", - "num-traits", - "thiserror", - "time", -] - [[package]] name = "slab" version = "0.4.9" @@ -5587,9 +5695,9 @@ dependencies = [ [[package]] name = "sqlformat" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f895e3734318cc55f1fe66258926c9b910c124d47520339efecbb6c59cec7c1f" +checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790" dependencies = [ "nom", "unicode_categories", @@ -5630,7 +5738,7 @@ dependencies = [ "futures-util", "hashlink", "hex", - "indexmap 2.4.0", + "indexmap 2.5.0", "log", "memchr", "once_cell", @@ -5814,10 +5922,11 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "storage" -version = "0.6.0" +version = "0.9.0" dependencies = [ - "async-trait", + "delegate", "hex", + "itertools 0.13.0", "ports", "rand", "serde", @@ -5860,7 +5969,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -5871,7 +5980,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -5921,7 +6030,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -5934,7 +6043,7 @@ dependencies = [ "proc-macro2", "quote", 
"rustversion", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -5956,9 +6065,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.76" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", @@ -5967,14 +6076,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.7" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c837dc8852cb7074e46b444afb81783140dab12c58867b49fb3898fbafedf7ea" +checksum = "0ab661c8148c2261222a4d641ad5477fd4bea79406a99056096a0b41b35617a5" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -6000,7 +6109,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -6058,6 +6167,39 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" +[[package]] +name = "test-case" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" +dependencies = [ + "test-case-macros", +] + +[[package]] +name = "test-case-core" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adcb7fd841cd518e279be3d5a3eb0636409487998a4aff22f3de87b81e88384f" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.77", +] + +[[package]] +name = "test-case-macros" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", + "test-case-core", +] + [[package]] name = "testcontainers" version = "0.20.1" @@ -6091,22 +6233,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -6185,9 +6327,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.3" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", @@ -6219,7 +6361,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -6259,16 +6401,16 @@ version = "0.26.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -6284,19 +6426,19 @@ checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" dependencies = [ "futures-util", "log", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", "tungstenite", - "webpki-roots 0.26.3", + "webpki-roots 0.26.6", ] [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -6328,11 +6470,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ - "indexmap 2.4.0", + "indexmap 2.5.0", "serde", "serde_spanned", "toml_datetime", @@ -6352,7 +6494,20 @@ dependencies = [ "tokio", "tower-layer", "tower-service", - "tracing", +] + +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", ] [[package]] @@ -6387,7 +6542,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -6424,6 +6579,17 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "trait-variant" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -6443,7 +6609,7 @@ dependencies = [ "httparse", "log", "rand", - "rustls 0.23.12", + "rustls 0.23.13", "rustls-pki-types", "sha1", "thiserror", @@ -6488,15 +6654,15 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] @@ -6509,9 +6675,15 
@@ checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + +[[package]] +name = "unicode-xid" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "unicode_categories" @@ -6570,21 +6742,6 @@ version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" -[[package]] -name = "validator" -version = "0.6.0" -dependencies = [ - "fuel-core-client", - "fuel-crypto", - "hex", - "mockall", - "rand", - "serde", - "tai64", - "thiserror", - "validator", -] - [[package]] name = "valuable" version = "0.1.0" @@ -6677,7 +6834,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "wasm-bindgen-shared", ] @@ -6711,7 +6868,7 @@ checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6740,20 +6897,20 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "0.26.3" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] [[package]] name = "whoami" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.4.1", + "redox_syscall", "wasite", ] @@ -7034,6 +7191,12 @@ version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" +[[package]] +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + [[package]] name = "zerocopy" version = "0.7.35" @@ -7052,7 +7215,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -7072,7 +7235,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.77", ] [[package]] @@ -7086,7 +7249,7 @@ dependencies = [ "crossbeam-utils", "displaydoc", "flate2", - "indexmap 2.4.0", + "indexmap 2.5.0", "memchr", "thiserror", "zopfli", diff --git a/Cargo.toml b/Cargo.toml index 40c0466f..1e100fd6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,17 +3,17 @@ resolver = "2" members = [ "committer", "e2e", + "packages/clock", "packages/eth", "packages/fuel", "packages/metrics", "packages/ports", "packages/services", "packages/storage", - "packages/validator", ] [workspace.package] -version = "0.6.0" +version = "0.9.0" 
authors = ["Fuel Labs "] edition = "2021" homepage = "https://fuel.network/" @@ -29,47 +29,53 @@ metrics = { path = "./packages/metrics", default-features = false } ports = { path = "./packages/ports", default-features = false } storage = { path = "./packages/storage", default-features = false } services = { path = "./packages/services", default-features = false } -validator = { path = "./packages/validator", default-features = false } +clock = { path = "./packages/clock", default-features = false } actix-web = { version = "4", default-features = false } -alloy = { version = "0.2.1", default-features = false } -alloy-chains = { version = "0.1.0", default-features = false } +bytesize = { version = "1.3", default-features = false } +alloy = { version = "0.3.6", default-features = false } +rayon = { version = "1.10", default-features = false } +num_cpus = { version = "1.16", default-features = false } anyhow = { version = "1.0", default-features = false } +async-trait = { version = "0.1", default-features = false } aws-config = { version = "1.5.5", default-features = false } aws-sdk-kms = { version = "1.36", default-features = false } -async-trait = { version = "0.1", default-features = false } -c-kzg = { version = "1.0", default-features = false } clap = { version = "4.5", default-features = false } config = { version = "0.14", default-features = false } +delegate = { version = "0.13", default-features = false } +flate2 = { version = "1.0", default-features = false } fs_extra = { version = "1.3", default-features = false } -fuel-core-chain-config = { version = "0.31", default-features = false } -fuel-core-client = { version = "0.31", default-features = false } -fuel-core-types = { version = "0.31", default-features = false } -fuel-crypto = { version = "0.55", default-features = false } +fuel-core-chain-config = { version = "0.36", default-features = false } +fuel-core-client = { version = "0.36", default-features = false } +fuel-core-types = { version = "0.36", default-features = false } +fuel-crypto = { version = "0.57", default-features = false } futures = { version = "0.3", default-features = false } -futures-util = { version = "0.3", default-features = false } hex = { version = "0.4", default-features = false } humantime = { version = "2.1", default-features = false } -impl-tools = { version = "0.10.0", default-features = false } itertools = { version = "0.13", default-features = false } mockall = { version = "0.12", default-features = false } +nonempty = { version = "0.10", default-features = false } portpicker = { version = "0.1", default-features = false } +pretty_assertions = { version = "1.4", default-features = false } prometheus = { version = "0.13", default-features = false } rand = { version = "0.8", default-features = false } reqwest = { version = "0.12", default-features = false } -rlp = { version = "0.5.2", default-features = false } secp256k1 = { version = "0.29", default-features = false } serde = { version = "1.0", default-features = false } serde_json = { version = "1.0", default-features = false } sqlx = { version = "0.7.4", default-features = false } tai64 = { version = "4.0.0", default-features = false } tempfile = { version = "3.10", default-features = false } +test-case = { version = "3.3", default-features = false } testcontainers = { version = "0.20", default-features = false } +# to be removed once alloy fixes the issue with blob encoding +c-kzg = { version = "1.0", default-features = false } thiserror = { version = "1.0", default-features = false } tokio = { 
version = "1.37", default-features = false } tokio-util = { version = "0.7", default-features = false } tracing = { version = "0.1", default-features = false } tracing-subscriber = { version = "0.3", default-features = false } +trait-variant = { version = "0.1", default-features = false } url = { version = "2.3", default-features = false } walkdir = { version = "2.5", default-features = false } zip = { version = "2.1", default-features = false } diff --git a/committer/Cargo.toml b/committer/Cargo.toml index 3a8e9d23..7dad3a77 100644 --- a/committer/Cargo.toml +++ b/committer/Cargo.toml @@ -10,8 +10,10 @@ publish = { workspace = true } rust-version = { workspace = true } [dependencies] +num_cpus = { workspace = true } actix-web = { workspace = true, features = ["macros"] } clap = { workspace = true, features = ["default", "derive"] } +clock = { workspace = true } config = { workspace = true, features = ["toml", "async"] } eth = { workspace = true } fuel = { workspace = true } @@ -28,7 +30,6 @@ tokio-util = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["fmt", "json"] } url = { workspace = true } -validator = { workspace = true, features = ["validator"] } [dev-dependencies] anyhow = { workspace = true } diff --git a/committer/src/config.rs b/committer/src/config.rs index 7a09b224..80499161 100644 --- a/committer/src/config.rs +++ b/committer/src/config.rs @@ -1,8 +1,14 @@ -use std::{net::Ipv4Addr, path::PathBuf, str::FromStr, time::Duration}; +use std::{ + net::Ipv4Addr, + num::{NonZeroU32, NonZeroUsize}, + str::FromStr, + time::Duration, +}; use clap::{command, Parser}; use eth::Address; use serde::Deserialize; +use services::CompressionLevel; use storage::DbConfig; use url::Url; @@ -23,6 +29,19 @@ impl Config { } } + if self.app.bundle.fragments_to_accumulate.get() > 6 { + return Err(crate::errors::Error::Other( + "Fragments to accumulate must be <= 6".to_string(), + )); + } + + if self.app.bundle.block_height_lookback < self.app.bundle.blocks_to_accumulate.get() as u32 + { + return Err(crate::errors::Error::Other( + "block_height_lookback must be >= blocks_to_accumulate".to_string(), + )); + } + Ok(()) } } @@ -34,6 +53,7 @@ pub struct Fuel { pub graphql_endpoint: Url, /// Block producer address pub block_producer_address: ports::fuel::FuelBytes32, + pub max_full_blocks_per_request: NonZeroU32, } #[derive(Debug, Clone, Deserialize)] @@ -47,6 +67,9 @@ pub struct Eth { pub rpc: Url, /// Ethereum address of the fuel chain state contract. pub state_contract_address: Address, + /// To manually be enabled if a transaction gets stuck. Geth requires a multiplier of 2. Should + /// be removed once the tx manager is implemented. 
@@ -34,6 +53,7 @@ pub struct Fuel { pub graphql_endpoint: Url, /// Block producer address pub block_producer_address: ports::fuel::FuelBytes32, + pub max_full_blocks_per_request: NonZeroU32, } #[derive(Debug, Clone, Deserialize)] @@ -47,6 +67,9 @@ pub struct Eth { pub rpc: Url, /// Ethereum address of the fuel chain state contract. pub state_contract_address: Address, + /// To be enabled manually if a transaction gets stuck. Geth requires a multiplier of 2. Should + /// be removed once the tx manager is implemented. + pub first_tx_gas_estimation_multiplier: Option<u64>, }
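A rough illustration of what the new multiplier field implies (the function and wiring are ours for illustration only; the real use lives in the `eth` crate, and `Option<u64>` is our reconstruction of the stripped type parameter):

```rust
// Sketch only: bump the initial gas estimate so a stuck transaction can later
// be replaced. Geth only accepts a blob-tx replacement when fees are at least
// doubled, hence the suggested multiplier of 2.
fn first_tx_gas_limit(estimate: u64, multiplier: Option<u64>) -> u64 {
    estimate.saturating_mul(multiplier.unwrap_or(1))
}
```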
Specifically: + /// + /// - **Exclusion of Stale Blocks:** If a block arrives with a height less than the current + /// height minus the `block_height_lookback`, it will be excluded from the bundling process. + /// + /// - **Bundling Behavior:** + /// - **Unbundled Blocks:** Blocks outside the lookback window will not be bundled. + /// - **Already Bundled Blocks:** If a block has already been bundled, its fragments will + /// not be sent to L1. + /// - **Failed Submissions:** If fragments of a bundled block were sent to L1 but failed, + /// they will not be retried. + /// + /// This approach effectively "gives up" on blocks that fall outside the defined window. + pub block_height_lookback: u32, + + /// Compression level used for compressing block data before submission. + /// + /// Compression is applied to the blocks to minimize the transaction size. Valid values are: + /// - `"disabled"`: No compression is applied. + /// - `"min"` to `"max"`: Compression levels where higher numbers indicate more aggressive compression. + pub compression_level: CompressionLevel, } fn human_readable_duration<'de, D>(deserializer: D) -> Result @@ -92,6 +190,7 @@ pub struct Internal { pub between_eth_event_stream_restablishing_attempts: Duration, pub eth_errors_before_unhealthy: usize, pub balance_update_interval: Duration, + pub new_bundle_check_interval: Duration, } impl Default for Internal { @@ -101,6 +200,7 @@ impl Default for Internal { between_eth_event_stream_restablishing_attempts: Duration::from_secs(3), eth_errors_before_unhealthy: 3, balance_update_interval: Duration::from_secs(10), + new_bundle_check_interval: Duration::from_secs(10), } } } @@ -109,19 +209,20 @@ impl Default for Internal { name = "fuel-block-committer", version, about, - propagate_version = true, - arg_required_else_help(true) + propagate_version = true )] struct Cli { - #[arg(value_name = "FILE", help = "Path to the configuration file")] - config_path: PathBuf, + #[arg( + value_name = "FILE", + help = "Used to be the path to the configuration, unused currently until helm charts are updated." 
fn human_readable_duration<'de, D>(deserializer: D) -> Result<Duration, D::Error> @@ -92,6 +190,7 @@ pub struct Internal { pub between_eth_event_stream_restablishing_attempts: Duration, pub eth_errors_before_unhealthy: usize, pub balance_update_interval: Duration, + pub new_bundle_check_interval: Duration, } impl Default for Internal { @@ -101,6 +200,7 @@ between_eth_event_stream_restablishing_attempts: Duration::from_secs(3), eth_errors_before_unhealthy: 3, balance_update_interval: Duration::from_secs(10), + new_bundle_check_interval: Duration::from_secs(10), } } } @@ -109,19 +209,20 @@ name = "fuel-block-committer", version, about, - propagate_version = true, - arg_required_else_help(true) + propagate_version = true )] struct Cli { - #[arg(value_name = "FILE", help = "Path to the configuration file")] - config_path: PathBuf, + #[arg( + value_name = "FILE", + help = "Previously the path to the configuration file; currently unused until the helm charts are updated." + )] + config_path: Option<String>, } pub fn parse() -> crate::errors::Result<Config> { - let cli = Cli::parse(); + let _ = Cli::parse(); let config = config::Config::builder() - .add_source(config::File::from(cli.config_path)) .add_source(config::Environment::with_prefix("COMMITTER").separator("__")) .build()?;
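With the file source gone, `parse()` above builds the configuration exclusively from `COMMITTER`-prefixed environment variables, where each `__` separator walks one level down the struct tree. A few mappings for orientation (these variable names are exported verbatim by the e2e harness later in this diff):

```rust
// COMMITTER__FUEL__BLOCK_PRODUCER_ADDRESS          -> config.fuel.block_producer_address
// COMMITTER__APP__DB__PORT                         -> config.app.db.port
// COMMITTER__APP__BUNDLE__COMPRESSION_LEVEL        -> config.app.bundle.compression_level
// COMMITTER__APP__BUNDLE__FRAGMENTS_TO_ACCUMULATE  -> config.app.bundle.fragments_to_accumulate
```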
diff --git a/committer/src/main.rs b/committer/src/main.rs index b93f97ab..cb8abbdb 100644 --- a/committer/src/main.rs +++ b/committer/src/main.rs @@ -16,7 +16,7 @@ pub type L1 = eth::WebsocketClient; pub type AwsClient = eth::AwsClient; pub type Database = storage::Postgres; pub type FuelApi = fuel::HttpClient; -pub type Validator = validator::BlockValidator; +pub type Validator = services::BlockValidator; #[tokio::main] async fn main() -> Result<()> { @@ -80,7 +80,16 @@ async fn main() -> Result<()> { // If the blob pool wallet key is set, we need to start // the state committer and state importer if config.eth.blob_pool_key_arn.is_some() { + let block_bundler = setup::block_bundler( + fuel_adapter.clone(), + storage.clone(), + cancel_token.clone(), + &config, + &internal_config, + ); + let state_committer_handle = setup::state_committer( + fuel_adapter.clone(), ethereum_rpc.clone(), storage.clone(), cancel_token.clone(), @@ -88,7 +97,7 @@ ); let state_importer_handle = - setup::state_importer(fuel_adapter, storage.clone(), cancel_token.clone(), &config); + setup::block_importer(fuel_adapter, storage.clone(), cancel_token.clone(), &config); let state_listener_handle = setup::state_listener( ethereum_rpc, @@ -100,6 +109,7 @@ handles.push(state_committer_handle); handles.push(state_importer_handle); + handles.push(block_bundler); handles.push(state_listener_handle); } diff --git a/committer/src/setup.rs b/committer/src/setup.rs index f606b46e..1d3028d7 100644 --- a/committer/src/setup.rs +++ b/committer/src/setup.rs @@ -1,13 +1,15 @@ use std::{num::NonZeroU32, time::Duration}; -use eth::AwsConfig; +use clock::SystemClock; +use eth::{AwsConfig, Eip4844BlobEncoder}; use metrics::{prometheus::Registry, HealthChecker, RegistersMetrics}; -use ports::storage::Storage; -use services::{BlockCommitter, CommitListener, Runner, WalletBalanceTracker}; +use services::{ + BlockBundler, BlockBundlerConfig, BlockCommitter, BlockValidator, CommitListener, Runner, + WalletBalanceTracker, +}; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tracing::{error, info}; -use validator::BlockValidator; use crate::{config, errors::Result, AwsClient, Database, FuelApi, L1}; @@ -50,7 +52,7 @@ pub fn l1_event_listener( pub fn block_committer( commit_interval: NonZeroU32, l1: L1, - storage: impl Storage + 'static, + storage: Database, fuel: FuelApi, config: &config::Config, registry: &Registry, @@ -70,34 +72,87 @@ ) } +pub fn block_bundler( + fuel: FuelApi, + storage: Database, + cancel_token: CancellationToken, + config: &config::Config, + internal_config: &config::Internal, +) -> tokio::task::JoinHandle<()> { + let bundler_factory = services::BundlerFactory::new( + Eip4844BlobEncoder, + config.app.bundle.compression_level, + config.app.bundle.optimization_step, + ); + + let block_bundler = BlockBundler::new( + fuel, + storage, + SystemClock, + bundler_factory, + BlockBundlerConfig { + optimization_time_limit: config.app.bundle.optimization_timeout, + block_accumulation_time_limit: config.app.bundle.accumulation_timeout, + num_blocks_to_accumulate: config.app.bundle.blocks_to_accumulate, + lookback_window: config.app.bundle.block_height_lookback, + max_bundles_per_optimization_run: num_cpus::get() + .try_into() + .expect("num cpus not zero"), + }, + ); + + schedule_polling( + internal_config.new_bundle_check_interval, + block_bundler, + "Block Bundler", + cancel_token, + ) +} + pub fn state_committer( + fuel: FuelApi, l1: L1, - storage: impl Storage + 'static, + storage: Database, cancel_token: CancellationToken, config: &config::Config, ) -> tokio::task::JoinHandle<()> { - let state_committer = services::StateCommitter::new(l1, storage); + let state_committer = services::StateCommitter::new( + l1, + fuel, + storage, + services::StateCommitterConfig { + lookback_window: config.app.bundle.block_height_lookback, + fragment_accumulation_timeout: config.app.bundle.fragment_accumulation_timeout, + fragments_to_accumulate: config.app.bundle.fragments_to_accumulate, + }, + SystemClock, + ); schedule_polling( - config.app.block_check_interval, + config.app.tx_finalization_check_interval, state_committer, "State Committer", cancel_token, ) } -pub fn state_importer( +pub fn block_importer( fuel: FuelApi, - storage: impl Storage + 'static, + storage: Database, cancel_token: CancellationToken, config: &config::Config, ) -> tokio::task::JoinHandle<()> { let validator = BlockValidator::new(*config.fuel.block_producer_address); - let state_importer = services::StateImporter::new(storage, fuel, validator); + let block_importer = services::BlockImporter::new( + storage, + fuel, + validator, + config.app.bundle.block_height_lookback, + ); schedule_polling( config.app.block_check_interval, - state_importer, + block_importer, - "State Importer", + "Block Importer", cancel_token, ) @@ -105,13 +160,17 @@ pub fn state_listener( l1: L1, - storage: impl Storage + 'static, + storage: Database, cancel_token: CancellationToken, registry: &Registry, config: &config::Config, ) -> tokio::task::JoinHandle<()> { - let state_listener = - services::StateListener::new(l1, storage, config.app.num_blocks_to_finalize_tx); + let state_listener = services::StateListener::new( + l1, + storage, + config.app.num_blocks_to_finalize_tx, + SystemClock, + ); state_listener.register_metrics(registry); @@ -139,6 +198,7 @@ pub async fn l1_adapter( config.eth.blob_pool_key_arn.clone(), internal_config.eth_errors_before_unhealthy, aws_client, + config.eth.first_tx_gas_estimation_multiplier, ) .await?; @@ -180,6 +240,7 @@ pub fn fuel_adapter( let fuel_adapter = FuelApi::new( &config.fuel.graphql_endpoint, internal_config.fuel_errors_before_unhealthy, + config.fuel.max_full_blocks_per_request, ); fuel_adapter.register_metrics(registry);
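Every service above is handed to `schedule_polling`, which is defined elsewhere in this crate and untouched by this patch. For orientation, a minimal sketch of the loop it is assumed to run (signature and body are our reconstruction, not code from the repository):

```rust
use std::time::Duration;
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
use tracing::{error, info};

// Sketch only: poll the runner, then wait for the next tick or for shutdown.
fn schedule_polling(
    polling_interval: Duration,
    mut runner: impl services::Runner + Send + 'static,
    name: &'static str,
    cancel_token: CancellationToken,
) -> JoinHandle<()> {
    tokio::spawn(async move {
        loop {
            if let Err(e) = runner.run().await {
                error!("{name} encountered an error: {e:?}");
            }
            tokio::select! {
                _ = cancel_token.cancelled() => break,
                _ = tokio::time::sleep(polling_interval) => {}
            }
        }
        info!("{name} stopped");
    })
}
```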
diff --git a/configurations/development/config.toml b/configurations/development/config.toml deleted file mode 100644 index 9a7c5731..00000000 --- a/configurations/development/config.toml +++ /dev/null @@ -1,22 +0,0 @@ -[eth] -state_contract_address = "0xDc64a140Aa3E981100a9becA4E685f962f0cF6C9" -rpc = "ws://localhost:8545" - -[fuel] -graphql_endpoint = "http://localhost:4000" -block_producer_public_key = "0x73dc6cc8cc0041e4924954b35a71a22ccb520664c522198a6d31dc6c945347bb854a39382d296ec64c70d7cea1db75601595e29729f3fbdc7ee9dae66705beb4" - -[app] -port = 8080 -host = "0.0.0.0" -block_check_interval = "1s" -num_blocks_to_finalize_tx = "3" - -[app.db] -host = "localhost" -port = 5432 -username = "username" -password = "password" -database = "test" -max_connections = 5 -use_ssl = false diff --git a/e2e/Cargo.toml b/e2e/Cargo.toml index b6ac0a9e..9edce3c5 100644 --- a/e2e/Cargo.toml +++ b/e2e/Cargo.toml @@ -25,31 +25,29 @@ walkdir = { workspace = true } zip = { workspace = true, features = ["deflate"] } [dev-dependencies] -fs_extra = { workspace = true } alloy = { workspace = true, features = [ "signer-aws", "signer-mnemonic", "serde", ] } anyhow = { workspace = true, features = ["std"] } -aws-sdk-kms = { workspace = true, features = ["rustls"] } aws-config = { workspace = true, features = ["rustls"] } +aws-sdk-kms = { workspace = true, features = ["rustls"] } eth = { workspace = true, features = ["test-helpers"] } +fs_extra = { workspace = true } fuel = { workspace = true, features = ["test-helpers"] } fuel-core-chain-config = { workspace = true, features = [ "std", "test-helpers", ] } fuel-core-types = { workspace = true } -futures-util = { workspace = true } +futures = { workspace = true } hex = { workspace = true } +itertools = { workspace = true, features = ["use_alloc"] } portpicker = { workspace = true } ports = { workspace = true, features = ["fuel", "l1"] } rand = { workspace = true, features = ["std"] } reqwest = { workspace = true } -# `rustls` must be used because `ethers` enables it, and it cannot be enabled -# simultaneously with `native-tls`. Since we cannot configure this within -# `ethers`, we must also use `rustls`. secp256k1 = { workspace = true, features = ["rand-std"] } serde = { workspace = true } serde_json = { workspace = true } @@ -63,4 +61,3 @@ tokio = { workspace = true, features = [ "fs", ] } url = { workspace = true } -validator = { workspace = true, features = ["validator"] } diff --git a/e2e/src/committer.rs b/e2e/src/committer.rs index a480fbc7..812bcce5 100644 --- a/e2e/src/committer.rs +++ b/e2e/src/committer.rs @@ -16,13 +16,18 @@ pub struct Committer { db_port: Option<u16>, db_name: Option<String>, kms_url: Option<String>, + bundle_accumulation_timeout: Option<String>, + bundle_blocks_to_accumulate: Option<String>, + bundle_optimization_step: Option<String>, + bundle_optimization_timeout: Option<String>, + bundle_block_height_lookback: Option<String>, + bundle_compression_level: Option<String>, + bundle_fragments_to_accumulate: Option<String>, + bundle_fragment_accumulation_timeout: Option<String>, } impl Committer { pub async fn start(self) -> anyhow::Result<CommitterProcess> { - let config = - Path::new(env!("CARGO_MANIFEST_DIR")).join("../configurations/development/config.toml"); - macro_rules!
get_field { ($field:ident) => { self.$field @@ -34,8 +39,11 @@ impl Committer { let kms_url = get_field!(kms_url); let mut cmd = tokio::process::Command::new("fuel-block-committer"); - cmd.arg(config) - .env("E2E_TEST_AWS_ENDPOINT", kms_url) + + let db_port = get_field!(db_port); + let db_name = get_field!(db_name); + + cmd.env("E2E_TEST_AWS_ENDPOINT", kms_url) .env("AWS_REGION", "us-east-1") .env("AWS_ACCESS_KEY_ID", "test") .env("AWS_SECRET_ACCESS_KEY", "test") @@ -53,9 +61,51 @@ impl Committer { "COMMITTER__FUEL__BLOCK_PRODUCER_ADDRESS", get_field!(fuel_block_producer_addr), ) - .env("COMMITTER__APP__DB__PORT", get_field!(db_port).to_string()) - .env("COMMITTER__APP__DB__DATABASE", get_field!(db_name)) + .env("COMMITTER__FUEL__MAX_FULL_BLOCKS_PER_REQUEST", "100") + .env("COMMITTER__APP__DB__PORT", db_port.to_string()) + .env("COMMITTER__APP__DB__HOST", "localhost") + .env("COMMITTER__APP__DB__USERNAME", "username") + .env("COMMITTER__APP__DB__PASSWORD", "password") + .env("COMMITTER__APP__DB__MAX_CONNECTIONS", "10") + .env("COMMITTER__APP__DB__USE_SSL", "false") + .env("COMMITTER__APP__DB__DATABASE", &db_name) .env("COMMITTER__APP__PORT", unused_port.to_string()) + .env("COMMITTER__APP__HOST", "127.0.0.1") + .env("COMMITTER__APP__BLOCK_CHECK_INTERVAL", "5s") + .env("COMMITTER__APP__TX_FINALIZATION_CHECK_INTERVAL", "5s") + .env("COMMITTER__APP__NUM_BLOCKS_TO_FINALIZE_TX", "3") + .env( + "COMMITTER__APP__BUNDLE__ACCUMULATION_TIMEOUT", + get_field!(bundle_accumulation_timeout), + ) + .env( + "COMMITTER__APP__BUNDLE__BLOCKS_TO_ACCUMULATE", + get_field!(bundle_blocks_to_accumulate), + ) + .env( + "COMMITTER__APP__BUNDLE__OPTIMIZATION_TIMEOUT", + get_field!(bundle_optimization_timeout), + ) + .env( + "COMMITTER__APP__BUNDLE__BLOCK_HEIGHT_LOOKBACK", + get_field!(bundle_block_height_lookback), + ) + .env( + "COMMITTER__APP__BUNDLE__COMPRESSION_LEVEL", + get_field!(bundle_compression_level), + ) + .env( + "COMMITTER__APP__BUNDLE__OPTIMIZATION_STEP", + get_field!(bundle_optimization_step), + ) + .env( + "COMMITTER__APP__BUNDLE__FRAGMENTS_TO_ACCUMULATE", + get_field!(bundle_fragments_to_accumulate), + ) + .env( + "COMMITTER__APP__BUNDLE__FRAGMENT_ACCUMULATION_TIMEOUT", + get_field!(bundle_fragment_accumulation_timeout), + ) .current_dir(Path::new(env!("CARGO_MANIFEST_DIR")).parent().unwrap()) .kill_on_drop(true); @@ -78,6 +128,46 @@ impl Committer { }) } + pub fn with_bundle_fragment_accumulation_timeout(mut self, timeout: String) -> Self { + self.bundle_fragment_accumulation_timeout = Some(timeout); + self + } + + pub fn with_bundle_fragments_to_accumulate(mut self, fragments: String) -> Self { + self.bundle_fragments_to_accumulate = Some(fragments); + self + } + + pub fn with_bundle_optimization_step(mut self, step: String) -> Self { + self.bundle_optimization_step = Some(step); + self + } + + pub fn with_bundle_accumulation_timeout(mut self, timeout: String) -> Self { + self.bundle_accumulation_timeout = Some(timeout); + self + } + + pub fn with_bundle_blocks_to_accumulate(mut self, blocks: String) -> Self { + self.bundle_blocks_to_accumulate = Some(blocks); + self + } + + pub fn with_bundle_optimization_timeout(mut self, timeout: String) -> Self { + self.bundle_optimization_timeout = Some(timeout); + self + } + + pub fn with_bundle_block_height_lookback(mut self, lookback: String) -> Self { + self.bundle_block_height_lookback = Some(lookback); + self + } + + pub fn with_bundle_compression_level(mut self, level: String) -> Self { + self.bundle_compression_level = Some(level); + self + } + pub 
fn with_main_key_arn(mut self, wallet_arn: String) -> Self { self.main_key_arn = Some(wallet_arn); self @@ -108,8 +198,8 @@ impl Committer { self } - pub fn with_fuel_block_producer_addr(mut self, fuel_block_producer_addr: [u8; 32]) -> Self { - self.fuel_block_producer_addr = Some(hex::encode(fuel_block_producer_addr)); + pub fn with_fuel_block_producer_addr(mut self, fuel_block_producer_addr: String) -> Self { + self.fuel_block_producer_addr = Some(fuel_block_producer_addr); self } @@ -148,27 +238,10 @@ impl CommitterProcess { Ok(()) } - pub async fn wait_for_committed_blob(&self) -> anyhow::Result<()> { - loop { - match self.fetch_latest_blob_block().await { - Ok(_) => break, - _ => { - tokio::time::sleep(Duration::from_secs(1)).await; - continue; - } - } - } - Ok(()) - } - async fn fetch_latest_committed_block(&self) -> anyhow::Result<u64> { self.fetch_metric_value("latest_committed_block").await } - async fn fetch_latest_blob_block(&self) -> anyhow::Result<u64> { - self.fetch_metric_value("last_eth_block_w_blob").await - } - async fn fetch_metric_value(&self, metric_name: &str) -> anyhow::Result<u64> { let response = reqwest::get(format!("http://localhost:{}/metrics", self.port)) .await? diff --git a/e2e/src/eth_node/state_contract.rs b/e2e/src/eth_node/state_contract.rs index d0d3f5c9..895a88fd 100644 --- a/e2e/src/eth_node/state_contract.rs +++ b/e2e/src/eth_node/state_contract.rs @@ -9,7 +9,7 @@ use alloy::{ }; use eth::{AwsClient, AwsConfig, WebsocketClient}; use fs_extra::dir::{copy, CopyOptions}; -use ports::types::{Address, ValidatedFuelBlock}; +use ports::{fuel::FuelBlock, types::Address}; use serde::Deserialize; use tokio::process::Command; use url::Url; @@ -27,7 +27,8 @@ impl DeployedContract { let aws_client = AwsClient::new(AwsConfig::for_testing(key.url).await).await; let chain_state_contract = - WebsocketClient::connect(url, address, key.id, blob_wallet, 5, aws_client).await?; + WebsocketClient::connect(url, address, key.id, blob_wallet, 5, aws_client, None) + .await?; Ok(Self { address, @@ -35,9 +36,9 @@ }) } - pub async fn finalized(&self, block: ValidatedFuelBlock) -> anyhow::Result<bool> { + pub async fn finalized(&self, block: FuelBlock) -> anyhow::Result<bool> { self.chain_state_contract - .finalized(block) + .finalized(*block.id, block.header.height) .await .map_err(Into::into) } diff --git a/e2e/src/fuel_node.rs b/e2e/src/fuel_node.rs index 70a862ab..0018760c 100644 --- a/e2e/src/fuel_node.rs +++ b/e2e/src/fuel_node.rs @@ -1,16 +1,18 @@ -use std::{path::PathBuf, str::FromStr}; +use std::path::PathBuf; use fuel::HttpClient; use fuel_core_chain_config::{ - ChainConfig, ConsensusConfig, SnapshotWriter, StateConfig, TESTNET_WALLET_SECRETS, + ChainConfig, CoinConfig, ConsensusConfig, SnapshotWriter, StateConfig, }; use fuel_core_types::{ fuel_crypto::SecretKey as FuelSecretKey, fuel_tx::{AssetId, Finalizable, Input, Output, TransactionBuilder, TxPointer}, fuel_types::Address, - fuel_vm::SecretKey as FuelKey, }; +use futures::{stream, StreamExt}; +use itertools::Itertools; use ports::fuel::FuelPublicKey; +use rand::Rng; use url::Url; #[derive(Default, Debug)] @@ -22,6 +24,7 @@ pub struct FuelNodeProcess { _db_dir: tempfile::TempDir, _snapshot_dir: tempfile::TempDir, _child: tokio::process::Child, + wallet_keys: Vec<FuelSecretKey>, url: Url, public_key: FuelPublicKey, } @@ -30,21 +33,47 @@ impl FuelNode { fn create_state_config( path: impl Into<PathBuf>, consensus_key: &FuelPublicKey, - ) -> anyhow::Result<()> { + num_wallets: usize, + ) -> anyhow::Result<Vec<FuelSecretKey>> { let chain_config = ChainConfig {
consensus: ConsensusConfig::PoA { signing_key: Input::owner(consensus_key), }, ..ChainConfig::local_testnet() }; - let state_config = StateConfig::local_testnet(); + + let mut rng = &mut rand::thread_rng(); + let keys = std::iter::repeat_with(|| FuelSecretKey::random(&mut rng)) + .take(num_wallets) + .collect_vec(); + + let coins = keys + .iter() + .flat_map(|key| { + std::iter::repeat_with(|| CoinConfig { + owner: Input::owner(&key.public_key()), + amount: u64::MAX, + asset_id: AssetId::zeroed(), + tx_id: rng.gen(), + output_index: rng.gen(), + ..Default::default() + }) + .take(10) + .collect_vec() + }) + .collect_vec(); + + let state_config = StateConfig { + coins, + ..StateConfig::local_testnet() + }; let snapshot = SnapshotWriter::json(path); snapshot .write_state_config(state_config, &chain_config) .map_err(|_| anyhow::anyhow!("Failed to write state config"))?; - Ok(()) + Ok(keys) } pub async fn start(&self) -> anyhow::Result<FuelNodeProcess> { @@ -58,9 +87,18 @@ let public_key = secret_key.public_key(); let snapshot_dir = tempfile::tempdir()?; - Self::create_state_config(snapshot_dir.path(), &public_key)?; + let wallet_keys = Self::create_state_config(snapshot_dir.path(), &public_key, 1000)?; + // This ensures forward compatibility when running against a newer node with a different native executor version. + // If the node detects our older version in the chain configuration, it defaults to using the wasm executor. + // However, since we don't include a wasm executor, this would lead to code loading failure and a node crash. + // To prevent this, we force the node to use our version number to refer to its native executor. + let executor_version = fuel_core_types::blockchain::header::LATEST_STATE_TRANSITION_VERSION; + + // The lower limit for 100 full blocks is somewhere between 400k and 500k + let gql_complexity = "--graphql-max-complexity=500000"; cmd.arg("run") + .arg(gql_complexity) .arg("--port") .arg(unused_port.to_string()) .arg("--snapshot") .arg(snapshot_dir.path()) .arg("--db-path") .arg(db_dir.path()) .arg("--debug") + .arg(format!("--native-executor-version={executor_version}")) .env("CONSENSUS_KEY_SECRET", format!("{}", secret_key)) .kill_on_drop(true) .stdin(std::process::Stdio::null()); @@ -89,6 +128,7 @@ url, public_key, _snapshot_dir: snapshot_dir, + wallet_keys, }; process.wait_until_healthy().await; @@ -104,20 +144,39 @@ impl FuelNodeProcess { pub fn client(&self) -> HttpClient { - HttpClient::new(&self.url, 5) + HttpClient::new(&self.url, 5, 100.try_into().unwrap()) } - pub async fn produce_transaction(&self) -> anyhow::Result<()> { + pub async fn produce_transactions(&self, amount: usize) -> anyhow::Result<()> { + let num_wallets = self.wallet_keys.len(); + + let keys = self + .wallet_keys + .iter() + .cloned() + .cycle() + .take(amount) + .collect_vec(); + + stream::iter(keys) + .map(|key| async move { Self::send_transfer_tx(self.client(), key).await }) + .buffered(num_wallets) + .for_each(|_| async {}) + .await; + + Ok(()) + }
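`produce_transactions` above caps concurrency with `buffered(num_wallets)`: the keys are handed out round-robin by `cycle()`, and `buffered` completes futures in input order, so a wallet's next transfer never starts before its previous one (which spends that wallet's coin) has resolved. The same pattern in isolation (a sketch, not patch code):

```rust
use futures::{stream, StreamExt};

// Sketch only: run one async job per item with at most `limit` in flight,
// yielding results in input order, as produce_transactions does above.
async fn for_each_bounded<T, F, Fut>(items: Vec<T>, limit: usize, job: F)
where
    F: Fn(T) -> Fut,
    Fut: std::future::Future<Output = ()>,
{
    stream::iter(items).map(job).buffered(limit).for_each(|_| async {}).await;
}
```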
+ async fn send_transfer_tx(client: HttpClient, key: FuelSecretKey) -> anyhow::Result<()> { let mut tx = TransactionBuilder::script(vec![], vec![]); tx.script_gas_limit(1_000_000); - let secret = TESTNET_WALLET_SECRETS[0]; - let secret_key = FuelKey::from_str(secret).expect("valid secret key"); + let secret_key = key; let address = Input::owner(&secret_key.public_key()); let base_asset = AssetId::zeroed(); - let coin = self.client().get_coin(address, base_asset).await?; + let coin = client.get_coin(address, base_asset).await?; tx.add_unsigned_coin_input( secret_key, @@ -141,7 +200,8 @@ }); let tx = tx.finalize(); - self.client().send_tx(&tx.into()).await?; + + client.send_tx(&tx.into()).await?; Ok(()) } diff --git a/e2e/src/lib.rs b/e2e/src/lib.rs index b2de5ef6..9e2f253a 100644 --- a/e2e/src/lib.rs +++ b/e2e/src/lib.rs @@ -11,20 +11,19 @@ mod whole_stack; #[cfg(test)] mod tests { + use std::time::Duration; + use anyhow::Result; - use ports::fuel::Api; + use ports::storage::Storage; use tokio::time::sleep_until; - use validator::{BlockValidator, Validator}; - use crate::whole_stack::WholeStack; + use crate::whole_stack::{FuelNodeType, WholeStack}; #[tokio::test(flavor = "multi_thread")] async fn submitted_correct_block_and_was_finalized() -> Result<()> { // given let show_logs = false; - // blob support disabled because this test doesn't generate blocks with transactions in it - // so there is no data to blobify - let blob_support = false; + let blob_support = true; let stack = WholeStack::deploy_default(show_logs, blob_support).await?; // when @@ -45,10 +44,7 @@ let latest_block = stack.fuel_node.client().latest_block().await?; - let validated_block = BlockValidator::new(*stack.fuel_node.consensus_pub_key().hash()) - .validate(&latest_block)?; - - assert!(stack.deployed_contract.finalized(validated_block).await?); + assert!(stack.deployed_contract.finalized(latest_block).await?); Ok(()) } @@ -60,12 +56,62 @@ let blob_support = true; let stack = WholeStack::deploy_default(show_logs, blob_support).await?; + let num_iterations = 10; + let blocks_per_iteration = 100; + // when - stack.fuel_node.produce_transaction().await?; - stack.fuel_node.client().produce_blocks(1).await?; + for _ in 0..num_iterations { + let FuelNodeType::Local(node) = &stack.fuel_node else { + panic!("Expected local fuel node"); + }; + + node.produce_transactions(100).await?; + + let _ = stack + .fuel_node + .client() + .produce_blocks(blocks_per_iteration) + .await; + } // then - stack.committer.wait_for_committed_blob().await?; + let state_submitting_finished = || async { + let finished = stack + .db + .lowest_sequence_of_unbundled_blocks(0, 1) + .await? + .is_none() + && stack + .db + .oldest_nonfinalized_fragments(0, 1) + .await? + .is_empty() + && !stack.db.has_pending_txs().await? + && stack + .db + .missing_blocks(0, num_iterations * blocks_per_iteration) + .await? + .is_empty(); + + anyhow::Result::<_>::Ok(finished) + }; + + while !state_submitting_finished().await?
{ + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + } + + Ok(()) + } + + #[ignore = "meant for running manually and tweaking configuration parameters"] + #[tokio::test(flavor = "multi_thread")] + async fn connecting_to_testnet() -> Result<()> { + // given + let show_logs = false; + let blob_support = true; + let _stack = WholeStack::connect_to_testnet(show_logs, blob_support).await?; + + tokio::time::sleep(Duration::from_secs(10000)).await; Ok(()) } diff --git a/e2e/src/whole_stack.rs b/e2e/src/whole_stack.rs index c67b39d7..78b85320 100644 --- a/e2e/src/whole_stack.rs +++ b/e2e/src/whole_stack.rs @@ -1,6 +1,8 @@ -use std::{sync::Arc, time::Duration}; +use std::time::Duration; -use storage::{Postgres, PostgresProcess}; +use fuel::HttpClient; +use storage::DbWithProcess; +use url::Url; use crate::{ committer::{Committer, CommitterProcess}, @@ -9,12 +11,46 @@ use crate::{ kms::{Kms, KmsKey, KmsProcess}, }; +pub enum FuelNodeType { + Local(FuelNodeProcess), + Testnet { + url: Url, + block_producer_addr: String, + }, +} + +impl FuelNodeType { + pub fn url(&self) -> Url { + match self { + FuelNodeType::Local(fuel_node) => fuel_node.url().clone(), + FuelNodeType::Testnet { url, .. } => url.clone(), + } + } + pub fn block_producer_addr(&self) -> String { + match self { + FuelNodeType::Local(fuel_node) => hex::encode(fuel_node.consensus_pub_key().hash()), + FuelNodeType::Testnet { + block_producer_addr, + .. + } => block_producer_addr.clone(), + } + } + pub fn client(&self) -> HttpClient { + match self { + FuelNodeType::Local(fuel_node) => fuel_node.client(), + FuelNodeType::Testnet { .. } => { + HttpClient::new(&self.url(), 10, 100.try_into().unwrap()) + } + } + } +} + #[allow(dead_code)] pub struct WholeStack { pub eth_node: EthNodeProcess, - pub fuel_node: FuelNodeProcess, + pub fuel_node: FuelNodeType, pub committer: CommitterProcess, - pub db: Arc, + pub db: DbWithProcess, pub deployed_contract: DeployedContract, pub contract_args: ContractArgs, pub kms: KmsProcess, @@ -29,16 +65,17 @@ impl WholeStack { let (contract_args, deployed_contract) = deploy_contract(ð_node, &main_key).await?; - let fuel_node = start_fuel_node(logs).await?; + let fuel_node = FuelNodeType::Local(start_fuel_node(logs).await?); - let (db_process, db) = start_db().await?; + let db = start_db().await?; let committer = start_committer( logs, blob_support, - db, + db.clone(), ð_node, - &fuel_node, + fuel_node.url(), + fuel_node.block_producer_addr(), &deployed_contract, &main_key, &secondary_key, @@ -49,7 +86,62 @@ impl WholeStack { eth_node, fuel_node, committer, - db: db_process, + db, + deployed_contract, + contract_args, + kms, + }) + } + + pub async fn connect_to_testnet(logs: bool, blob_support: bool) -> anyhow::Result { + let kms = start_kms(logs).await?; + + let eth_node = start_eth(logs).await?; + let (main_key, secondary_key) = create_and_fund_kms_keys(&kms, ð_node).await?; + + let (contract_args, deployed_contract) = deploy_contract(ð_node, &main_key).await?; + + let fuel_node = FuelNodeType::Testnet { + url: "https://testnet.fuel.network/v1/graphql".parse().unwrap(), + block_producer_addr: "d9173046b109cc24dfa1099d3c48d8b8b810e3279344cfc3d2bd13149e18c402" + .to_owned(), + }; + + let db = start_db().await?; + + let committer = { + let committer_builder = Committer::default() + .with_show_logs(true) + .with_eth_rpc((eth_node).ws_url().clone()) + .with_fuel_rpc(fuel_node.url()) + .with_db_port(db.port()) + .with_db_name(db.db_name()) + .with_state_contract_address(deployed_contract.address()) + 
.with_fuel_block_producer_addr(fuel_node.block_producer_addr()) + .with_main_key_arn(main_key.id.clone()) + .with_kms_url(main_key.url.clone()) + .with_bundle_accumulation_timeout("3600s".to_owned()) + .with_bundle_blocks_to_accumulate("3600".to_string()) + .with_bundle_optimization_timeout("60s".to_owned()) + .with_bundle_block_height_lookback("8500".to_owned()) + .with_bundle_optimization_step("100".to_owned()) + .with_bundle_fragments_to_accumulate("3".to_owned()) + .with_bundle_fragment_accumulation_timeout("10m".to_owned()) + .with_bundle_compression_level("level6".to_owned()); + + let committer = if blob_support { + committer_builder.with_blob_key_arn(secondary_key.id.clone()) + } else { + committer_builder + }; + committer.start().await? + }; + + Ok(WholeStack { + eth_node, + fuel_node, + committer, + db, deployed_contract, contract_args, kms, @@ -101,20 +193,22 @@ async fn start_fuel_node(logs: bool) -> anyhow::Result { FuelNode::default().with_show_logs(logs).start().await } -async fn start_db() -> anyhow::Result<(Arc, Postgres)> { - let db_process = storage::PostgresProcess::shared().await?; - let random_db = db_process.create_random_db().await?; - - Ok((db_process, random_db)) +async fn start_db() -> anyhow::Result { + storage::PostgresProcess::shared() + .await? + .create_random_db() + .await + .map_err(|e| anyhow::anyhow!("{e}")) } #[allow(clippy::too_many_arguments)] async fn start_committer( logs: bool, blob_support: bool, - random_db: Postgres, + random_db: DbWithProcess, eth_node: &EthNodeProcess, - fuel_node: &FuelNodeProcess, + fuel_node_url: Url, + fuel_node_consensus_pub_key: String, deployed_contract: &DeployedContract, main_key: &KmsKey, secondary_key: &KmsKey, @@ -122,13 +216,21 @@ async fn start_committer( let committer_builder = Committer::default() .with_show_logs(logs) .with_eth_rpc((eth_node).ws_url().clone()) - .with_fuel_rpc(fuel_node.url().clone()) + .with_fuel_rpc(fuel_node_url) .with_db_port(random_db.port()) .with_db_name(random_db.db_name()) .with_state_contract_address(deployed_contract.address()) - .with_fuel_block_producer_addr(*fuel_node.consensus_pub_key().hash()) + .with_fuel_block_producer_addr(fuel_node_consensus_pub_key) .with_main_key_arn(main_key.id.clone()) - .with_kms_url(main_key.url.clone()); + .with_kms_url(main_key.url.clone()) + .with_bundle_accumulation_timeout("5s".to_owned()) + .with_bundle_blocks_to_accumulate("400".to_string()) + .with_bundle_optimization_timeout("5s".to_owned()) + .with_bundle_block_height_lookback("20000".to_owned()) + .with_bundle_fragments_to_accumulate("3".to_owned()) + .with_bundle_fragment_accumulation_timeout("5s".to_owned()) + .with_bundle_optimization_step("100".to_owned()) + .with_bundle_compression_level("level6".to_owned()); let committer = if blob_support { committer_builder.with_blob_key_arn(secondary_key.id.clone()) diff --git a/helm/fuel-block-committer/Chart.yaml b/helm/fuel-block-committer/Chart.yaml index 3e25e7da..20b296e2 100644 --- a/helm/fuel-block-committer/Chart.yaml +++ b/helm/fuel-block-committer/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.6.0 +version: 0.9.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.6.0" +appVersion: "0.9.0" diff --git a/helm/fuel-block-committer/templates/deployment.yaml b/helm/fuel-block-committer/templates/deployment.yaml index 1299e602..faa9a31d 100644 --- a/helm/fuel-block-committer/templates/deployment.yaml +++ b/helm/fuel-block-committer/templates/deployment.yaml @@ -36,8 +36,6 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} command: [ "./fuel-block-committer" ] - args: - - "/mnt/config/config.toml" envFrom: - configMapRef: name: fuel-block-committer @@ -54,19 +52,8 @@ spec: initialDelaySeconds: 10 periodSeconds: 5 timeoutSeconds: 10 - volumeMounts: - - name: config-volume - mountPath: /mnt/config/config.toml - subPath: config.toml resources: {{- toYaml .Values.resources | nindent 12 }} - volumes: - - name: config-volume - configMap: - name: {{ include "fuel-block-committer.fullname" . }}-config - items: - - key: "committer-config" - path: "config.toml" {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} @@ -78,4 +65,4 @@ spec: {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} - {{- end }} \ No newline at end of file + {{- end }} diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml new file mode 100644 index 00000000..4e56ac6f --- /dev/null +++ b/packages/clock/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "clock" +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +repository = { workspace = true } +version = { workspace = true } +publish = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +ports = { workspace = true, features = ["clock"] } + +[dev-dependencies] +clock = { workspace = true, features = ["test-helpers"] } +tokio = { workspace = true, features = ["macros", "rt"] } + +[features] +test-helpers = [] diff --git a/packages/clock/src/lib.rs b/packages/clock/src/lib.rs new file mode 100644 index 00000000..79c0af2d --- /dev/null +++ b/packages/clock/src/lib.rs @@ -0,0 +1,82 @@ +use ports::{ + clock::Clock, + types::{DateTime, Utc}, +}; + +pub struct SystemClock; + +impl Clock for SystemClock { + fn now(&self) -> DateTime { + Utc::now() + } +} + +#[cfg(feature = "test-helpers")] +mod test_helpers { + use std::{ + sync::{atomic::AtomicI64, Arc}, + time::Duration, + }; + + use ports::{ + clock::Clock, + types::{DateTime, Utc}, + }; + + #[derive(Default, Clone)] + pub struct TestClock { + epoch_millis: Arc, + } + + impl TestClock { + pub fn advance_time(&self, adv: Duration) { + let new_time = self.now() + adv; + self.epoch_millis.store( + new_time.timestamp_millis(), + std::sync::atomic::Ordering::Relaxed, + ) + } + pub fn set_time(&self, new_time: DateTime) { + self.epoch_millis.store( + new_time.timestamp_millis(), + std::sync::atomic::Ordering::Relaxed, + ) + } + } + + impl Clock for TestClock { + fn now(&self) -> ports::types::DateTime { + DateTime::::from_timestamp_millis( + self.epoch_millis.load(std::sync::atomic::Ordering::Relaxed), + ) + .expect("DateTime to be in range") + } + } +} + +#[cfg(feature = "test-helpers")] +pub use test_helpers::TestClock; + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use ports::clock::Clock; + + use crate::TestClock; + + #[tokio::test] + async fn can_advance_clock() { + // 
given + let test_clock = TestClock::default(); + let starting_time = test_clock.now(); + let adv = Duration::from_secs(1); + + // when + test_clock.advance_time(adv); + + // then + let new_time = starting_time + adv; + assert_eq!(test_clock.now(), new_time); + } +} diff --git a/packages/eth/Cargo.toml b/packages/eth/Cargo.toml index 1040096c..5bcd2af5 100644 --- a/packages/eth/Cargo.toml +++ b/packages/eth/Cargo.toml @@ -10,21 +10,24 @@ publish = { workspace = true } rust-version = { workspace = true } [dependencies] +serde = { workspace = true } +c-kzg = { workspace = true } alloy = { workspace = true, features = [ - "consensus", - "network", - "provider-ws", - "kzg", - "contract", - "signer-aws", - "rpc-types", - "reqwest-rustls-tls", + "consensus", + "network", + "provider-ws", + "kzg", + "contract", + "signer-aws", + "rpc-types", + "reqwest-rustls-tls", ] } async-trait = { workspace = true } aws-config = { workspace = true, features = ["default"] } aws-sdk-kms = { workspace = true, features = ["default"] } -c-kzg = { workspace = true } +delegate = { workspace = true } futures = { workspace = true } +itertools = { workspace = true, features = ["use_alloc"] } metrics = { workspace = true } ports = { workspace = true, features = ["l1"] } thiserror = { workspace = true } @@ -32,9 +35,13 @@ tracing = { workspace = true } url = { workspace = true } [dev-dependencies] +pretty_assertions = { workspace = true, features = ["default"] } mockall = { workspace = true } ports = { workspace = true, features = ["l1", "test-helpers"] } +rand = { workspace = true, features = ["small_rng"] } +test-case = { workspace = true } tokio = { workspace = true, features = ["macros"] } +rayon = { workspace = true } [features] test-helpers = [] diff --git a/packages/eth/src/blob_encoding.rs b/packages/eth/src/blob_encoding.rs new file mode 100644 index 00000000..556222c9 --- /dev/null +++ b/packages/eth/src/blob_encoding.rs @@ -0,0 +1,4 @@ +mod copied_from_alloy; +mod encoder; + +pub use encoder::Eip4844BlobEncoder; diff --git a/packages/eth/src/blob_encoding/copied_from_alloy.rs b/packages/eth/src/blob_encoding/copied_from_alloy.rs new file mode 100644 index 00000000..ccfcf3fe --- /dev/null +++ b/packages/eth/src/blob_encoding/copied_from_alloy.rs @@ -0,0 +1,384 @@ +use std::cmp; + +use alloy::{ + consensus::{utils::WholeFe, BlobTransactionSidecar, Bytes48, EnvKzgSettings}, + eips::eip4844::{Blob, BYTES_PER_BLOB, FIELD_ELEMENTS_PER_BLOB}, +}; +use c_kzg::{KzgCommitment, KzgProof}; + +/// A builder for creating a [`BlobTransactionSidecar`]. +/// +/// [`BlobTransactionSidecar`]: crate::eip4844::BlobTransactionSidecar +#[derive(Clone, Debug)] +pub struct PartialSidecar { + /// The blobs in the sidecar. + blobs: Vec, + /// The number of field elements that we have ingested, total. + fe: usize, +} + +impl Default for PartialSidecar { + fn default() -> Self { + Self::new() + } +} + +#[allow(dead_code)] +impl PartialSidecar { + /// Create a new builder, and push an empty blob to it. This is the default + /// constructor, and allocates space for 2 blobs (256 KiB). If you want to + /// preallocate a specific number of blobs, use + /// [`PartialSidecar::with_capacity`]. + pub fn new() -> Self { + Self::with_capacity(2) + } + + /// Create a new builder, preallocating room for `capacity` blobs, and push + /// an empty blob to it. 
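// The sizes quoted above follow directly from the EIP-4844 constants: a blob
// holds FIELD_ELEMENTS_PER_BLOB (4_096) field elements of 32 bytes each, i.e.
// 131_072 bytes (128 KiB), so the default two-blob allocation is 256 KiB. A
// one-line sanity check (an illustrative sketch, not part of the copied file):
const _: () = assert!(BYTES_PER_BLOB == FIELD_ELEMENTS_PER_BLOB as usize * 32);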
+ pub fn with_capacity(capacity: usize) -> Self { + let mut blobs = Vec::with_capacity(capacity); + blobs.push(Blob::new([0u8; BYTES_PER_BLOB])); + Self { blobs, fe: 0 } + } + + /// Get the number of unused field elements that have been allocated + fn free_fe(&self) -> usize { + self.blobs.len() * FIELD_ELEMENTS_PER_BLOB as usize - self.fe + } + + /// Calculate the length of used field elements IN BYTES in the builder. + /// + /// This is always strictly greater than the number of bytes that have been + /// ingested. + pub const fn len(&self) -> usize { + self.fe * 32 + } + + /// Check if the builder is empty. + pub const fn is_empty(&self) -> bool { + self.fe == 0 + } + + /// Push an empty blob to the builder. + fn push_empty_blob(&mut self) { + self.blobs.push(Blob::new([0u8; BYTES_PER_BLOB])); + } + + /// Allocate enough space for the required number of new field elements. + pub fn alloc_fes(&mut self, required_fe: usize) { + while self.free_fe() < required_fe { + self.push_empty_blob() + } + } + + /// Get a mutable reference to the field element at the given index, in + /// the current blob. + fn fe_at_mut(&mut self, index: usize) -> &mut [u8] { + let target_blob = index / FIELD_ELEMENTS_PER_BLOB as usize; + let fe_offset_in_target_blob = index % FIELD_ELEMENTS_PER_BLOB as usize; + &mut self.blobs[target_blob] + [fe_offset_in_target_blob * 32..(fe_offset_in_target_blob + 1) * 32] + } + + /// Get a mutable reference to the next unused field element. + fn next_unused_fe_mut(&mut self) -> &mut [u8] { + self.fe_at_mut(self.fe) + } + + /// Ingest a partial FE into the current blobs. + /// + /// # Panics + /// + /// If the data is >=32 bytes. Or if there are not enough free FEs to + /// encode the data. + pub fn ingest_partial_fe(&mut self, data: &[u8]) { + let fe = self.next_unused_fe_mut(); + fe[1..1 + data.len()].copy_from_slice(data); + self.fe += 1; + } +} + +#[allow(dead_code)] +/// A strategy for coding and decoding data into sidecars. +/// +/// Coder instances are responsible for encoding and decoding data into and from the sidecar. They +/// are called by the [`SidecarBuilder`] during the [`ingest`], [`take`], and (if `c_kzg` feature +/// enabled) `build` methods. +/// +/// This trait allows different downstream users to use different bit-packing +/// strategies. For example, a simple coder might only use the last 31 bytes of +/// each blob, while a more complex coder might use a more sophisticated +/// strategy to pack data into the low 6 bits of the top byte. +/// +/// [`ingest`]: SidecarBuilder::ingest +/// [`take`]: SidecarBuilder::take +pub trait SidecarCoder { + /// Calculate the number of field elements required to store the given + /// data. + fn required_fe(&self, data: &[u8]) -> usize; + + /// Code a slice of data into the builder. + fn code(&mut self, builder: &mut PartialSidecar, data: &[u8]); + + /// Finish the sidecar, and commit to the data. This method should empty + /// any buffer or scratch space in the coder, and is called by + /// [`SidecarBuilder`]'s `take` and `build` methods. + fn finish(self, builder: &mut PartialSidecar); + + /// Decode all slices of data from the blobs. + fn decode_all(&mut self, blobs: &[Blob]) -> Option>>; +} + +/// Simple coder that only uses the last 31 bytes of each blob. This is the +/// default coder for the [`SidecarBuilder`]. +/// +/// # Note +/// +/// Because this coder sacrifices around 3% of total sidecar space, we do not +/// recommend its use in production. It is provided for convenience and +/// non-prod environments. 
+/// +/// # Behavior +/// +/// This coder encodes data as follows: +/// - The first byte of every 32-byte word is empty. +/// - Data is pre-pended with a 64-bit big-endian length prefix, which is right padded with zeros to +/// form a complete word. +/// - The rest of the data is packed into the remaining 31 bytes of each word. +/// - If the data is not a multiple of 31 bytes, the last word is right-padded with zeros. +/// +/// This means that the following regions cannot be used to store data, and are +/// considered "wasted": +/// +/// - The first byte of every 32-byte word. +/// - The right padding on the header word containing the data length. +/// - Any right padding on the last word for each piece of data. +#[derive(Clone, Copy, Debug, Default)] +#[non_exhaustive] +pub struct SimpleCoder; + +impl SimpleCoder { + /// Decode an some bytes from an iterator of valid FEs. + /// + /// Returns `Ok(Some(data))` if there is some data. + /// Returns `Ok(None)` if there is no data (length prefix is 0). + /// Returns `Err(())` if there is an error. + fn decode_one<'a>(mut fes: impl Iterator>) -> Result>, ()> { + let first = fes.next().ok_or(())?; + let mut num_bytes = u64::from_be_bytes(first.as_ref()[1..9].try_into().unwrap()) as usize; + + // if no more bytes is 0, we're done + if num_bytes == 0 { + return Ok(None); + } + + let mut res = Vec::with_capacity(num_bytes); + while num_bytes > 0 { + let to_copy = cmp::min(31, num_bytes); + let fe = fes.next().ok_or(())?; + res.extend_from_slice(&fe.as_ref()[1..1 + to_copy]); + num_bytes -= to_copy; + } + Ok(Some(res)) + } +} + +impl SidecarCoder for SimpleCoder { + fn required_fe(&self, data: &[u8]) -> usize { + data.len().div_ceil(31) + 1 + } + + fn code(&mut self, builder: &mut PartialSidecar, mut data: &[u8]) { + if data.is_empty() { + return; + } + + // first FE is the number of following bytes + builder.ingest_partial_fe(&(data.len() as u64).to_be_bytes()); + + // ingest the rest of the data + while !data.is_empty() { + let (left, right) = data.split_at(cmp::min(31, data.len())); + builder.ingest_partial_fe(left); + data = right + } + } + + /// No-op + fn finish(self, _builder: &mut PartialSidecar) {} + + fn decode_all(&mut self, blobs: &[Blob]) -> Option>> { + let mut fes = blobs + .iter() + .flat_map(|blob| blob.chunks(32).map(WholeFe::new)) + .map(Option::unwrap); + + let mut res = Vec::new(); + loop { + match Self::decode_one(&mut fes) { + Ok(Some(data)) => res.push(data), + Ok(None) => break, + Err(()) => return None, + } + } + Some(res) + } +} + +/// Build a [`BlobTransactionSidecar`] from an arbitrary amount of data. +/// +/// This is useful for creating a sidecar from a large amount of data, +/// which is then split into blobs. It delays KZG commitments and proofs +/// until all data is ready. +/// +/// [`BlobTransactionSidecar`]: crate::eip4844::BlobTransactionSidecar +#[derive(Clone, Debug)] +pub struct SidecarBuilder { + /// The blob array we will code data into + inner: PartialSidecar, + /// The coder to use for ingesting and decoding data. + coder: T, +} + +impl Default for SidecarBuilder +where + T: Default + SidecarCoder, +{ + fn default() -> Self { + Self::new() + } +} + +#[allow(dead_code)] +impl SidecarBuilder { + /// Instantiate a new builder and new coder instance. + /// + /// By default, this allocates space for 2 blobs (256 KiB). If you want to + /// preallocate a specific number of blobs, use + /// [`SidecarBuilder::with_capacity`]. 
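// To make the layout described above concrete, here is the footprint arithmetic
// for a small payload (an illustrative sketch; `simple_coder_footprint` is a
// hypothetical helper, not part of the alloy copy):
fn simple_coder_footprint(payload_len: usize) -> (usize, usize) {
    // one header FE carrying the big-endian length, then 31 payload bytes per FE
    let fes = 1 + payload_len.div_ceil(31);
    let blob_bytes = fes * 32;
    // the 8-byte length prefix counts as useful data; the remaining slack is padding
    let padding = blob_bytes - (payload_len + 8);
    (blob_bytes, padding)
}
// e.g. 100 payload bytes -> 1 + ceil(100/31) = 5 FEs = 160 blob bytes, 52 of them padding:
// assert_eq!(simple_coder_footprint(100), (160, 52));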
+ pub fn new() -> Self { + T::default().into() + } + + /// Create a new builder from a slice of data by calling + /// [`SidecarBuilder::from_coder_and_data`] + pub fn from_slice(data: &[u8]) -> Self { + Self::from_coder_and_data(T::default(), data) + } + + /// Create a new builder with a pre-allocated capacity. This capacity is + /// measured in blobs, each of which is 128 KiB. + pub fn with_capacity(capacity: usize) -> Self { + Self::from_coder_and_capacity(T::default(), capacity) + } +} + +#[allow(dead_code)] +impl SidecarBuilder { + /// Instantiate a new builder with the provided coder and capacity. This + /// capacity is measured in blobs, each of which is 128 KiB. + pub fn from_coder_and_capacity(coder: T, capacity: usize) -> Self { + Self { + inner: PartialSidecar::with_capacity(capacity), + coder, + } + } + + /// Calculate the length of bytes used by field elements in the builder. + /// + /// This is always strictly greater than the number of bytes that have been + /// ingested. + pub const fn len(&self) -> usize { + self.inner.len() + } + + /// Check if the builder is empty. + pub const fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + /// Create a new builder from a slice of data. + pub fn from_coder_and_data(coder: T, data: &[u8]) -> Self { + let required_fe = coder.required_fe(data); + let mut this = Self::from_coder_and_capacity( + coder, + required_fe.div_ceil(FIELD_ELEMENTS_PER_BLOB as usize), + ); + this.ingest(data); + this + } + + /// Ingest a slice of data into the builder. + pub fn ingest(&mut self, data: &[u8]) { + self.inner.alloc_fes(self.coder.required_fe(data)); + self.coder.code(&mut self.inner, data); + } + + /// Build the sidecar from the data with the provided settings. + pub fn build_with_settings( + self, + settings: &c_kzg::KzgSettings, + ) -> Result { + let mut commitments = Vec::with_capacity(self.inner.blobs.len()); + let mut proofs = Vec::with_capacity(self.inner.blobs.len()); + for blob in self.inner.blobs.iter() { + // SAFETY: same size + let blob = unsafe { core::mem::transmute::<&Blob, &c_kzg::Blob>(blob) }; + let commitment = KzgCommitment::blob_to_kzg_commitment(blob, settings)?; + let proof = KzgProof::compute_blob_kzg_proof(blob, &commitment.to_bytes(), settings)?; + + // SAFETY: same size + unsafe { + commitments.push(core::mem::transmute::( + commitment.to_bytes(), + )); + proofs.push(core::mem::transmute::( + proof.to_bytes(), + )); + } + } + + Ok(BlobTransactionSidecar::new( + self.inner.blobs, + commitments, + proofs, + )) + } + + /// Build the sidecar from the data, with default (Ethereum Mainnet) + /// settings. + pub fn build(self) -> Result { + self.build_with_settings(EnvKzgSettings::Default.get()) + } + + /// Take the blobs from the builder, without committing them to a KZG proof. + pub fn take(self) -> Vec { + self.inner.blobs + } +} + +impl From for SidecarBuilder { + /// Instantiate a new builder with the provided coder. + /// + /// This is equivalent to calling + /// [`SidecarBuilder::from_coder_and_capacity`] with a capacity of 1. + /// If you want to preallocate a specific number of blobs, use + /// [`SidecarBuilder::from_coder_and_capacity`]. 
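// Capacity sizing as done by `from_coder_and_data`, shown on concrete numbers
// (an illustrative sketch; `blobs_needed_for` is a hypothetical helper, not part
// of the alloy copy):
fn blobs_needed_for(payload_len: usize) -> usize {
    // SimpleCoder: ceil(len / 31) data FEs plus one header FE
    let required_fe = payload_len.div_ceil(31) + 1;
    required_fe.div_ceil(FIELD_ELEMENTS_PER_BLOB as usize)
}
// e.g. blobs_needed_for(200_000) == 2: 6_453 FEs do not fit in one 4_096-FE blob.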
+ fn from(coder: T) -> Self { + Self::from_coder_and_capacity(coder, 1) + } +} + +impl FromIterator for SidecarBuilder +where + T: SidecarCoder + Default, + R: AsRef<[u8]>, +{ + fn from_iter>(iter: I) -> Self { + let mut this = Self::new(); + for data in iter { + this.ingest(data.as_ref()); + } + this + } +} diff --git a/packages/eth/src/blob_encoding/encoder.rs b/packages/eth/src/blob_encoding/encoder.rs new file mode 100644 index 00000000..f6e77c01 --- /dev/null +++ b/packages/eth/src/blob_encoding/encoder.rs @@ -0,0 +1,343 @@ +use std::num::NonZeroUsize; + +use alloy::{ + consensus::BlobTransactionSidecar, + eips::eip4844::{ + Blob, Bytes48, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_PROOF, DATA_GAS_PER_BLOB, + FIELD_ELEMENTS_PER_BLOB, + }, +}; +use itertools::{izip, Itertools}; +use ports::types::{CollectNonEmpty, Fragment, NonEmpty}; + +// Until the issue is fixed be careful that we use the `SidecarBuilder` and `SimpleCoder` from +// `copied_from_alloy`, there is a unit test that should protect against accidental import from the +// original location. +use super::copied_from_alloy::{SidecarBuilder, SimpleCoder}; + +#[derive(Debug, Clone, Copy)] +pub struct Eip4844BlobEncoder; + +impl Eip4844BlobEncoder { + #[cfg(feature = "test-helpers")] + pub const FRAGMENT_SIZE: usize = + FIELD_ELEMENTS_PER_BLOB as usize * alloy::eips::eip4844::FIELD_ELEMENT_BYTES as usize; + + pub(crate) fn decode( + fragments: impl IntoIterator, + ) -> crate::error::Result { + let fragments: Vec<_> = fragments + .into_iter() + .map(SingleBlob::decode) + .try_collect()?; + + Ok(merge_into_sidecar(fragments)) + } +} + +impl ports::l1::FragmentEncoder for Eip4844BlobEncoder { + fn encode(&self, data: NonEmpty) -> ports::l1::Result> { + let builder = SidecarBuilder::::from_coder_and_data( + SimpleCoder::default(), + &Vec::from(data), + ); + + let single_blobs = + split_sidecar(builder).map_err(|e| ports::l1::Error::Other(e.to_string()))?; + + Ok(single_blobs + .into_iter() + .map(|blob| blob.encode()) + .collect_nonempty() + .expect("cannot be empty")) + } + + fn gas_usage(&self, num_bytes: NonZeroUsize) -> u64 { + let num_bytes = u64::try_from(num_bytes.get()).unwrap_or(u64::MAX); + + // Taken from the SimpleCoder impl + let required_fe = num_bytes.div_ceil(31).saturating_add(1); + + let blob_num = required_fe.div_ceil(FIELD_ELEMENTS_PER_BLOB); + + blob_num.saturating_mul(DATA_GAS_PER_BLOB) + } +} + +struct SingleBlob { + // needs to be heap allocated because it's large enough to cause a stack overflow + blobs: Box, + commitment: Bytes48, + proof: Bytes48, + unused_bytes: u32, +} + +impl SingleBlob { + const SIZE: usize = BYTES_PER_BLOB + BYTES_PER_COMMITMENT + BYTES_PER_PROOF; + + fn decode(fragment: Fragment) -> crate::error::Result { + let data = Vec::from(fragment.data); + let bytes: &[u8; Self::SIZE] = data.as_slice().try_into().map_err(|_| { + crate::error::Error::Other(format!( + "Failed to decode blob: expected {} bytes, got {}", + Self::SIZE, + data.len() + )) + })?; + + let len_checked = "checked earlier that enough bytes are available"; + + let blobs = Box::new(bytes[..BYTES_PER_BLOB].try_into().expect(len_checked)); + let remaining_bytes = &bytes[BYTES_PER_BLOB..]; + + let commitment: [u8; BYTES_PER_COMMITMENT] = remaining_bytes[..BYTES_PER_COMMITMENT] + .try_into() + .expect(len_checked); + let remaining_bytes = &remaining_bytes[BYTES_PER_COMMITMENT..]; + + let proof: [u8; BYTES_PER_COMMITMENT] = remaining_bytes[..BYTES_PER_PROOF] + .try_into() + .expect(len_checked); + + Ok(Self { + blobs, + 
commitment: commitment.into(), + proof: proof.into(), + unused_bytes: fragment.unused_bytes, + }) + } + + fn encode(&self) -> Fragment { + let mut bytes = Vec::with_capacity(Self::SIZE); + bytes.extend_from_slice(self.blobs.as_slice()); + bytes.extend_from_slice(self.commitment.as_ref()); + bytes.extend_from_slice(self.proof.as_ref()); + let data = NonEmpty::from_vec(bytes).expect("cannot be empty"); + + Fragment { + data, + unused_bytes: self.unused_bytes, + total_bytes: (BYTES_PER_BLOB as u32).try_into().expect("not zero"), + } + } +} + +fn split_sidecar(builder: SidecarBuilder) -> crate::error::Result<NonEmpty<SingleBlob>> { + let num_bytes = u32::try_from(builder.len()).map_err(|_| { + crate::error::Error::Other("cannot handle more than u32::MAX bytes".to_string()) + })?; + let sidecar = builder + .build() + .map_err(|e| crate::error::Error::Other(e.to_string()))?; + + let num_blobs = u32::try_from(sidecar.blobs.len()).map_err(|_| { + crate::error::Error::Other("cannot handle more than u32::MAX blobs".to_string()) + })?; + + if num_blobs == 0 { + return Err(crate::error::Error::Other("no blobs to split".to_string())); + } + + let unused_data_in_last_blob = + (BYTES_PER_BLOB as u32).saturating_sub(num_bytes % BYTES_PER_BLOB as u32); + + // blobs are not consumed here because that would place them on the stack at some point; the alloy + // type is huge enough that doing so causes a stack overflow + let single_blobs = izip!(&sidecar.blobs, sidecar.commitments, sidecar.proofs) + .enumerate() + .map(|(index, (data, commitment, proof))| { + let index = u32::try_from(index) + .expect("checked earlier there are no more than u32::MAX blobs"); + + let unused_data = if index == num_blobs.saturating_sub(1) { + unused_data_in_last_blob + } else { + 0 + }; + + SingleBlob { + blobs: Box::new(data.as_slice().try_into().expect("number of bytes match")), + commitment, + proof, + unused_bytes: unused_data, + } + }) + .collect_nonempty() + .expect("checked is not empty"); + + Ok(single_blobs) +} + +fn merge_into_sidecar( + single_blobs: impl IntoIterator<Item = SingleBlob>, +) -> BlobTransactionSidecar { + let mut blobs = vec![]; + let mut commitments = vec![]; + let mut proofs = vec![]; + + for blob in single_blobs { + blobs.push(*blob.blobs); + commitments.push(blob.commitment); + proofs.push(blob.proof); + } + + BlobTransactionSidecar { + blobs, + commitments, + proofs, + } +} + +#[cfg(test)] +mod tests { + use ports::l1::FragmentEncoder; + use rand::{rngs::SmallRng, Rng, RngCore, SeedableRng}; + use rayon::iter::{IntoParallelIterator, ParallelIterator}; + use test_case::test_case; + + use super::*; + use crate::blob_encoding::copied_from_alloy::SidecarCoder; + + #[test_case(100, 1; "one blob")] + #[test_case(129 * 1024, 2; "two blobs")] + #[test_case(257 * 1024, 3; "three blobs")] + #[test_case(385 * 1024, 4; "four blobs")] + #[test_case(513 * 1024, 5; "five blobs")] + #[test_case(740 * 1024, 6; "six blobs")] + #[test_case(768 * 1024, 7; "seven blobs")] + #[test_case(896 * 1024, 8; "eight blobs")] + fn gas_usage_for_data_storage(num_bytes: usize, num_blobs: usize) { + // given + + // when + let usage = Eip4844BlobEncoder.gas_usage(num_bytes.try_into().unwrap()); + + // then + assert_eq!( + usage, + num_blobs as u64 * alloy::eips::eip4844::DATA_GAS_PER_BLOB + ); + + let mut rng = SmallRng::from_seed([0; 32]); + let mut data = vec![0; num_bytes]; + rng.fill(&mut data[..]); + + let mut builder = SidecarBuilder::from_coder_and_capacity(SimpleCoder::default(), 0); + builder.ingest(&data); + + assert_eq!(builder.build().unwrap().blobs.len(), num_blobs,); + }
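// An illustrative sanity check of the fragment layout and the `unused_bytes`
// arithmetic relied on by the tests around it (a minimal sketch; this test
// name is hypothetical and not part of the original change):
#[test]
fn fragment_layout_arithmetic() {
    // a fragment is blob || commitment || proof: 131_072 + 48 + 48 bytes
    assert_eq!(
        SingleBlob::SIZE,
        BYTES_PER_BLOB + BYTES_PER_COMMITMENT + BYTES_PER_PROOF
    );

    // 1_000 payload bytes -> (ceil(1_000 / 31) + 1) * 32 = 1_088 blob bytes used,
    // leaving 131_072 - 1_088 = 129_984 bytes unused, matching `shows_unused_bytes`
    assert_eq!(BYTES_PER_BLOB - (1_000usize.div_ceil(31) + 1) * 32, 129_984);
}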
+ #[test] + fn decoding_fails_if_extra_bytes_present() { + let data = Fragment { + data: NonEmpty::collect(vec![0; SingleBlob::SIZE + 1]).unwrap(), + unused_bytes: 0, + total_bytes: 1.try_into().unwrap(), + }; + + assert!(SingleBlob::decode(data).is_err()); + } + + #[test] + fn decoding_fails_if_bytes_missing() { + let data = Fragment { + data: NonEmpty::collect(vec![0; SingleBlob::SIZE - 1]).unwrap(), + unused_bytes: 0, + total_bytes: 1.try_into().unwrap(), + }; + + assert!(SingleBlob::decode(data).is_err()); + } + + #[test] + fn roundtrip_split_encode_decode_merge() { + let mut random_data = vec![0; 110_000]; + let mut rng = rand::rngs::SmallRng::from_seed([0; 32]); + rng.fill_bytes(&mut random_data); + + let builder = SidecarBuilder::from_coder_and_data(SimpleCoder::default(), &random_data); + + let single_blobs = split_sidecar(builder.clone()).unwrap(); + + let merged_sidecar = merge_into_sidecar(single_blobs); + assert_eq!(merged_sidecar, builder.build().unwrap()); + + let should_be_original_data = SimpleCoder::default() + .decode_all(&merged_sidecar.blobs) + .unwrap() + .into_iter() + .flatten() + .collect_vec(); + + assert_eq!(should_be_original_data, random_data); + } + + #[test] + fn shows_unused_bytes() { + let mut random_data = vec![0; 1000]; + let mut rng = rand::rngs::SmallRng::from_seed([0; 32]); + rng.fill_bytes(&mut random_data); + + let sidecar = SidecarBuilder::from_coder_and_data(SimpleCoder::default(), &random_data); + + let single_blobs = split_sidecar(sidecar.clone()).unwrap(); + + assert_eq!(single_blobs.len(), 1); + assert_eq!(single_blobs[0].unused_bytes, 129984); + } + + #[test] + fn alloy_blob_encoding_issue_regression() { + let test = |amount| { + // given + let encoder = Eip4844BlobEncoder; + let mut rng = SmallRng::from_seed([0; 32]); + let mut data = vec![0; amount]; + rng.fill_bytes(&mut data[..]); + + // when + let fragments = encoder + .encode(NonEmpty::from_vec(data.clone()).unwrap()) + .map_err(|e| { + crate::error::Error::Other(format!("cannot encode {amount}B : {}", e)) + })?; + + // then + let sidecar = Eip4844BlobEncoder::decode(fragments).unwrap(); + + let mut builder = SidecarBuilder::::new(); + for byte in &data { + builder.ingest(std::slice::from_ref(byte)); + } + + let decoded_data = SimpleCoder::default() + .decode_all(&sidecar.blobs) + .ok_or_else(|| { + crate::error::Error::Other(format!("cannot decode blobs for amount {amount}",)) + })? 
+ .into_iter() + .flatten() + .collect_vec(); + + if data != decoded_data { + Err(crate::error::Error::Other(format!( + "data mismatch for {amount} B" + ))) + } else { + Ok(amount) + } + }; + + let failure = (126_000..2_000_000) + .step_by(50_000) + .collect_vec() + .into_par_iter() + .map(test) + .find_any(|ret| ret.is_err()); + + if let Some(Err(amount)) = failure { + panic!("Alloy blob issue found for {amount} B"); + } + } +} diff --git a/packages/eth/src/error.rs b/packages/eth/src/error.rs index d442d765..6efc7b6b 100644 --- a/packages/eth/src/error.rs +++ b/packages/eth/src/error.rs @@ -26,12 +26,6 @@ impl From for Error { } } -impl From for Error { - fn from(value: c_kzg::Error) -> Self { - Self::Other(value.to_string()) - } -} - impl From for Error { fn from(value: alloy::sol_types::Error) -> Self { Self::Other(value.to_string()) diff --git a/packages/eth/src/lib.rs b/packages/eth/src/lib.rs index 047d7305..eccb970b 100644 --- a/packages/eth/src/lib.rs +++ b/packages/eth/src/lib.rs @@ -1,13 +1,11 @@ -#![deny(unused_crate_dependencies)] - use std::{num::NonZeroU32, pin::Pin}; use alloy::primitives::U256; -use async_trait::async_trait; +use delegate::delegate; use futures::{stream::TryStreamExt, Stream}; use ports::{ - l1::{Api, Contract, EventStreamer, Result}, - types::{FuelBlockCommittedOnL1, L1Height, TransactionResponse, ValidatedFuelBlock}, + l1::{Api, Contract, EventStreamer, FragmentsSubmitted, Result}, + types::{Fragment, FuelBlockCommittedOnL1, L1Height, NonEmpty, TransactionResponse}, }; use websocket::EthEventStreamer; @@ -20,44 +18,40 @@ pub use alloy::primitives::Address; pub use aws::*; pub use websocket::WebsocketClient; -#[async_trait] impl Contract for WebsocketClient { - async fn submit(&self, block: ValidatedFuelBlock) -> Result<()> { - self.submit(block).await + delegate! { + to self { + async fn submit(&self, hash: [u8; 32], height: u32) -> Result<()>; + fn commit_interval(&self) -> NonZeroU32; + } } fn event_streamer(&self, height: L1Height) -> Box { Box::new(self.event_streamer(height.into())) } - - fn commit_interval(&self) -> NonZeroU32 { - self.commit_interval() - } } -#[async_trait] -impl Api for WebsocketClient { - async fn submit_l2_state(&self, state_data: Vec) -> Result<[u8; 32]> { - Ok(self.submit_l2_state(state_data).await?) - } +mod blob_encoding; +pub use blob_encoding::Eip4844BlobEncoder; - async fn balance(&self) -> Result { - Ok(self.balance().await?) +impl Api for WebsocketClient { + delegate! { + to (*self) { + async fn submit_state_fragments( + &self, + fragments: NonEmpty, + ) -> Result; + async fn balance(&self) -> Result; + async fn get_transaction_response(&self, tx_hash: [u8; 32],) -> Result>; + } } async fn get_block_number(&self) -> Result { - let block_num = self.get_block_number().await?; + let block_num = self._get_block_number().await?; let height = L1Height::try_from(block_num)?; Ok(height) } - - async fn get_transaction_response( - &self, - tx_hash: [u8; 32], - ) -> Result> { - Ok(self.get_transaction_response(tx_hash).await?) 
- } } #[async_trait::async_trait] diff --git a/packages/eth/src/websocket.rs b/packages/eth/src/websocket.rs index eff0b793..2eb5f786 100644 --- a/packages/eth/src/websocket.rs +++ b/packages/eth/src/websocket.rs @@ -3,8 +3,8 @@ use std::num::NonZeroU32; use ::metrics::{prometheus::core::Collector, HealthChecker, RegistersMetrics}; use alloy::primitives::Address; use ports::{ - l1::Result, - types::{TransactionResponse, ValidatedFuelBlock, U256}, + l1::{FragmentsSubmitted, Result}, + types::{Fragment, NonEmpty, TransactionResponse, U256}, }; use url::Url; @@ -32,6 +32,7 @@ impl WebsocketClient { blob_pool_key_arn: Option, unhealthy_after_n_errors: usize, aws_client: AwsClient, + first_tx_gas_estimation_multiplier: Option, ) -> ports::l1::Result { let blob_signer = if let Some(key_arn) = blob_pool_key_arn { Some(aws_client.make_signer(key_arn).await?) @@ -41,8 +42,14 @@ impl WebsocketClient { let main_signer = aws_client.make_signer(main_key_arn).await?; - let provider = - WsConnection::connect(url, contract_address, main_signer, blob_signer).await?; + let provider = WsConnection::connect( + url, + contract_address, + main_signer, + blob_signer, + first_tx_gas_estimation_multiplier, + ) + .await?; Ok(Self { inner: HealthTrackingMiddleware::new(provider, unhealthy_after_n_errors), @@ -58,15 +65,15 @@ impl WebsocketClient { self.inner.event_streamer(eth_block_height) } - pub(crate) async fn submit(&self, block: ValidatedFuelBlock) -> Result<()> { - Ok(self.inner.submit(block).await?) + pub(crate) async fn submit(&self, hash: [u8; 32], height: u32) -> Result<()> { + Ok(self.inner.submit(hash, height).await?) } pub(crate) fn commit_interval(&self) -> NonZeroU32 { self.inner.commit_interval() } - pub(crate) async fn get_block_number(&self) -> Result { + pub(crate) async fn _get_block_number(&self) -> Result { Ok(self.inner.get_block_number().await?) } @@ -81,13 +88,16 @@ impl WebsocketClient { Ok(self.inner.balance().await?) } - pub async fn submit_l2_state(&self, tx: Vec) -> Result<[u8; 32]> { - Ok(self.inner.submit_l2_state(tx).await?) + pub(crate) async fn submit_state_fragments( + &self, + fragments: NonEmpty, + ) -> ports::l1::Result { + Ok(self.inner.submit_state_fragments(fragments).await?) } #[cfg(feature = "test-helpers")] - pub async fn finalized(&self, block: ValidatedFuelBlock) -> Result { - Ok(self.inner.finalized(block).await?) + pub async fn finalized(&self, hash: [u8; 32], height: u32) -> Result { + Ok(self.inner.finalized(hash, height).await?) 
} #[cfg(feature = "test-helpers")] diff --git a/packages/eth/src/websocket/connection.rs b/packages/eth/src/websocket/connection.rs index 64c0005f..1aeff84e 100644 --- a/packages/eth/src/websocket/connection.rs +++ b/packages/eth/src/websocket/connection.rs @@ -1,46 +1,62 @@ -use std::num::NonZeroU32; +use std::{ + cmp::min, + num::NonZeroU32, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; use alloy::{ - consensus::{SidecarBuilder, SimpleCoder}, - network::{Ethereum, EthereumWallet, TransactionBuilder, TxSigner}, + eips::eip4844::BYTES_PER_BLOB, + network::{Ethereum, EthereumWallet, TransactionBuilder, TransactionBuilder4844, TxSigner}, primitives::{Address, U256}, - providers::{ - fillers::{ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller}, - Identity, Provider, ProviderBuilder, RootProvider, WsConnect, - }, + providers::{utils::Eip1559Estimation, Provider, ProviderBuilder, SendableTx, WsConnect}, pubsub::PubSubFrontend, rpc::types::{TransactionReceipt, TransactionRequest}, signers::aws::AwsSigner, sol, }; -use ports::types::{TransactionResponse, ValidatedFuelBlock}; +use itertools::Itertools; +use metrics::{ + prometheus::{self, histogram_opts}, + RegistersMetrics, +}; +use ports::{ + l1::FragmentsSubmitted, + types::{Fragment, NonEmpty, TransactionResponse}, +}; +use tracing::info; use url::Url; use super::{event_streamer::EthEventStreamer, health_tracking_middleware::EthApi}; -use crate::error::{Error, Result}; - -pub type WsProvider = FillProvider< - JoinFill< - JoinFill, NonceFiller>, ChainIdFiller>, - WalletFiller, - >, - RootProvider, - PubSubFrontend, - Ethereum, ->; +use crate::{ + error::{Error, Result}, + Eip4844BlobEncoder, +}; -type FuelStateContract = IFuelStateContract::IFuelStateContractInstance< - PubSubFrontend, - FillProvider< - JoinFill< - JoinFill, NonceFiller>, ChainIdFiller>, - WalletFiller, +pub type WsProvider = alloy::providers::fillers::FillProvider< + alloy::providers::fillers::JoinFill< + alloy::providers::fillers::JoinFill< + alloy::providers::Identity, + alloy::providers::fillers::JoinFill< + alloy::providers::fillers::GasFiller, + alloy::providers::fillers::JoinFill< + alloy::providers::fillers::BlobGasFiller, + alloy::providers::fillers::JoinFill< + alloy::providers::fillers::NonceFiller, + alloy::providers::fillers::ChainIdFiller, + >, + >, + >, >, - RootProvider, - PubSubFrontend, - Ethereum, + alloy::providers::fillers::WalletFiller, >, + alloy::providers::RootProvider, + alloy::pubsub::PubSubFrontend, + Ethereum, >; +type FuelStateContract = IFuelStateContract::IFuelStateContractInstance; sol!( #[sol(rpc)] @@ -56,18 +72,71 @@ sol!( #[derive(Clone)] pub struct WsConnection { provider: WsProvider, + first_tx_gas_estimation_multiplier: Option, + first_blob_tx_sent: Arc, blob_provider: Option, address: Address, blob_signer_address: Option
, contract: FuelStateContract, commit_interval: NonZeroU32, + metrics: Metrics, +} + +impl RegistersMetrics for WsConnection { + fn metrics(&self) -> Vec> { + vec![ + Box::new(self.metrics.blobs_per_tx.clone()), + Box::new(self.metrics.blob_used_bytes.clone()), + ] + } +} + +#[derive(Clone)] +struct Metrics { + blobs_per_tx: prometheus::Histogram, + blob_used_bytes: prometheus::Histogram, +} + +fn custom_exponential_buckets(start: f64, end: f64, steps: usize) -> Vec { + let factor = (end / start).powf(1.0 / (steps - 1) as f64); + let mut buckets = Vec::with_capacity(steps); + + let mut value = start; + for _ in 0..(steps - 1) { + buckets.push(value.ceil()); + value *= factor; + } + + buckets.push(end.ceil()); + + buckets +} + +impl Default for Metrics { + fn default() -> Self { + Self { + blobs_per_tx: prometheus::Histogram::with_opts(histogram_opts!( + "blob_per_tx", + "Number of blobs per blob transaction", + vec![1.0f64, 2., 3., 4., 5., 6.] + )) + .expect("to be correctly configured"), + + blob_used_bytes: prometheus::Histogram::with_opts(histogram_opts!( + "blob_utilization", + "bytes filled per blob", + custom_exponential_buckets(1000f64, BYTES_PER_BLOB as f64, 20) + )) + .expect("to be correctly configured"), + } + } } #[async_trait::async_trait] impl EthApi for WsConnection { - async fn submit(&self, block: ValidatedFuelBlock) -> Result<()> { - let commit_height = Self::calculate_commit_height(block.height(), self.commit_interval); - let contract_call = self.contract.commit(block.hash().into(), commit_height); + async fn submit(&self, hash: [u8; 32], height: u32) -> Result<()> { + let commit_height = Self::calculate_commit_height(height, self.commit_interval); + let contract_call = self.contract.commit(hash.into(), commit_height); let tx = contract_call.send().await?; tracing::info!("tx: {} submitted", tx.tx_hash()); @@ -109,27 +178,83 @@ impl EthApi for WsConnection { Self::convert_to_tx_response(tx_receipt) } - async fn submit_l2_state(&self, state_data: Vec) -> Result<[u8; 32]> { + async fn submit_state_fragments( + &self, + fragments: NonEmpty, + ) -> Result { let (blob_provider, blob_signer_address) = match (&self.blob_provider, &self.blob_signer_address) { (Some(provider), Some(address)) => (provider, address), _ => return Err(Error::Other("blob pool signer not configured".to_string())), }; - let blob_tx = self - .prepare_blob_tx(&state_data, *blob_signer_address) - .await?; + // we only want to add it to the metrics if the submission succeeds + let used_bytes_per_fragment = fragments.iter().map(|f| f.used_bytes()).collect_vec(); + + let num_fragments = min(fragments.len(), 6); + + let limited_fragments = fragments.into_iter().take(num_fragments); + let sidecar = Eip4844BlobEncoder::decode(limited_fragments)?; + + let blob_tx = match ( + self.first_blob_tx_sent.load(Ordering::Relaxed), + self.first_tx_gas_estimation_multiplier, + ) { + (false, Some(gas_estimation_multiplier)) => { + let max_fee_per_blob_gas = blob_provider.get_blob_base_fee().await?; + let Eip1559Estimation { + max_fee_per_gas, + max_priority_fee_per_gas, + } = blob_provider.estimate_eip1559_fees(None).await?; + + TransactionRequest::default() + .with_max_fee_per_blob_gas( + max_fee_per_blob_gas.saturating_mul(gas_estimation_multiplier.into()), + ) + .with_max_fee_per_gas( + max_fee_per_gas.saturating_mul(gas_estimation_multiplier.into()), + ) + .with_max_priority_fee_per_gas( + max_priority_fee_per_gas.saturating_mul(gas_estimation_multiplier.into()), + ) + .with_blob_sidecar(sidecar) + 
.with_to(*blob_signer_address) + } + _ => TransactionRequest::default() + .with_blob_sidecar(sidecar) + .with_to(*blob_signer_address), + }; + + let blob_tx = blob_provider.fill(blob_tx).await?; + let SendableTx::Envelope(blob_tx) = blob_tx else { + return Err(crate::error::Error::Other( + "Expected an envelope because we have a wallet filler as well, but got a builder from alloy. This is a bug.".to_string(), + )); + }; + let tx_id = *blob_tx.tx_hash(); + info!("sending blob tx: {tx_id}",); + + let _ = blob_provider.send_tx_envelope(blob_tx).await?; + + self.first_blob_tx_sent.store(true, Ordering::Relaxed); - let tx = blob_provider.send_transaction(blob_tx).await?; + self.metrics.blobs_per_tx.observe(num_fragments as f64); - Ok(tx.tx_hash().0) + for bytes in used_bytes_per_fragment { + self.metrics.blob_used_bytes.observe(bytes as f64); + } + + Ok(FragmentsSubmitted { + tx: tx_id.0, + num_fragments: num_fragments.try_into().expect("cannot be zero"), + }) } #[cfg(feature = "test-helpers")] - async fn finalized(&self, block: ValidatedFuelBlock) -> Result { + async fn finalized(&self, hash: [u8; 32], height: u32) -> Result { Ok(self .contract - .finalized(block.hash().into(), U256::from(block.height())) + .finalized(hash.into(), U256::from(height)) .call() .await? ._0) @@ -153,6 +278,7 @@ impl WsConnection { contract_address: Address, main_signer: AwsSigner, blob_signer: Option, + first_tx_gas_estimation_multiplier: Option, ) -> Result { let address = main_signer.address(); @@ -187,6 +313,9 @@ impl WsConnection { blob_signer_address, contract, commit_interval, + metrics: Default::default(), + first_blob_tx_sent: Arc::new(AtomicBool::new(false)), + first_tx_gas_estimation_multiplier, }) } @@ -208,16 +337,6 @@ impl WsConnection { Ok(self.provider.get_balance(address).await?) 
} - async fn prepare_blob_tx(&self, data: &[u8], to: Address) -> Result { - let sidecar = SidecarBuilder::from_coder_and_data(SimpleCoder::default(), data).build()?; - - let blob_tx = TransactionRequest::default() - .with_to(to) - .with_blob_sidecar(sidecar); - - Ok(blob_tx) - } - fn convert_to_tx_response( tx_receipt: Option, ) -> Result> { @@ -242,6 +361,7 @@ impl WsConnection { #[cfg(test)] mod tests { + use super::*; #[test] diff --git a/packages/eth/src/websocket/health_tracking_middleware.rs b/packages/eth/src/websocket/health_tracking_middleware.rs index 09bf8607..18b5dd10 100644 --- a/packages/eth/src/websocket/health_tracking_middleware.rs +++ b/packages/eth/src/websocket/health_tracking_middleware.rs @@ -3,7 +3,8 @@ use std::num::NonZeroU32; use ::metrics::{ prometheus::core::Collector, ConnectionHealthTracker, HealthChecker, RegistersMetrics, }; -use ports::types::{TransactionResponse, ValidatedFuelBlock, U256}; +use delegate::delegate; +use ports::types::{Fragment, NonEmpty, TransactionResponse, U256}; use crate::{ error::{Error, Result}, @@ -14,7 +15,7 @@ use crate::{ #[cfg_attr(test, mockall::automock)] #[async_trait::async_trait] pub trait EthApi { - async fn submit(&self, block: ValidatedFuelBlock) -> Result<()>; + async fn submit(&self, hash: [u8; 32], height: u32) -> Result<()>; async fn get_block_number(&self) -> Result; async fn balance(&self) -> Result; fn commit_interval(&self) -> NonZeroU32; @@ -23,13 +24,23 @@ pub trait EthApi { &self, tx_hash: [u8; 32], ) -> Result>; - async fn submit_l2_state(&self, state_data: Vec) -> Result<[u8; 32]>; + async fn submit_state_fragments( + &self, + fragments: NonEmpty, + ) -> Result; #[cfg(feature = "test-helpers")] - async fn finalized(&self, block: ValidatedFuelBlock) -> Result; + async fn finalized(&self, hash: [u8; 32], height: u32) -> Result; #[cfg(feature = "test-helpers")] async fn block_hash_at_commit_height(&self, commit_height: u32) -> Result<[u8; 32]>; } +#[cfg(test)] +impl RegistersMetrics for MockEthApi { + fn metrics(&self) -> Vec> { + vec![] + } +} + #[derive(Clone)] pub struct HealthTrackingMiddleware { adapter: T, @@ -64,10 +75,13 @@ impl HealthTrackingMiddleware { } } -// User responsible for registering any metrics T might have -impl RegistersMetrics for HealthTrackingMiddleware { +impl RegistersMetrics for HealthTrackingMiddleware { fn metrics(&self) -> Vec> { - self.metrics.metrics() + self.metrics + .metrics() + .into_iter() + .chain(self.adapter.metrics()) + .collect() } } @@ -76,8 +90,15 @@ impl EthApi for HealthTrackingMiddleware where T: EthApi + Send + Sync, { - async fn submit(&self, block: ValidatedFuelBlock) -> Result<()> { - let response = self.adapter.submit(block).await; + delegate! 
{ + to self.adapter { + fn event_streamer(&self, eth_block_height: u64) -> EthEventStreamer; + fn commit_interval(&self) -> NonZeroU32; + } + } + + async fn submit(&self, hash: [u8; 32], height: u32) -> Result<()> { + let response = self.adapter.submit(hash, height).await; self.note_network_status(&response); response } @@ -97,36 +118,36 @@ where response } - fn event_streamer(&self, eth_block_height: u64) -> EthEventStreamer { - self.adapter.event_streamer(eth_block_height) - } - async fn balance(&self) -> Result { let response = self.adapter.balance().await; self.note_network_status(&response); response } - fn commit_interval(&self) -> NonZeroU32 { - self.adapter.commit_interval() - } - - async fn submit_l2_state(&self, tx: Vec) -> Result<[u8; 32]> { - let response = self.adapter.submit_l2_state(tx).await; + async fn submit_state_fragments( + &self, + fragments: NonEmpty, + ) -> Result { + let response = self.adapter.submit_state_fragments(fragments).await; self.note_network_status(&response); response } #[cfg(feature = "test-helpers")] - async fn finalized(&self, block: ValidatedFuelBlock) -> Result { - self.adapter.finalized(block).await + async fn finalized(&self, hash: [u8; 32], height: u32) -> Result { + let response = self.adapter.finalized(hash, height).await; + self.note_network_status(&response); + response } #[cfg(feature = "test-helpers")] async fn block_hash_at_commit_height(&self, commit_height: u32) -> Result<[u8; 32]> { - self.adapter + let response = self + .adapter .block_hash_at_commit_height(commit_height) - .await + .await; + self.note_network_status(&response); + response } } @@ -142,7 +163,7 @@ mod tests { let mut eth_adapter = MockEthApi::new(); eth_adapter .expect_submit() - .returning(|_| Err(Error::Network("An error".into()))); + .returning(|_, _| Err(Error::Network("An error".into()))); eth_adapter .expect_get_block_number() @@ -151,7 +172,7 @@ mod tests { let adapter = HealthTrackingMiddleware::new(eth_adapter, 1); let health_check = adapter.connection_health_checker(); - let _ = adapter.submit(given_a_block(42)).await; + let _ = adapter.submit([0; 32], 0).await; // when let _ = adapter.get_block_number().await; @@ -166,7 +187,7 @@ mod tests { let mut eth_adapter = MockEthApi::new(); eth_adapter .expect_submit() - .returning(|_| Err(Error::Other("An error".into()))); + .returning(|_, _| Err(Error::Other("An error".into()))); eth_adapter .expect_get_block_number() @@ -175,7 +196,7 @@ mod tests { let adapter = HealthTrackingMiddleware::new(eth_adapter, 2); let health_check = adapter.connection_health_checker(); - let _ = adapter.submit(given_a_block(42)).await; + let _ = adapter.submit([0; 32], 0).await; // when let _ = adapter.get_block_number().await; @@ -189,7 +210,7 @@ mod tests { let mut eth_adapter = MockEthApi::new(); eth_adapter .expect_submit() - .returning(|_| Err(Error::Network("An error".into()))); + .returning(|_, _| Err(Error::Network("An error".into()))); eth_adapter .expect_get_block_number() @@ -199,7 +220,7 @@ mod tests { let health_check = adapter.connection_health_checker(); assert!(health_check.healthy()); - let _ = adapter.submit(given_a_block(42)).await; + let _ = adapter.submit([0; 32], 0).await; assert!(health_check.healthy()); let _ = adapter.get_block_number().await; @@ -214,7 +235,7 @@ mod tests { let mut eth_adapter = MockEthApi::new(); eth_adapter .expect_submit() - .returning(|_| Err(Error::Network("An error".into()))); + .returning(|_, _| Err(Error::Network("An error".into()))); eth_adapter .expect_get_block_number() @@ -224,7 
+245,7 @@ mod tests {
         let adapter = HealthTrackingMiddleware::new(eth_adapter, 3);
         adapter.register_metrics(&registry);
 
-        let _ = adapter.submit(given_a_block(42)).await;
+        let _ = adapter.submit([0; 32], 0).await;
         let _ = adapter.get_block_number().await;
 
         let metrics = registry.gather();
@@ -237,8 +258,4 @@ mod tests {
 
         assert_eq!(eth_network_err_metric.get_value(), 2f64);
     }
-
-    fn given_a_block(block_height: u32) -> ValidatedFuelBlock {
-        ValidatedFuelBlock::new([0; 32], block_height)
-    }
 }
diff --git a/packages/fuel/Cargo.toml b/packages/fuel/Cargo.toml
index 9cb7f0da..c6d59f37 100644
--- a/packages/fuel/Cargo.toml
+++ b/packages/fuel/Cargo.toml
@@ -8,17 +8,24 @@ repository = { workspace = true }
 version = { workspace = true }
 publish = { workspace = true }
 rust-version = { workspace = true }
+build = "build.rs"
 
 [dependencies]
-async-trait = { workspace = true }
+cynic = { version = "2.2", features = ["http-reqwest"] }
+delegate = { workspace = true }
 fuel-core-client = { workspace = true, features = ["subscriptions"] }
-fuel-core-types = { workspace = true, optional = true }
+fuel-core-types = { workspace = true }
+futures = { workspace = true }
 metrics = { workspace = true }
 ports = { workspace = true, features = ["fuel"] }
+trait-variant = { workspace = true }
 url = { workspace = true }
 
 [dev-dependencies]
 tokio = { workspace = true, features = ["macros"] }
 
+[build-dependencies]
+fuel-core-client = { workspace = true }
+
 [features]
-test-helpers = ["fuel-core-types"]
+test-helpers = []
diff --git a/packages/fuel/build.rs b/packages/fuel/build.rs
new file mode 100644
index 00000000..1df60ffe
--- /dev/null
+++ b/packages/fuel/build.rs
@@ -0,0 +1,9 @@
+use std::fs;
+
+fn main() {
+    fs::create_dir_all("target").expect("Unable to create target directory");
+    fs::write("target/schema.sdl", fuel_core_client::SCHEMA_SDL)
+        .expect("Unable to write schema file");
+
+    println!("cargo:rerun-if-changed=build.rs");
+}
diff --git a/packages/fuel/src/client.rs b/packages/fuel/src/client.rs
index cb736866..717081a7 100644
--- a/packages/fuel/src/client.rs
+++ b/packages/fuel/src/client.rs
@@ -1,11 +1,19 @@
+use std::{cmp::min, num::NonZeroU32, ops::RangeInclusive};
+
+use block_ext::{ClientExt, FullBlock};
 #[cfg(feature = "test-helpers")]
 use fuel_core_client::client::types::{
     primitives::{Address, AssetId},
     Coin, CoinType,
 };
-use fuel_core_client::client::{types::Block, FuelClient as GqlClient};
+use fuel_core_client::client::{
+    pagination::{PageDirection, PaginatedResult, PaginationRequest},
+    types::Block,
+    FuelClient as GqlClient,
+};
 #[cfg(feature = "test-helpers")]
 use fuel_core_types::fuel_tx::Transaction;
+use futures::{stream, Stream};
 use metrics::{
     prometheus::core::Collector, ConnectionHealthTracker, HealthChecker, RegistersMetrics,
 };
@@ -13,21 +21,29 @@ use url::Url;
 
 use crate::{metrics::Metrics, Error, Result};
 
+mod block_ext;
+
 #[derive(Clone)]
 pub struct HttpClient {
     client: GqlClient,
     metrics: Metrics,
     health_tracker: ConnectionHealthTracker,
+    full_blocks_req_size: NonZeroU32,
 }
 
 impl HttpClient {
     #[must_use]
-    pub fn new(url: &Url, unhealthy_after_n_errors: usize) -> Self {
+    pub fn new(
+        url: &Url,
+        unhealthy_after_n_errors: usize,
+        full_blocks_req_size: NonZeroU32,
+    ) -> Self {
         let client = GqlClient::new(url).expect("Url to be well formed");
         Self {
             client,
             metrics: Metrics::default(),
             health_tracker: ConnectionHealthTracker::new(unhealthy_after_n_errors),
+            full_blocks_req_size,
         }
     }
 
@@ -81,7 +97,7 @@ impl HttpClient {
         }
     }
 
-    pub(crate) async fn _block_at_height(&self, height: u32) -> Result<Option<Block>> {
+    pub(crate) async fn block_at_height(&self, height: u32) -> Result<Option<Block>> {
         match self.client.block_by_height(height.into()).await {
             Ok(maybe_block) => {
                 self.handle_network_success();
@@ -94,7 +110,89 @@ impl HttpClient {
         }
     }
 
-    pub(crate) async fn _latest_block(&self) -> Result<Block> {
+    pub(crate) fn block_in_height_range(
+        &self,
+        range: RangeInclusive<u32>,
+    ) -> impl Stream<Item = Result<Vec<ports::fuel::FullFuelBlock>>> + '_ {
+        struct Progress {
+            cursor: Option<String>,
+            blocks_so_far: usize,
+            target_amount: usize,
+        }
+
+        impl Progress {
+            pub fn new(range: RangeInclusive<u32>) -> Self {
+                // Cursor represents the block height of the last block in the previous request.
+                let cursor = range.start().checked_sub(1).map(|v| v.to_string());
+
+                Self {
+                    cursor,
+                    blocks_so_far: 0,
+                    target_amount: range.count(),
+                }
+            }
+        }
+
+        impl Progress {
+            fn consume(&mut self, result: PaginatedResult<FullBlock, String>) -> Vec<FullBlock> {
+                self.blocks_so_far += result.results.len();
+                self.cursor = result.cursor;
+                result.results
+            }
+
+            fn take_cursor(&mut self) -> Option<String> {
+                self.cursor.take()
+            }
+
+            fn remaining(&self) -> i32 {
+                self.target_amount.saturating_sub(self.blocks_so_far) as i32
+            }
+        }
+
+        let initial_progress = Progress::new(range);
+
+        stream::try_unfold(initial_progress, move |mut current_progress| async move {
+            if current_progress.remaining() <= 0 {
+                return Ok(None);
+            }
+
+            let request = PaginationRequest {
+                cursor: current_progress.take_cursor(),
+                results: min(
+                    current_progress.remaining(),
+                    self.full_blocks_req_size
+                        .get()
+                        .try_into()
+                        .unwrap_or(i32::MAX),
+                ),
+                direction: PageDirection::Forward,
+            };
+
+            let response = self
+                .client
+                .full_blocks(request.clone())
+                .await
+                .map_err(|e| {
+                    Error::Network(format!(
+                        "While sending request for full blocks: {request:?} got error: {e}"
+                    ))
+                })?;
+
+            let results: Vec<_> = current_progress
+                .consume(response)
+                .into_iter()
+                .map(ports::fuel::FullFuelBlock::try_from)
+                .collect::<Result<Vec<_>>>()?;
+
+            if results.is_empty() {
+                Ok(None)
+            } else {
+                Ok(Some((results, current_progress)))
+            }
+        })
+    }
+
+    pub async fn latest_block(&self) -> Result<Block> {
         match self.client.chain_info().await {
             Ok(chain_info) => {
                 self.handle_network_success();
diff --git a/packages/fuel/src/client/block_ext.rs b/packages/fuel/src/client/block_ext.rs
new file mode 100644
index 00000000..e5fa30ec
--- /dev/null
+++ b/packages/fuel/src/client/block_ext.rs
@@ -0,0 +1,135 @@
+use cynic::QueryBuilder;
+use fuel_core_client::client::{
+    pagination::{PaginatedResult, PaginationRequest},
+    schema::{
+        block::{Consensus, Header},
+        primitives::TransactionId,
+        schema,
+        tx::TransactionStatus,
+        BlockId, ConnectionArgs, HexString, PageInfo,
+    },
+    FuelClient,
+};
+use fuel_core_types::fuel_crypto::PublicKey;
+use ports::types::NonEmpty;
+
+#[derive(cynic::QueryFragment, Debug)]
+#[cynic(
+    schema_path = "./target/schema.sdl",
+    graphql_type = "Query",
+    variables = "ConnectionArgs"
+)]
+pub struct FullBlocksQuery {
+    #[arguments(after: $after, before: $before, first: $first, last: $last)]
+    pub blocks: FullBlockConnection,
+}
+
+#[derive(cynic::QueryFragment, Debug)]
+#[cynic(schema_path = "./target/schema.sdl", graphql_type = "BlockConnection")]
+pub struct FullBlockConnection {
+    pub edges: Vec<FullBlockEdge>,
+    pub page_info: PageInfo,
+}
+
+#[derive(cynic::QueryFragment, Debug)]
+#[cynic(schema_path = "./target/schema.sdl", graphql_type = "BlockEdge")]
+pub struct FullBlockEdge {
+    pub cursor: String,
+    pub node: FullBlock,
+}
+
+#[derive(cynic::QueryFragment, Debug)]
+#[cynic(schema_path = "./target/schema.sdl", graphql_type = "Block")]
+pub struct FullBlock {
+    pub id: BlockId,
+    pub header: Header,
+    pub consensus: Consensus,
+    pub transactions: Vec<OpaqueTransaction>,
+}
+
+impl TryFrom<FullBlock> for ports::fuel::FullFuelBlock {
+    type Error = crate::Error;
+
+    fn try_from(value: FullBlock) -> Result<Self, Self::Error> {
+        let raw_transactions = value
+            .transactions
+            .into_iter()
+            .map(|t| {
+                NonEmpty::collect(t.raw_payload.to_vec()).ok_or_else(|| {
+                    crate::Error::Other(format!(
+                        "encountered empty transaction in block: {}",
+                        value.id
+                    ))
+                })
+            })
+            .collect::<Result<Vec<_>, Self::Error>>()?;
+
+        let header = value.header.try_into().map_err(|e| {
+            crate::Error::Other(format!(
+                "failed to convert block header of fuel block {}: {e}",
+                value.id
+            ))
+        })?;
+
+        Ok(Self {
+            id: value.id.into(),
+            header,
+            consensus: value.consensus.into(),
+            raw_transactions,
+        })
+    }
+}
+
+impl FullBlock {
+    /// Returns the block producer public key, if any.
+    pub fn block_producer(&self) -> Option<PublicKey> {
+        let message = self.header.id.clone().into_message();
+        match &self.consensus {
+            Consensus::Genesis(_) => Some(Default::default()),
+            Consensus::PoAConsensus(poa) => {
+                let signature = poa.signature.clone().into_signature();
+                let producer_pub_key = signature.recover(&message);
+                producer_pub_key.ok()
+            }
+            Consensus::Unknown => None,
+        }
+    }
+}
+
+impl From<FullBlockConnection> for PaginatedResult<FullBlock, String> {
+    fn from(conn: FullBlockConnection) -> Self {
+        PaginatedResult {
+            cursor: conn.page_info.end_cursor,
+            has_next_page: conn.page_info.has_next_page,
+            has_previous_page: conn.page_info.has_previous_page,
+            results: conn.edges.into_iter().map(|e| e.node).collect(),
+        }
+    }
+}
+
+#[derive(cynic::QueryFragment, Clone, Debug)]
+#[cynic(schema_path = "./target/schema.sdl", graphql_type = "Transaction")]
+pub struct OpaqueTransaction {
+    pub id: TransactionId,
+    pub raw_payload: HexString,
+    pub status: Option<TransactionStatus>,
+}
+
+#[trait_variant::make(Send)]
+pub trait ClientExt {
+    async fn full_blocks(
+        &self,
+        request: PaginationRequest<String>,
+    ) -> std::io::Result<PaginatedResult<FullBlock, String>>;
+}
+
+impl ClientExt for FuelClient {
+    async fn full_blocks(
+        &self,
+        request: PaginationRequest<String>,
+    ) -> std::io::Result<PaginatedResult<FullBlock, String>> {
+        let query = FullBlocksQuery::build(request.into());
+        let blocks = self.query(query).await?.blocks.into();
+        Ok(blocks)
+    }
+}
diff --git a/packages/fuel/src/lib.rs b/packages/fuel/src/lib.rs
index 0f93449b..2452374e 100644
--- a/packages/fuel/src/lib.rs
+++ b/packages/fuel/src/lib.rs
@@ -1,21 +1,34 @@
 #![deny(unused_crate_dependencies)]
-use ports::fuel::FuelBlock;
+use std::ops::RangeInclusive;
+
+use futures::StreamExt;
+use ports::fuel::{BoxStream, FuelBlock};
 
 mod client;
 mod metrics;
 pub use client::*;
+use delegate::delegate;
 
 type Error = ports::fuel::Error;
 type Result<T> = ports::fuel::Result<T>;
 
-#[async_trait::async_trait]
 impl ports::fuel::Api for client::HttpClient {
-    async fn block_at_height(&self, height: u32) -> ports::fuel::Result<Option<FuelBlock>> {
-        self._block_at_height(height).await
+    delegate! {
+        to self {
+            async fn block_at_height(&self, height: u32) -> ports::fuel::Result<Option<FuelBlock>>;
+            async fn latest_block(&self) -> ports::fuel::Result<FuelBlock>;
+        }
+    }
+
+    async fn latest_height(&self) -> Result<u32> {
+        self.latest_block().await.map(|b| b.header.height)
     }
 
-    async fn latest_block(&self) -> ports::fuel::Result<FuelBlock> {
-        self._latest_block().await
+    fn full_blocks_in_height_range(
+        &self,
+        range: RangeInclusive<u32>,
+    ) -> BoxStream<'_, Result<Vec<ports::fuel::FullFuelBlock>>> {
+        self.block_in_height_range(range).boxed()
     }
 }
 
@@ -25,7 +38,6 @@ mod tests {
         prometheus::{proto::Metric, Registry},
         RegistersMetrics,
     };
-    use ports::fuel::Api;
    use url::Url;
 
     use super::*;
@@ -89,7 +101,7 @@ mod tests {
         // killing the node once the SDK supports it.
         let url = Url::parse("localhost:12344").unwrap();
 
-        let fuel_adapter = HttpClient::new(&url, 1);
+        let fuel_adapter = HttpClient::new(&url, 1, 1.try_into().unwrap());
 
         let registry = Registry::default();
         fuel_adapter.register_metrics(&registry);
@@ -116,7 +128,7 @@ mod tests {
         // killing the node once the SDK supports it.
         let url = Url::parse("http://localhost:12344").unwrap();
 
-        let fuel_adapter = client::HttpClient::new(&url, 3);
+        let fuel_adapter = client::HttpClient::new(&url, 3, 1.try_into().unwrap());
 
         let health_check = fuel_adapter.connection_health_checker();
 
         assert!(health_check.healthy());
diff --git a/packages/ports/Cargo.toml b/packages/ports/Cargo.toml
index 14296b0e..4db99ed0 100644
--- a/packages/ports/Cargo.toml
+++ b/packages/ports/Cargo.toml
@@ -12,30 +12,41 @@ rust-version = { workspace = true }
 
 [dependencies]
 alloy = { workspace = true, optional = true }
 async-trait = { workspace = true, optional = true }
+delegate = { workspace = true, optional = true }
 fuel-core-client = { workspace = true, optional = true }
 futures = { workspace = true, optional = true }
-impl-tools = { workspace = true, optional = true }
+itertools = { workspace = true, features = ["use_std"], optional = true }
 mockall = { workspace = true, optional = true }
+nonempty = { workspace = true }
 rand = { workspace = true, optional = true }
-serde = { workspace = true, features = ["derive"] }
 sqlx = { workspace = true, features = ["chrono"] }
 thiserror = { workspace = true, optional = true }
-validator = { workspace = true, optional = true }
+trait-variant = { workspace = true, optional = true }
 
 [features]
-test-helpers = ["dep:mockall", "dep:rand", "validator?/test-helpers"]
+test-helpers = ["dep:mockall", "dep:rand"]
 l1 = [
+    "dep:async-trait",
     "dep:alloy",
     "dep:futures",
     "dep:thiserror",
-    "dep:async-trait",
-    "dep:validator",
+    "dep:trait-variant",
 ]
 fuel = [
     "dep:thiserror",
-    "dep:async-trait",
     "dep:fuel-core-client",
-    "dep:validator",
+    "dep:futures",
+    "dep:trait-variant",
 ]
-storage = ["dep:impl-tools", "dep:thiserror", "dep:async-trait"]
-full = ["l1", "fuel", "storage"]
+storage = [
+    "dep:trait-variant",
+    "dep:thiserror",
+    "dep:futures",
+    "dep:delegate",
+    "dep:itertools",
+]
+clock = []
+full = ["l1", "fuel", "storage", "clock"]
+
+[dev-dependencies]
+rand = { workspace = true, features = ["std", "std_rng"] }
diff --git a/packages/ports/src/lib.rs b/packages/ports/src/lib.rs
index 205d7efa..801770d4 100644
--- a/packages/ports/src/lib.rs
+++ b/packages/ports/src/lib.rs
@@ -7,8 +7,16 @@ mod ports {
 
     #[cfg(feature = "storage")]
     pub mod storage;
+
+    #[cfg(feature = "clock")]
+    pub mod clock;
 }
 
-#[cfg(any(feature = "l1", feature = "fuel", feature = "storage"))]
+#[cfg(any(
+    feature = "l1",
+    feature = "fuel",
+    feature = "storage",
+    feature = "clock"
+))]
 pub use ports::*;
 
 pub mod types;
diff --git a/packages/ports/src/ports/clock.rs b/packages/ports/src/ports/clock.rs
new file mode 100644
index 00000000..62962da2
--- /dev/null
+++ b/packages/ports/src/ports/clock.rs
@@ -0,0 +1,5 @@
+use sqlx::types::chrono::{DateTime, Utc};
+
+pub trait Clock {
+    fn now(&self) -> DateTime<Utc>;
+}
diff --git a/packages/ports/src/ports/fuel.rs b/packages/ports/src/ports/fuel.rs
index 0cfdab9e..8b08569a 100644
--- a/packages/ports/src/ports/fuel.rs
+++ b/packages/ports/src/ports/fuel.rs
@@ -1,11 +1,26 @@
+use std::ops::RangeInclusive;
+
 pub use fuel_core_client::client::types::{
     block::{
-        Block as FuelBlock, Consensus as FuelConsensus, Header as FuelHeader,
-        PoAConsensus as FuelPoAConsensus,
+        Block as FuelBlock, Consensus as FuelConsensus, Genesis, Genesis as FuelGenesis,
+        Header as FuelHeader, PoAConsensus as FuelPoAConsensus,
     },
     primitives::{BlockId as FuelBlockId, Bytes32 as FuelBytes32, PublicKey as FuelPublicKey},
+    Consensus,
 };
 
+#[derive(Debug, Clone)]
+pub struct FullFuelBlock {
+    pub id: FuelBytes32,
+    pub header: FuelHeader,
+    pub consensus: Consensus,
+    pub raw_transactions: Vec<NonEmpty<u8>>,
+}
+
+pub use futures::stream::BoxStream;
+
+use crate::types::NonEmpty;
+
 #[derive(Debug, thiserror::Error)]
 pub enum Error {
     #[error("{0}")]
@@ -16,9 +31,15 @@ pub enum Error {
 
 pub type Result<T> = std::result::Result<T, Error>;
 
+#[allow(async_fn_in_trait)]
+#[trait_variant::make(Send)]
 #[cfg_attr(feature = "test-helpers", mockall::automock)]
-#[async_trait::async_trait]
 pub trait Api: Send + Sync {
     async fn block_at_height(&self, height: u32) -> Result<Option<FuelBlock>>;
+    fn full_blocks_in_height_range(
+        &self,
+        range: RangeInclusive<u32>,
+    ) -> BoxStream<'_, Result<Vec<FullFuelBlock>>>;
     async fn latest_block(&self) -> Result<FuelBlock>;
+    async fn latest_height(&self) -> Result<u32>;
 }
diff --git a/packages/ports/src/ports/l1.rs b/packages/ports/src/ports/l1.rs
index 1143b1d5..ce305949 100644
--- a/packages/ports/src/ports/l1.rs
+++ b/packages/ports/src/ports/l1.rs
@@ -1,8 +1,8 @@
-use std::pin::Pin;
+use std::{num::NonZeroUsize, pin::Pin};
 
 use crate::types::{
-    FuelBlockCommittedOnL1, InvalidL1Height, L1Height, Stream, TransactionResponse,
-    ValidatedFuelBlock, U256,
+    Fragment, FuelBlockCommittedOnL1, InvalidL1Height, L1Height, NonEmpty, Stream,
+    TransactionResponse, U256,
 };
 
 #[derive(Debug, thiserror::Error)]
@@ -21,18 +21,29 @@ impl From<InvalidL1Height> for Error {
     }
 }
 
+#[allow(async_fn_in_trait)]
+#[trait_variant::make(Send)]
 #[cfg_attr(feature = "test-helpers", mockall::automock)]
-#[async_trait::async_trait]
 pub trait Contract: Send + Sync {
-    async fn submit(&self, block: ValidatedFuelBlock) -> Result<()>;
+    async fn submit(&self, hash: [u8; 32], height: u32) -> Result<()>;
     fn event_streamer(&self, height: L1Height) -> Box<dyn EventStreamer + Send + Sync>;
     fn commit_interval(&self) -> std::num::NonZeroU32;
 }
 
+#[derive(Debug, Clone, Copy)]
+pub struct FragmentsSubmitted {
+    pub tx: [u8; 32],
+    pub num_fragments: NonZeroUsize,
+}
+
+#[allow(async_fn_in_trait)]
+#[trait_variant::make(Send)]
 #[cfg_attr(feature = "test-helpers", mockall::automock)]
-#[async_trait::async_trait]
 pub trait Api {
-    async fn submit_l2_state(&self, state_data: Vec<u8>) -> Result<[u8; 32]>;
+    async fn submit_state_fragments(
+        &self,
+        fragments: NonEmpty<Fragment>,
+    ) -> Result<FragmentsSubmitted>;
     async fn get_block_number(&self) -> Result<L1Height>;
     async fn balance(&self) -> Result<U256>;
     async fn get_transaction_response(
@@ -41,6 +52,11 @@ pub trait Api {
     ) -> Result<Option<TransactionResponse>>;
 }
 
+pub trait FragmentEncoder {
+    fn encode(&self, data: NonEmpty<u8>) -> Result<NonEmpty<Fragment>>;
+    fn gas_usage(&self, num_bytes: NonZeroUsize) -> u64;
+}
+
 #[cfg_attr(feature = "test-helpers", mockall::automock)]
#[async_trait::async_trait] pub trait EventStreamer { diff --git a/packages/ports/src/ports/storage.rs b/packages/ports/src/ports/storage.rs index ffac048c..ff4108c6 100644 --- a/packages/ports/src/ports/storage.rs +++ b/packages/ports/src/ports/storage.rs @@ -1,7 +1,18 @@ -use std::sync::Arc; +use std::{ + fmt::{Display, Formatter}, + iter::{Chain, Once}, + num::NonZeroUsize, + ops::{Index, RangeInclusive}, + sync::Arc, +}; + +use delegate::delegate; +pub use futures::stream::BoxStream; +use itertools::Itertools; +pub use sqlx::types::chrono::{DateTime, Utc}; use crate::types::{ - BlockSubmission, StateFragment, StateSubmission, SubmissionTx, TransactionState, + BlockSubmission, CollectNonEmpty, Fragment, L1Tx, NonEmpty, NonNegative, TransactionState, }; #[derive(Debug, thiserror::Error)] @@ -12,29 +23,579 @@ pub enum Error { Conversion(String), } +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct FuelBlock { + pub hash: [u8; 32], + pub height: u32, + pub data: NonEmpty, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BundleFragment { + pub id: NonNegative, + pub idx: NonNegative, + pub bundle_id: NonNegative, + pub fragment: Fragment, +} + pub type Result = std::result::Result; -#[async_trait::async_trait] -#[impl_tools::autoimpl(for &T, &mut T, Arc, Box)] -#[cfg_attr(feature = "test-helpers", mockall::automock)] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SequentialFuelBlocks { + blocks: NonEmpty, +} + +impl IntoIterator for SequentialFuelBlocks { + type Item = FuelBlock; + type IntoIter = Chain, std::vec::IntoIter>; + fn into_iter(self) -> Self::IntoIter { + self.blocks.into_iter() + } +} + +impl Index for SequentialFuelBlocks { + type Output = FuelBlock; + fn index(&self, index: usize) -> &Self::Output { + &self.blocks[index] + } +} + +impl SequentialFuelBlocks { + pub fn into_inner(self) -> NonEmpty { + self.blocks + } + + pub fn from_first_sequence(blocks: NonEmpty) -> Self { + let blocks = blocks + .into_iter() + .scan(None, |prev, block| match prev { + Some(height) if *height + 1 == block.height => { + *prev = Some(block.height); + Some(block) + } + None => { + *prev = Some(block.height); + Some(block) + } + _ => None, + }) + .collect_nonempty() + .expect("at least the first block"); + + blocks.try_into().expect("blocks are sequential") + } + + pub fn len(&self) -> NonZeroUsize { + self.blocks.len_nonzero() + } + + pub fn height_range(&self) -> RangeInclusive { + let first = self.blocks.first().height; + let last = self.blocks.last().height; + first..=last + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct InvalidSequence { + reason: String, +} + +impl InvalidSequence { + pub fn new(reason: String) -> Self { + Self { reason } + } +} + +impl Display for InvalidSequence { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + write!(f, "invalid sequence: {}", self.reason) + } +} + +impl std::error::Error for InvalidSequence {} + +impl TryFrom> for SequentialFuelBlocks { + type Error = InvalidSequence; + + fn try_from(blocks: NonEmpty) -> std::result::Result { + let is_sorted = blocks + .iter() + .tuple_windows() + .all(|(l, r)| l.height < r.height); + + if !is_sorted { + return Err(InvalidSequence::new( + "blocks are not sorted by height".to_string(), + )); + } + + let is_sequential = blocks + .iter() + .tuple_windows() + .all(|(l, r)| l.height + 1 == r.height); + if !is_sequential { + return Err(InvalidSequence::new( + "blocks are not sequential by height".to_string(), + )); + } + + Ok(Self { blocks }) + } +} + +#[allow(async_fn_in_trait)] 
+#[trait_variant::make(Send)] pub trait Storage: Send + Sync { async fn insert(&self, submission: BlockSubmission) -> Result<()>; async fn submission_w_latest_block(&self) -> Result>; async fn set_submission_completed(&self, fuel_block_hash: [u8; 32]) -> Result; + async fn insert_blocks(&self, block: NonEmpty) -> Result<()>; + async fn missing_blocks( + &self, + starting_height: u32, + current_height: u32, + ) -> Result>>; + async fn lowest_sequence_of_unbundled_blocks( + &self, + starting_height: u32, + limit: usize, + ) -> Result>; + async fn insert_bundle_and_fragments( + &self, + block_range: RangeInclusive, + fragments: NonEmpty, + ) -> Result<()>; - async fn insert_state_submission( + async fn record_pending_tx( &self, - submission: StateSubmission, - fragments: Vec, + tx_hash: [u8; 32], + fragments: NonEmpty>, ) -> Result<()>; - async fn get_unsubmitted_fragments(&self) -> Result>; - async fn record_pending_tx(&self, tx_hash: [u8; 32], fragment_ids: Vec) -> Result<()>; - async fn get_pending_txs(&self) -> Result>; + async fn get_pending_txs(&self) -> Result>; async fn has_pending_txs(&self) -> Result; - async fn state_submission_w_latest_block(&self) -> Result>; - async fn update_submission_tx_state( + async fn oldest_nonfinalized_fragments( &self, - hash: [u8; 32], - state: TransactionState, - ) -> Result<()>; + starting_height: u32, + limit: usize, + ) -> Result>; + async fn last_time_a_fragment_was_finalized(&self) -> Result>>; + async fn update_tx_state(&self, hash: [u8; 32], state: TransactionState) -> Result<()>; +} + +impl Storage for Arc { + delegate! { + to (**self) { + async fn insert(&self, submission: BlockSubmission) -> Result<()>; + async fn submission_w_latest_block(&self) -> Result>; + async fn set_submission_completed(&self, fuel_block_hash: [u8; 32]) -> Result; + async fn insert_blocks(&self, block: NonEmpty) -> Result<()>; + async fn missing_blocks( + &self, + starting_height: u32, + current_height: u32, + ) -> Result>>; + async fn lowest_sequence_of_unbundled_blocks( + &self, + starting_height: u32, + limit: usize, + ) -> Result>; + async fn insert_bundle_and_fragments( + &self, + block_range: RangeInclusive, + fragments: NonEmpty, + ) -> Result<()>; + async fn record_pending_tx( + &self, + tx_hash: [u8; 32], + fragment_id: NonEmpty>, + ) -> Result<()>; + async fn get_pending_txs(&self) -> Result>; + async fn has_pending_txs(&self) -> Result; + async fn oldest_nonfinalized_fragments( + &self, + starting_height: u32, + limit: usize, + ) -> Result>; + async fn last_time_a_fragment_was_finalized(&self) -> Result>>; + async fn update_tx_state(&self, hash: [u8; 32], state: TransactionState) -> Result<()>; + } + } +} + +impl Storage for &T { + delegate! 
{ + to (**self) { + async fn insert(&self, submission: BlockSubmission) -> Result<()>; + async fn submission_w_latest_block(&self) -> Result>; + async fn set_submission_completed(&self, fuel_block_hash: [u8; 32]) -> Result; + async fn insert_blocks(&self, block: NonEmpty) -> Result<()>; + async fn missing_blocks( + &self, + starting_height: u32, + current_height: u32, + ) -> Result>>; + async fn lowest_sequence_of_unbundled_blocks( + &self, + starting_height: u32, + limit: usize, + ) -> Result>; + async fn insert_bundle_and_fragments( + &self, + block_range: RangeInclusive, + fragments: NonEmpty, + ) -> Result<()>; + async fn record_pending_tx( + &self, + tx_hash: [u8; 32], + fragment_id: NonEmpty>, + ) -> Result<()>; + async fn get_pending_txs(&self) -> Result>; + async fn has_pending_txs(&self) -> Result; + async fn oldest_nonfinalized_fragments( + &self, + starting_height: u32, + limit: usize, + ) -> Result>; + async fn last_time_a_fragment_was_finalized(&self) -> Result>>; + async fn update_tx_state(&self, hash: [u8; 32], state: TransactionState) -> Result<()>; + } + } +} + +#[cfg(test)] +mod tests { + use nonempty::{nonempty, NonEmpty}; + + use super::*; + + fn create_fuel_block(height: u32) -> FuelBlock { + let mut hash = [0; 32]; + hash[..4].copy_from_slice(&height.to_be_bytes()); + + FuelBlock { + hash, + height, + data: nonempty![0u8], + } + } + + fn create_non_empty_fuel_blocks(block_heights: &[u32]) -> NonEmpty { + block_heights + .iter() + .cloned() + .map(create_fuel_block) + .collect_nonempty() + .unwrap() + } + + // Test: Successful conversion from a valid, sequential list of FuelBlocks + #[test] + fn try_from_with_valid_sequential_blocks_returns_ok() { + // given + let blocks = create_non_empty_fuel_blocks(&[1, 2, 3, 4, 5]); + + // when + let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()); + + // then + assert!( + seq_blocks.is_ok(), + "Conversion should succeed for sequential blocks" + ); + let seq_blocks = seq_blocks.unwrap(); + assert_eq!( + seq_blocks.blocks, blocks, + "SequentialFuelBlocks should contain the original blocks" + ); + } + + // Test: Conversion fails when blocks are not sorted by height + #[test] + fn try_from_with_non_sorted_blocks_returns_error() { + // given + let blocks = create_non_empty_fuel_blocks(&[1, 3, 2, 4, 5]); + + // when + let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()); + + // then + assert!( + seq_blocks.is_err(), + "Conversion should fail for non-sorted blocks" + ); + let error = seq_blocks.unwrap_err(); + assert_eq!( + error.to_string(), + "invalid sequence: blocks are not sorted by height", + "Error message should indicate sorting issue" + ); + } + + // Test: Conversion fails when blocks have gaps in their heights + #[test] + fn try_from_with_non_sequential_blocks_returns_error() { + // given + let blocks = create_non_empty_fuel_blocks(&[1, 2, 4, 5]); + + // when + let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()); + + // then + assert!( + seq_blocks.is_err(), + "Conversion should fail for non-sequential blocks" + ); + let error = seq_blocks.unwrap_err(); + assert_eq!( + error.to_string(), + "invalid sequence: blocks are not sequential by height", + "Error message should indicate sequentiality issue" + ); + } + + // Test: Iterating over SequentialFuelBlocks yields all blocks in order + #[test] + fn iterates_over_sequential_fuel_blocks_correctly() { + // given + let blocks = create_non_empty_fuel_blocks(&[10, 11, 12]); + let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()).unwrap(); + 
+ // when + let collected: Vec = seq_blocks.clone().into_iter().collect(); + + // then + assert_eq!( + collected, + vec![ + create_fuel_block(10), + create_fuel_block(11), + create_fuel_block(12) + ], + "Iterated blocks should match the original sequence" + ); + } + + // Test: Indexing into SequentialFuelBlocks retrieves the correct FuelBlock + #[test] + fn indexing_returns_correct_fuel_block() { + // given + let blocks = create_non_empty_fuel_blocks(&[100, 101, 102, 103]); + let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()).unwrap(); + + // when & Then + assert_eq!( + seq_blocks[0], + create_fuel_block(100), + "First block should match" + ); + assert_eq!( + seq_blocks[1], + create_fuel_block(101), + "Second block should match" + ); + assert_eq!( + seq_blocks[3], + create_fuel_block(103), + "Fourth block should match" + ); + } + + // Test: Accessing an out-of-bounds index panics as expected + #[test] + #[should_panic(expected = "index out of bounds")] + fn indexing_out_of_bounds_panics() { + // given + let blocks = create_non_empty_fuel_blocks(&[1, 2, 3]); + let seq_blocks = SequentialFuelBlocks::try_from(blocks).unwrap(); + + // when + let _ = &seq_blocks[5]; + + // then + // Panic is expected + } + + // Test: len method returns the correct number of blocks + #[test] + fn len_returns_correct_number_of_blocks() { + // given + let blocks = create_non_empty_fuel_blocks(&[7, 8, 9, 10]); + let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()).unwrap(); + + // when + let length = seq_blocks.len(); + + // then + assert_eq!( + length, + NonZeroUsize::new(4).unwrap(), + "Length should be equal to the number of blocks" + ); + } + + // Test: height_range method returns the correct inclusive range + #[test] + fn height_range_returns_correct_range() { + // given + let blocks = create_non_empty_fuel_blocks(&[20, 21, 22, 23]); + let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()).unwrap(); + + // when + let range = seq_blocks.height_range(); + + // then + assert_eq!( + range, + 20..=23, + "Height range should span from the first to the last block's height" + ); + } + + // Test: from_first_sequence includes all blocks when they are sequential + #[test] + fn from_first_sequence_with_all_sequential_blocks_includes_all() { + // given + let blocks = create_non_empty_fuel_blocks(&[5, 6, 7, 8]); + + // when + let seq_blocks = SequentialFuelBlocks::from_first_sequence(blocks.clone()); + + // then + assert_eq!( + seq_blocks.blocks, blocks, + "All sequential blocks should be included" + ); + } + + // Test: from_first_sequence stops at the first gap in block heights + #[test] + fn from_first_sequence_with_gaps_includes_up_to_first_gap() { + // given + let blocks = create_non_empty_fuel_blocks(&[1, 2, 4, 5, 7]); + + // when + let seq_blocks = SequentialFuelBlocks::from_first_sequence(blocks); + + // then + let expected = nonempty![create_fuel_block(1), create_fuel_block(2)]; + assert_eq!( + seq_blocks.blocks, expected, + "Only blocks up to the first gap should be included" + ); + } + + // Test: from_first_sequence correctly handles a single block + #[test] + fn from_first_sequence_with_single_block_includes_it() { + // given + let blocks = nonempty![create_fuel_block(42)]; + + // when + let seq_blocks = SequentialFuelBlocks::from_first_sequence(blocks.clone()); + + // then + assert_eq!( + seq_blocks.blocks, blocks, + "Single block should be included correctly" + ); + } + + // Test: into_inner retrieves the original NonEmpty + #[test] + fn 
into_inner_returns_original_nonempty_blocks() { + // given + let blocks = create_non_empty_fuel_blocks(&[10, 11, 12]); + let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()).unwrap(); + + // when + let inner = seq_blocks.into_inner(); + + // then + assert_eq!( + inner, blocks, + "into_inner should return the original NonEmpty" + ); + } + + // Test: InvalidSequence error displays correctly + #[test] + fn invalid_sequence_display_formats_correctly() { + // given + let error = InvalidSequence::new("test reason".to_string()); + + // when + let display = error.to_string(); + + // then + assert_eq!( + display, "invalid sequence: test reason", + "Error display should match the expected format" + ); + } + + // Test: Single block is always considered sequential + #[test] + fn single_block_is_always_sequential() { + // given + let blocks = nonempty![create_fuel_block(999)]; + + // when + let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()); + + // then + assert!( + seq_blocks.is_ok(), + "Single block should be considered sequential" + ); + let seq_blocks = seq_blocks.unwrap(); + assert_eq!( + seq_blocks.blocks, blocks, + "SequentialFuelBlocks should contain the single block" + ); + } + + // Test: Two blocks with the same height result in an error + #[test] + fn two_blocks_with_same_height_returns_error() { + // given + let blocks = nonempty![create_fuel_block(1), create_fuel_block(1)]; + + // when + let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()); + + // then + assert!( + seq_blocks.is_err(), + "Duplicate heights should result in an error" + ); + let error = seq_blocks.unwrap_err(); + assert_eq!( + error.to_string(), + "invalid sequence: blocks are not sorted by height", + "Error message should indicate sorting issue due to duplicate heights" + ); + } + + // Test: Two blocks with non-consecutive heights result in an error + #[test] + fn two_blocks_with_non_consecutive_heights_returns_error() { + // given + let blocks = nonempty![create_fuel_block(1), create_fuel_block(3)]; + + // when + let seq_blocks = SequentialFuelBlocks::try_from(blocks.clone()); + + // then + assert!( + seq_blocks.is_err(), + "Non-consecutive heights should result in an error" + ); + let error = seq_blocks.unwrap_err(); + assert_eq!( + error.to_string(), + "invalid sequence: blocks are not sequential by height", + "Error message should indicate sequentiality issue" + ); + } } diff --git a/packages/ports/src/types.rs b/packages/ports/src/types.rs index c382fc53..8cfac958 100644 --- a/packages/ports/src/types.rs +++ b/packages/ports/src/types.rs @@ -1,18 +1,23 @@ #[cfg(feature = "l1")] pub use alloy::primitives::{Address, U256}; -#[cfg(feature = "l1")] +#[cfg(any(feature = "l1", feature = "storage"))] pub use futures::Stream; +mod non_empty; +pub use non_empty::*; + mod block_submission; +mod fragment; #[cfg(feature = "l1")] mod fuel_block_committed_on_l1; mod l1_height; +mod serial_id; mod state_submission; pub use block_submission::*; +pub use fragment::*; #[cfg(feature = "l1")] pub use fuel_block_committed_on_l1::*; pub use l1_height::*; +pub use serial_id::*; pub use state_submission::*; -#[cfg(any(feature = "fuel", feature = "l1"))] -pub use validator::block::*; diff --git a/packages/ports/src/types/fragment.rs b/packages/ports/src/types/fragment.rs new file mode 100644 index 00000000..52df079a --- /dev/null +++ b/packages/ports/src/types/fragment.rs @@ -0,0 +1,21 @@ +use std::num::NonZeroU32; + +use crate::types::NonEmpty; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Fragment { 
+    pub data: NonEmpty<u8>,
+    pub unused_bytes: u32,
+    pub total_bytes: NonZeroU32,
+}
+
+impl Fragment {
+    pub fn used_bytes(&self) -> u32 {
+        self.total_bytes.get().saturating_sub(self.unused_bytes)
+    }
+
+    pub fn utilization(&self) -> f64 {
+        self.total_bytes.get().saturating_sub(self.unused_bytes) as f64
+            / self.total_bytes.get() as f64
+    }
+}
diff --git a/packages/ports/src/types/non_empty.rs b/packages/ports/src/types/non_empty.rs
new file mode 100644
index 00000000..ccd4578c
--- /dev/null
+++ b/packages/ports/src/types/non_empty.rs
@@ -0,0 +1,34 @@
+pub use nonempty::{nonempty, NonEmpty};
+
+pub trait CollectNonEmpty: Iterator {
+    fn collect_nonempty(self) -> Option<NonEmpty<Self::Item>>
+    where
+        Self: Sized,
+    {
+        NonEmpty::collect(self)
+    }
+}
+impl<I: Iterator> CollectNonEmpty for I {}
+
+pub trait TryCollectNonEmpty: Iterator<Item = Result<Self::Ok, Self::Err>> {
+    type Ok;
+    type Err;
+
+    fn try_collect_nonempty(self) -> Result<Option<NonEmpty<Self::Ok>>, Self::Err>
+    where
+        Self: Sized,
+        Self::Err: std::error::Error,
+    {
+        let collected: Result<Vec<_>, _> = self.collect();
+        collected.map(NonEmpty::collect)
+    }
+}
+
+impl<T, E, I> TryCollectNonEmpty for I
+where
+    I: Iterator<Item = Result<T, E>>,
+    E: std::error::Error,
+{
+    type Ok = T;
+    type Err = E;
+}
diff --git a/packages/ports/src/types/serial_id.rs b/packages/ports/src/types/serial_id.rs
new file mode 100644
index 00000000..04e194e1
--- /dev/null
+++ b/packages/ports/src/types/serial_id.rs
@@ -0,0 +1,90 @@
+use std::fmt::Display;
+
+#[derive(Debug, Clone)]
+pub struct InvalidConversion {
+    pub message: String,
+}
+
+impl std::fmt::Display for InvalidConversion {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "Invalid numeric id: {}", self.message)
+    }
+}
+
+impl std::error::Error for InvalidConversion {}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub struct NonNegative<NUM> {
+    val: NUM,
+}
+
+impl<NUM: Display> Display for NonNegative<NUM> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.val.fmt(f)
+    }
+}
+
+impl NonNegative<i32> {
+    pub fn as_u32(&self) -> u32 {
+        self.val as u32
+    }
+
+    pub fn as_i32(&self) -> i32 {
+        self.val
+    }
+}
+
+impl NonNegative<i64> {
+    pub fn as_u64(&self) -> u64 {
+        self.val as u64
+    }
+
+    pub fn as_i64(&self) -> i64 {
+        self.val
+    }
+}
+
+impl From<u32> for NonNegative<i64> {
+    fn from(value: u32) -> Self {
+        Self {
+            val: i64::from(value),
+        }
+    }
+}
+
+impl TryFrom<i32> for NonNegative<i32> {
+    type Error = InvalidConversion;
+
+    fn try_from(value: i32) -> Result<Self, Self::Error> {
+        if value < 0 {
+            return Err(InvalidConversion {
+                message: format!("{value} is negative"),
+            });
+        }
+        Ok(Self { val: value })
+    }
+}
+
+impl TryFrom<i64> for NonNegative<i64> {
+    type Error = InvalidConversion;
+    fn try_from(id: i64) -> Result<Self, Self::Error> {
+        if id < 0 {
+            return Err(InvalidConversion {
+                message: format!("{id} is negative"),
+            });
+        }
+        Ok(Self { val: id })
+    }
+}
+
+impl TryFrom<u32> for NonNegative<i32> {
+    type Error = InvalidConversion;
+    fn try_from(id: u32) -> Result<Self, Self::Error> {
+        if id > i32::MAX as u32 {
+            return Err(InvalidConversion {
+                message: format!("{id} is too large for i32"),
+            });
+        }
+        Ok(Self { val: id as i32 })
+    }
+}
diff --git a/packages/ports/src/types/state_submission.rs b/packages/ports/src/types/state_submission.rs
index 9c527a10..c592f97b 100644
--- a/packages/ports/src/types/state_submission.rs
+++ b/packages/ports/src/types/state_submission.rs
@@ -1,28 +1,18 @@
 pub use sqlx::types::chrono::{DateTime, Utc};
 
+use super::NonNegative;
+
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct StateSubmission {
-    pub id: Option<u32>,
+    pub id: Option<NonNegative<i32>>,
     pub block_hash: [u8; 32],
     pub block_height: u32,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct StateFragment {
-    pub id: Option<u32>,
-    pub submission_id: Option<u32>,
-    pub fragment_idx: u32,
     pub data: Vec<u8>,
-    pub created_at: DateTime<Utc>,
-}
-
-impl StateFragment {
-    pub const MAX_FRAGMENT_SIZE: usize = 128 * 1024;
 }
 
 #[derive(Debug, Clone, PartialEq, Eq)]
-pub struct SubmissionTx {
-    pub id: Option<u32>,
+pub struct L1Tx {
+    pub id: Option<u64>,
     pub hash: [u8; 32],
     pub state: TransactionState,
 }
@@ -30,30 +20,10 @@ pub struct SubmissionTx {
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum TransactionState {
     Pending,
-    Finalized,
+    Finalized(DateTime<Utc>),
     Failed,
 }
 
-// Used for DB storage
-impl TransactionState {
-    pub fn into_i16(&self) -> i16 {
-        match self {
-            TransactionState::Pending => 0,
-            TransactionState::Finalized => 1,
-            TransactionState::Failed => 2,
-        }
-    }
-
-    pub fn from_i16(value: i16) -> Option<Self> {
-        match value {
-            0 => Some(Self::Pending),
-            1 => Some(Self::Finalized),
-            2 => Some(Self::Failed),
-            _ => None,
-        }
-    }
-}
-
 pub struct TransactionResponse {
     block_number: u64,
     succeeded: bool,
diff --git a/packages/services/Cargo.toml b/packages/services/Cargo.toml
index 348f0175..6e1513ab 100644
--- a/packages/services/Cargo.toml
+++ b/packages/services/Cargo.toml
@@ -10,24 +10,36 @@ publish = { workspace = true }
 rust-version = { workspace = true }
 
 [dependencies]
-async-trait = { workspace = true }
+flate2 = { workspace = true, features = ["default"] }
+fuel-crypto = { workspace = true }
 futures = { workspace = true }
+hex = { workspace = true }
 itertools = { workspace = true, features = ["use_alloc"] }
 metrics = { workspace = true }
+mockall = { workspace = true, optional = true }
 ports = { workspace = true, features = ["full"] }
 serde = { workspace = true }
 thiserror = { workspace = true }
+tokio = { workspace = true }
 tokio-util = { workspace = true }
 tracing = { workspace = true }
-hex = { workspace = true }
-validator = { workspace = true }
+bytesize = { workspace = true }
+rayon = { workspace = true }
+trait-variant = { workspace = true }
 
 [dev-dependencies]
+clock = { workspace = true, features = ["test-helpers"] }
+delegate = { workspace = true }
+eth = { workspace = true, features = ["test-helpers"] }
 fuel-crypto = { workspace = true, features = ["random"] }
 mockall = { workspace = true }
 ports = { workspace = true, features = ["full", "test-helpers"] }
-rand = { workspace = true }
+pretty_assertions = { workspace = true, features = ["std"] }
+rand = { workspace = true, features = ["small_rng"] }
+services = { workspace = true, features = ["test-helpers"] }
 storage = { workspace = true, features = ["test-helpers"] }
 tai64 = { workspace = true }
 tokio = { workspace = true, features = ["macros"] }
-validator = { workspace = true, features = ["test-helpers"] }
+
+[features]
+test-helpers = ["dep:mockall"]
diff --git a/packages/services/src/block_bundler.rs b/packages/services/src/block_bundler.rs
new file mode 100644
index 00000000..89a1e5ff
--- /dev/null
+++ b/packages/services/src/block_bundler.rs
@@ -0,0 +1,769 @@
+use std::{num::NonZeroUsize, time::Duration};
+
+pub mod bundler;
+
+use bundler::{Bundle, BundleProposal, BundlerFactory};
+use ports::{
+    clock::Clock,
+    storage::Storage,
+    types::{DateTime, Utc},
+};
+use tracing::info;
+
+use crate::{Error, Result, Runner};
+
+#[derive(Debug, Clone, Copy)]
+pub struct Config {
+    pub optimization_time_limit: Duration,
+    pub max_bundles_per_optimization_run: NonZeroUsize,
+    pub block_accumulation_time_limit: Duration,
+    pub num_blocks_to_accumulate: NonZeroUsize,
+    pub lookback_window: u32,
+}
+
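+// Illustrative sketch (added by the editor, not part of the original patch):
+// one way a caller might assemble a `Config`. The concrete values are
+// assumptions chosen for demonstration only, not recommended settings.
+#[allow(dead_code)]
+fn example_config() -> Config {
+    Config {
+        // Give the bundle-size search at most one minute per run.
+        optimization_time_limit: Duration::from_secs(60),
+        // Evaluate up to four candidate bundles per optimization pass.
+        max_bundles_per_optimization_run: NonZeroUsize::new(4).expect("non-zero"),
+        // Wait up to five minutes for more blocks before bundling anyway.
+        block_accumulation_time_limit: Duration::from_secs(300),
+        // Target one hundred blocks per bundle.
+        num_blocks_to_accumulate: NonZeroUsize::new(100).expect("non-zero"),
+        // Ignore blocks more than 1000 heights behind the current tip.
+        lookback_window: 1000,
+    }
+}
+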
+#[cfg(test)] +impl Default for Config { + fn default() -> Self { + Self { + optimization_time_limit: Duration::from_secs(100), + block_accumulation_time_limit: Duration::from_secs(100), + num_blocks_to_accumulate: NonZeroUsize::new(1).unwrap(), + lookback_window: 1000, + max_bundles_per_optimization_run: 1.try_into().unwrap(), + } + } +} + +/// The `BlockBundler` bundles blocks and fragments them. Those fragments are later on submitted to +/// l1 by the [`crate::StateCommitter`] +pub struct BlockBundler { + fuel_api: F, + storage: Storage, + clock: Clock, + bundler_factory: BundlerFactory, + config: Config, + last_time_bundled: DateTime, +} + +impl BlockBundler +where + C: Clock, +{ + /// Creates a new `BlockBundler`. + pub fn new( + fuel_adapter: F, + storage: Storage, + clock: C, + bundler_factory: BF, + config: Config, + ) -> Self { + let now = clock.now(); + + Self { + fuel_api: fuel_adapter, + storage, + clock, + last_time_bundled: now, + bundler_factory, + config, + } + } +} + +impl BlockBundler +where + F: ports::fuel::Api, + Db: Storage, + C: Clock, + BF: BundlerFactory, +{ + async fn bundle_and_fragment_blocks(&mut self) -> Result<()> { + let starting_height = self.get_starting_height().await?; + let Some(blocks) = self + .storage + .lowest_sequence_of_unbundled_blocks( + starting_height, + self.config.num_blocks_to_accumulate.get(), + ) + .await? + else { + return Ok(()); + }; + + let still_time_to_accumulate_more = self.still_time_to_accumulate_more().await?; + if blocks.len() < self.config.num_blocks_to_accumulate && still_time_to_accumulate_more { + info!( + "Not enough blocks ({} < {}) to bundle. Waiting for more to accumulate.", + blocks.len(), + self.config.num_blocks_to_accumulate.get() + ); + + return Ok(()); + } + + if !still_time_to_accumulate_more { + info!("Accumulation time limit reached.",); + } + + info!("Giving {} blocks to the bundler", blocks.len()); + + let bundler = self.bundler_factory.build(blocks).await; + + let BundleProposal { + fragments, + metadata, + } = self.find_optimal_bundle(bundler).await?; + + info!("Bundler proposed: {metadata}"); + + self.storage + .insert_bundle_and_fragments(metadata.block_heights, fragments) + .await?; + + self.last_time_bundled = self.clock.now(); + + Ok(()) + } + + async fn get_starting_height(&self) -> Result { + let current_height = self.fuel_api.latest_height().await?; + let starting_height = current_height.saturating_sub(self.config.lookback_window); + Ok(starting_height) + } + + async fn find_optimal_bundle(&self, mut bundler: B) -> Result { + let optimization_start = self.clock.now(); + + while bundler + .advance(self.config.max_bundles_per_optimization_run) + .await? + { + if self.should_stop_optimizing(optimization_start)? { + info!("Optimization time limit reached! 
Finishing bundling."); + break; + } + } + + bundler.finish().await + } + + async fn still_time_to_accumulate_more(&self) -> Result { + let elapsed = self.elapsed(self.last_time_bundled)?; + + Ok(elapsed < self.config.block_accumulation_time_limit) + } + + fn elapsed(&self, point: DateTime) -> Result { + let now = self.clock.now(); + let elapsed = now + .signed_duration_since(point) + .to_std() + .map_err(|e| Error::Other(format!("could not calculate elapsed time: {e}")))?; + Ok(elapsed) + } + + fn should_stop_optimizing(&self, start_of_optimization: DateTime) -> Result { + let elapsed = self.elapsed(start_of_optimization)?; + + Ok(elapsed >= self.config.optimization_time_limit) + } +} + +impl Runner for BlockBundler +where + F: ports::fuel::Api + Send + Sync, + Db: Storage + Clone + Send + Sync, + C: Clock + Send + Sync, + BF: BundlerFactory + Send + Sync, +{ + async fn run(&mut self) -> Result<()> { + self.bundle_and_fragment_blocks().await?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use bundler::Metadata; + use clock::TestClock; + use eth::Eip4844BlobEncoder; + use itertools::Itertools; + use ports::{ + l1::FragmentEncoder, + storage::SequentialFuelBlocks, + types::{nonempty, CollectNonEmpty, Fragment, NonEmpty}, + }; + use tokio::sync::{ + mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, + Mutex, + }; + + use super::*; + use crate::{ + test_utils::{self, encode_and_merge, mocks, Blocks, ImportedBlocks}, + CompressionLevel, + }; + + struct ControllableBundler { + can_advance: UnboundedReceiver<()>, + notify_advanced: UnboundedSender<()>, + proposal: Option, + } + + impl ControllableBundler { + pub fn create( + proposal: Option, + ) -> (Self, UnboundedSender<()>, UnboundedReceiver<()>) { + let (send_can_advance, recv_can_advance) = unbounded_channel::<()>(); + let (notify_advanced, recv_advanced_notif) = unbounded_channel::<()>(); + ( + Self { + can_advance: recv_can_advance, + notify_advanced, + proposal, + }, + send_can_advance, + recv_advanced_notif, + ) + } + } + + impl Bundle for ControllableBundler { + async fn advance(&mut self, _: NonZeroUsize) -> Result { + self.can_advance.recv().await.unwrap(); + self.notify_advanced.send(()).unwrap(); + Ok(true) + } + + async fn finish(self) -> Result { + Ok(self.proposal.expect( + "proposal to be set inside controllable bundler if it ever was meant to finish", + )) + } + } + + struct ControllableBundlerFactory { + bundler: Mutex>, + } + + impl ControllableBundlerFactory { + pub fn setup( + proposal: Option, + ) -> (Self, UnboundedSender<()>, UnboundedReceiver<()>) { + let (bundler, send_can_advance, receive_advanced) = + ControllableBundler::create(proposal); + ( + Self { + bundler: Mutex::new(Some(bundler)), + }, + send_can_advance, + receive_advanced, + ) + } + } + + impl BundlerFactory for ControllableBundlerFactory { + type Bundler = ControllableBundler; + + async fn build(&self, _: SequentialFuelBlocks) -> Self::Bundler { + self.bundler.lock().await.take().unwrap() + } + } + + fn default_bundler_factory() -> bundler::Factory { + bundler::Factory::new( + Eip4844BlobEncoder, + CompressionLevel::Disabled, + 1.try_into().unwrap(), + ) + } + + #[tokio::test] + async fn does_nothing_if_not_enough_blocks() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + setup + .import_blocks(Blocks::WithHeights { + range: 0..=0, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + + let num_blocks_to_accumulate = 2.try_into().unwrap(); + + let mock_fuel_api = test_utils::mocks::fuel::latest_height_is(0); + + 
let mut block_bundler = BlockBundler::new( + mock_fuel_api, + setup.db(), + TestClock::default(), + default_bundler_factory(), + Config { + num_blocks_to_accumulate, + lookback_window: 0, // Adjust lookback_window as needed + ..Config::default() + }, + ); + + // when + block_bundler.run().await?; + + // then + assert!(setup + .db() + .oldest_nonfinalized_fragments(0, 1) + .await? + .is_empty()); + + Ok(()) + } + + #[tokio::test] + async fn stops_accumulating_blocks_if_time_runs_out_measured_from_component_creation( + ) -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + + let ImportedBlocks { + fuel_blocks: blocks, + .. + } = setup + .import_blocks(Blocks::WithHeights { + range: 0..=0, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + let data = encode_and_merge(blocks.clone()); + let expected_fragments = Eip4844BlobEncoder.encode(data).unwrap(); + + let clock = TestClock::default(); + + let latest_height = blocks.last().header.height; + let mock_fuel_api = test_utils::mocks::fuel::latest_height_is(latest_height); + + let mut block_bundler = BlockBundler::new( + mock_fuel_api, + setup.db(), + clock.clone(), + default_bundler_factory(), + Config { + block_accumulation_time_limit: Duration::from_secs(1), + num_blocks_to_accumulate: 2.try_into().unwrap(), + lookback_window: 0, + ..Default::default() + }, + ); + + clock.advance_time(Duration::from_secs(2)); + + // when + block_bundler.run().await?; + + // then + let fragments = setup + .db() + .oldest_nonfinalized_fragments(0, 1) + .await? + .into_iter() + .map(|f| f.fragment) + .collect_nonempty() + .unwrap(); + + assert_eq!(fragments, expected_fragments); + + assert!(setup + .db() + .lowest_sequence_of_unbundled_blocks(blocks.last().header.height, 1) + .await? + .is_none()); + + Ok(()) + } + + #[tokio::test] + async fn stops_accumulating_blocks_if_time_runs_out_measured_from_last_bundle_time( + ) -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + + let clock = TestClock::default(); + + let ImportedBlocks { fuel_blocks, .. } = setup + .import_blocks(Blocks::WithHeights { + range: 1..=3, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + + let mut block_bundler = BlockBundler::new( + mocks::fuel::latest_height_is(fuel_blocks.last().header.height), + setup.db(), + clock.clone(), + default_bundler_factory(), + Config { + block_accumulation_time_limit: Duration::from_secs(10), + num_blocks_to_accumulate: 2.try_into().unwrap(), + ..Default::default() + }, + ); + let fuel_blocks = Vec::from(fuel_blocks); + + block_bundler.run().await?; + clock.advance_time(Duration::from_secs(10)); + + // when + block_bundler.run().await?; + + // then + let first_bundle = + encode_and_merge(NonEmpty::from_vec(fuel_blocks[0..=1].to_vec()).unwrap()); + let first_bundle_fragments = Eip4844BlobEncoder.encode(first_bundle).unwrap(); + + let second_bundle = + encode_and_merge(NonEmpty::from_vec(fuel_blocks[2..=2].to_vec()).unwrap()); + let second_bundle_fragments = Eip4844BlobEncoder.encode(second_bundle).unwrap(); + + let unsubmitted_fragments = setup + .db() + .oldest_nonfinalized_fragments(0, 2) + .await? 
+ .into_iter() + .map(|f| f.fragment.clone()) + .collect_nonempty() + .unwrap(); + + let expected_fragments = first_bundle_fragments + .into_iter() + .chain(second_bundle_fragments) + .collect_nonempty() + .unwrap(); + assert_eq!(unsubmitted_fragments, expected_fragments); + + Ok(()) + } + + #[tokio::test] + async fn doesnt_bundle_more_than_accumulation_blocks() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + + let ImportedBlocks { + fuel_blocks: blocks, + .. + } = setup + .import_blocks(Blocks::WithHeights { + range: 0..=2, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + + let first_two_blocks = blocks.iter().take(2).cloned().collect_nonempty().unwrap(); + let bundle_data = test_utils::encode_and_merge(first_two_blocks.clone()); + let fragments = Eip4844BlobEncoder.encode(bundle_data).unwrap(); + + let mut block_bundler = BlockBundler::new( + test_utils::mocks::fuel::latest_height_is(2), + setup.db(), + TestClock::default(), + default_bundler_factory(), + Config { + num_blocks_to_accumulate: 2.try_into().unwrap(), + ..Default::default() + }, + ); + + // when + block_bundler.run().await?; + + // then + let unsubmitted_fragments = setup + .db() + .oldest_nonfinalized_fragments(0, 10) + .await? + .into_iter() + .map(|f| f.fragment) + .collect_nonempty() + .unwrap(); + + assert_eq!(unsubmitted_fragments, fragments); + + Ok(()) + } + + #[tokio::test] + async fn doesnt_bundle_already_bundled_blocks() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + + let ImportedBlocks { + fuel_blocks: blocks, + .. + } = setup + .import_blocks(Blocks::WithHeights { + range: 0..=1, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + + let block_1 = nonempty![blocks.first().clone()]; + let bundle_1 = test_utils::encode_and_merge(block_1.clone()); + let fragments_1 = Eip4844BlobEncoder.encode(bundle_1).unwrap(); + + let block_2 = nonempty![blocks.last().clone()]; + let bundle_2 = test_utils::encode_and_merge(block_2.clone()); + let fragments_2 = Eip4844BlobEncoder.encode(bundle_2).unwrap(); + + let mut bundler = BlockBundler::new( + test_utils::mocks::fuel::latest_height_is(1), + setup.db(), + TestClock::default(), + default_bundler_factory(), + Config { + num_blocks_to_accumulate: 1.try_into().unwrap(), + ..Default::default() + }, + ); + + bundler.run().await?; + + // when + bundler.run().await?; + + // then + let unsubmitted_fragments = setup + .db() + .oldest_nonfinalized_fragments(0, usize::MAX) + .await?; + let fragments = unsubmitted_fragments + .iter() + .map(|f| f.fragment.clone()) + .collect::>(); + let all_fragments = fragments_1.into_iter().chain(fragments_2).collect_vec(); + assert_eq!(fragments, all_fragments); + + Ok(()) + } + + #[tokio::test] + async fn stops_advancing_if_optimization_time_ran_out() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + setup + .import_blocks(Blocks::WithHeights { + range: 0..=0, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + + let unoptimal_fragments = nonempty![Fragment { + data: test_utils::random_data(100usize), + unused_bytes: 1000, + total_bytes: 50.try_into().unwrap(), + }]; + + let unoptimal_bundle = BundleProposal { + fragments: unoptimal_fragments.clone(), + metadata: Metadata { + block_heights: 0..=0, + known_to_be_optimal: false, + gas_usage: 100, + optimization_attempts: 10, + compressed_data_size: 100.try_into().unwrap(), + uncompressed_data_size: 1000.try_into().unwrap(), + num_fragments: 1.try_into().unwrap(), + }, + }; + + let (bundler_factory, 
send_can_advance_permission, mut notify_has_advanced) = + ControllableBundlerFactory::setup(Some(unoptimal_bundle)); + + let test_clock = TestClock::default(); + + let optimization_timeout = Duration::from_secs(1); + + let mut block_bundler = BlockBundler::new( + test_utils::mocks::fuel::latest_height_is(0), + setup.db(), + test_clock.clone(), + bundler_factory, + Config { + optimization_time_limit: optimization_timeout, + ..Config::default() + }, + ); + + let block_bundler_handle = tokio::spawn(async move { + block_bundler.run().await.unwrap(); + }); + + // when + // Unblock the bundler + send_can_advance_permission.send(()).unwrap(); + notify_has_advanced.recv().await.unwrap(); + + // Advance the clock to exceed the optimization time limit + test_clock.advance_time(Duration::from_secs(1)); + + send_can_advance_permission.send(()).unwrap(); + + // then + // Wait for the BlockBundler task to complete + block_bundler_handle.await.unwrap(); + + Ok(()) + } + + #[tokio::test] + async fn doesnt_stop_advancing_if_there_is_still_time_to_optimize() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + setup + .import_blocks(Blocks::WithHeights { + range: 0..=0, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + + let (bundler_factory, send_can_advance, _notify_advanced) = + ControllableBundlerFactory::setup(None); + + // Create a TestClock + let test_clock = TestClock::default(); + + // Create the BlockBundler + let optimization_timeout = Duration::from_secs(1); + + let mut block_bundler = BlockBundler::new( + test_utils::mocks::fuel::latest_height_is(0), + setup.db(), + test_clock.clone(), + bundler_factory, + Config { + optimization_time_limit: optimization_timeout, + lookback_window: 0, + ..Config::default() + }, + ); + + // Spawn the BlockBundler run method in a separate task + let block_bundler_handle = tokio::spawn(async move { + block_bundler.run().await.unwrap(); + }); + + // Advance the clock but not beyond the optimization time limit + test_clock.advance_time(Duration::from_millis(500)); + + // when + for _ in 0..100 { + send_can_advance.send(()).unwrap(); + } + // then + let res = tokio::time::timeout(Duration::from_millis(500), block_bundler_handle).await; + + assert!(res.is_err(), "expected a timeout"); + + Ok(()) + } + + #[tokio::test] + async fn skips_blocks_outside_lookback_window() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + + let ImportedBlocks { + fuel_blocks: blocks, + .. 
+ } = setup + .import_blocks(Blocks::WithHeights { + range: 0..=3, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + + let lookback_window = 2; + let latest_height = 5u32; + + let starting_height = latest_height.saturating_sub(lookback_window); + + let blocks_to_bundle: Vec<_> = blocks + .iter() + .filter(|block| block.header.height >= starting_height) + .cloned() + .collect(); + + assert_eq!( + blocks_to_bundle.len(), + 1, + "Expected only one block to be within the lookback window" + ); + assert_eq!( + blocks_to_bundle[0].header.height, 3, + "Expected block at height 3 to be within the lookback window" + ); + + // Encode the blocks to be bundled + let data = encode_and_merge(NonEmpty::from_vec(blocks_to_bundle.clone()).unwrap()); + let expected_fragments = Eip4844BlobEncoder.encode(data).unwrap(); + + let mut block_bundler = BlockBundler::new( + test_utils::mocks::fuel::latest_height_is(latest_height), + setup.db(), + TestClock::default(), + default_bundler_factory(), + Config { + num_blocks_to_accumulate: 1.try_into().unwrap(), + lookback_window, + ..Default::default() + }, + ); + + // when + block_bundler.run().await?; + + // then + let unsubmitted_fragments = setup + .db() + .oldest_nonfinalized_fragments(0, usize::MAX) + .await?; + let fragments = unsubmitted_fragments + .iter() + .map(|f| f.fragment.clone()) + .collect_nonempty() + .unwrap(); + + assert_eq!( + fragments, expected_fragments, + "Only blocks within the lookback window should be bundled" + ); + + // Ensure that blocks outside the lookback window are still unbundled + let unbundled_blocks = setup + .db() + .lowest_sequence_of_unbundled_blocks(0, 10) + .await? + .unwrap(); + + let unbundled_block_heights: Vec<_> = unbundled_blocks + .into_inner() + .iter() + .map(|b| b.height) + .collect(); + + assert_eq!( + unbundled_block_heights, + vec![0, 1, 2], + "Blocks outside the lookback window should remain unbundled" + ); + + Ok(()) + } +} diff --git a/packages/services/src/block_bundler/bundler.rs b/packages/services/src/block_bundler/bundler.rs new file mode 100644 index 00000000..f94ba2c7 --- /dev/null +++ b/packages/services/src/block_bundler/bundler.rs @@ -0,0 +1,641 @@ +use std::{ + cmp::min, collections::VecDeque, fmt::Display, io::Write, num::NonZeroUsize, + ops::RangeInclusive, str::FromStr, +}; + +use bytesize::ByteSize; +use flate2::{write::GzEncoder, Compression}; +use itertools::Itertools; +use ports::{ + l1::FragmentEncoder, + storage::SequentialFuelBlocks, + types::{CollectNonEmpty, Fragment, NonEmpty}, +}; +use rayon::prelude::*; + +use crate::Result; + +#[derive(Debug, Clone, Copy)] +struct Compressor { + compression: Option, +} + +#[derive(Debug, Clone, Copy)] +#[allow(dead_code)] +pub enum CompressionLevel { + Disabled, + Min, + Level1, + Level2, + Level3, + Level4, + Level5, + Level6, + Level7, + Level8, + Level9, + Max, +} + +impl<'a> serde::Deserialize<'a> for CompressionLevel { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'a>, + { + let as_string = String::deserialize(deserializer)?; + + CompressionLevel::from_str(&as_string) + .map_err(|e| serde::de::Error::custom(format!("Invalid compression level: {e}"))) + } +} + +impl FromStr for CompressionLevel { + type Err = crate::Error; + + fn from_str(s: &str) -> std::result::Result { + match s.to_lowercase().as_str() { + "disabled" => Ok(Self::Disabled), + "min" => Ok(Self::Min), + "level1" => Ok(Self::Level1), + "level2" => Ok(Self::Level2), + "level3" => Ok(Self::Level3), + "level4" => Ok(Self::Level4), + 
"level5" => Ok(Self::Level5), + "level6" => Ok(Self::Level6), + "level7" => Ok(Self::Level7), + "level8" => Ok(Self::Level8), + "level9" => Ok(Self::Level9), + "max" => Ok(Self::Max), + _ => Err(crate::Error::Other(format!( + "Invalid compression level: {s}" + ))), + } + } +} + +impl CompressionLevel { + pub fn levels() -> Vec { + vec![ + Self::Disabled, + Self::Min, + Self::Level1, + Self::Level2, + Self::Level3, + Self::Level4, + Self::Level5, + Self::Level6, + Self::Level7, + Self::Level8, + Self::Level9, + Self::Max, + ] + } +} + +impl Default for Compressor { + fn default() -> Self { + Self::new(CompressionLevel::Level6) + } +} + +impl Compressor { + #[cfg(test)] + pub fn no_compression() -> Self { + Self::new(CompressionLevel::Disabled) + } + + pub fn new(level: CompressionLevel) -> Self { + let level = match level { + CompressionLevel::Disabled => None, + CompressionLevel::Min => Some(0), + CompressionLevel::Level1 => Some(1), + CompressionLevel::Level2 => Some(2), + CompressionLevel::Level3 => Some(3), + CompressionLevel::Level4 => Some(4), + CompressionLevel::Level5 => Some(5), + CompressionLevel::Level6 => Some(6), + CompressionLevel::Level7 => Some(7), + CompressionLevel::Level8 => Some(8), + CompressionLevel::Level9 => Some(9), + CompressionLevel::Max => Some(10), + }; + + Self { + compression: level.map(Compression::new), + } + } + + pub fn compress(&self, data: NonEmpty) -> Result> { + let Some(level) = self.compression else { + return Ok(data); + }; + + let bytes = Vec::from(data); + + let mut encoder = GzEncoder::new(Vec::new(), level); + encoder + .write_all(&bytes) + .map_err(|e| crate::Error::Other(e.to_string()))?; + + encoder + .finish() + .map_err(|e| crate::Error::Other(e.to_string()))? + .into_iter() + .collect_nonempty() + .ok_or_else(|| crate::Error::Other("compression resulted in no data".to_string())) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct Metadata { + pub block_heights: RangeInclusive, + pub known_to_be_optimal: bool, + pub optimization_attempts: u64, + pub gas_usage: u64, + pub compressed_data_size: NonZeroUsize, + pub uncompressed_data_size: NonZeroUsize, + pub num_fragments: NonZeroUsize, +} + +impl Display for Metadata { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Metadata") + .field("num_blocks", &self.block_heights.clone().count()) + .field("block_heights", &self.block_heights) + .field("known_to_be_optimal", &self.known_to_be_optimal) + .field("optimization_attempts", &self.optimization_attempts) + .field("gas_usage", &self.gas_usage) + .field( + "compressed_data_size", + &ByteSize(self.compressed_data_size.get() as u64), + ) + .field( + "uncompressed_data_size", + &ByteSize(self.uncompressed_data_size.get() as u64), + ) + .field( + "compression_ratio", + &(self.uncompressed_data_size.get() as f64 + / self.compressed_data_size.get() as f64), + ) + .field("num_fragments", &self.num_fragments.get()) + .finish() + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct BundleProposal { + pub fragments: NonEmpty, + pub metadata: Metadata, +} + +#[trait_variant::make(Send)] +#[cfg_attr(feature = "test-helpers", mockall::automock)] +pub trait Bundle { + /// Attempts to advance the bundler by trying out a new bundle configuration. + /// + /// Returns `true` if there are more configurations to process, or `false` otherwise. + async fn advance(&mut self, num_concurrent: NonZeroUsize) -> Result; + + /// Finalizes the bundling process by selecting the best bundle based on current gas prices. 
+    ///
+    /// Consumes the bundler.
+    async fn finish(self) -> Result<BundleProposal>;
+}
+
+#[trait_variant::make(Send)]
+pub trait BundlerFactory {
+    type Bundler: Bundle + Send + Sync;
+    async fn build(&self, blocks: SequentialFuelBlocks) -> Self::Bundler;
+}
+
+pub struct Factory<GasCalculator> {
+    gas_calc: GasCalculator,
+    compression_level: CompressionLevel,
+    step_size: NonZeroUsize,
+}
+
+impl<L1> Factory<L1> {
+    pub fn new(gas_calc: L1, compression_level: CompressionLevel, step_size: NonZeroUsize) -> Self {
+        Self {
+            gas_calc,
+            compression_level,
+            step_size,
+        }
+    }
+}
+
+impl<GasCalculator> BundlerFactory for Factory<GasCalculator>
+where
+    GasCalculator: ports::l1::FragmentEncoder + Clone + Send + Sync + 'static,
+{
+    type Bundler = Bundler<GasCalculator>;
+
+    async fn build(&self, blocks: SequentialFuelBlocks) -> Self::Bundler {
+        Bundler::new(
+            self.gas_calc.clone(),
+            blocks,
+            Compressor::new(self.compression_level),
+            self.step_size,
+        )
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+struct Proposal {
+    block_heights: RangeInclusive<u32>,
+    uncompressed_data_size: NonZeroUsize,
+    compressed_data: NonEmpty<u8>,
+    gas_usage: u64,
+}
+
+impl Proposal {
+    fn gas_per_uncompressed_byte(&self) -> f64 {
+        self.gas_usage as f64 / self.uncompressed_data_size.get() as f64
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct Bundler<FragmentEncoder> {
+    fragment_encoder: FragmentEncoder,
+    blocks: NonEmpty<ports::storage::FuelBlock>,
+    best_proposal: Option<Proposal>,
+    compressor: Compressor,
+    attempts: VecDeque<NonZeroUsize>,
+}
+
+impl<T> Bundler<T> {
+    fn new(
+        cost_calculator: T,
+        blocks: SequentialFuelBlocks,
+        compressor: Compressor,
+        initial_step_size: NonZeroUsize,
+    ) -> Self {
+        let max_blocks = blocks.len();
+        let initial_step = initial_step_size;
+
+        let attempts = generate_attempts(max_blocks, initial_step);
+
+        Self {
+            fragment_encoder: cost_calculator,
+            blocks: blocks.into_inner(),
+            best_proposal: None,
+            compressor,
+            attempts,
+        }
+    }
+
+    fn save_if_best_so_far(&mut self, new_proposal: Proposal) {
+        match &mut self.best_proposal {
+            Some(best)
+                if new_proposal.gas_per_uncompressed_byte() < best.gas_per_uncompressed_byte() =>
+            {
+                *best = new_proposal;
+            }
+            None => {
+                self.best_proposal = Some(new_proposal);
+            }
+            _ => {}
+        }
+    }
+
+    fn blocks_for_new_proposal(
+        &self,
+        block_count: NonZeroUsize,
+    ) -> NonEmpty<ports::storage::FuelBlock> {
+        self.blocks
+            .iter()
+            .take(block_count.get())
+            .cloned()
+            .collect_nonempty()
+            .expect("non-empty")
+    }
+
+    fn blocks_bundles_for_analyzing(
+        &mut self,
+        num_concurrent: std::num::NonZero<usize>,
+    ) -> Vec<NonEmpty<ports::storage::FuelBlock>> {
+        let mut blocks_for_attempts = vec![];
+
+        while !self.attempts.is_empty() && blocks_for_attempts.len() < num_concurrent.get() {
+            let block_count = self.attempts.pop_front().expect("not empty");
+            let blocks = self.blocks_for_new_proposal(block_count);
+            blocks_for_attempts.push(blocks);
+        }
+        blocks_for_attempts
+    }
+
+    async fn analyze(&mut self, num_concurrent: std::num::NonZero<usize>) -> Result<Vec<Proposal>>
+    where
+        T: ports::l1::FragmentEncoder + Send + Sync + Clone + 'static,
+    {
+        let blocks_for_analyzing = self.blocks_bundles_for_analyzing(num_concurrent);
+
+        let compressor = self.compressor;
+        let fragment_encoder = self.fragment_encoder.clone();
+
+        // Needs to be wrapped in a blocking task to avoid blocking the executor
+        tokio::task::spawn_blocking(move || {
+            blocks_for_analyzing
+                .into_par_iter()
+                .map(|blocks| {
+                    let fragment_encoder = fragment_encoder.clone();
+                    create_proposal(compressor, fragment_encoder, blocks)
+                })
+                .collect::<Result<Vec<_>>>()
+        })
+        .await
+        .map_err(|e| crate::Error::Other(e.to_string()))?
+    }
+}
+
+impl<T> Bundle for Bundler<T>
+where
+    T: ports::l1::FragmentEncoder + Send + Sync + Clone + 'static,
+{
+    async fn advance(&mut self, optimization_runs: NonZeroUsize) -> Result<bool> {
+        if self.attempts.is_empty() {
+            return Ok(false);
+        }
+
+        for proposal in self.analyze(optimization_runs).await? {
+            self.save_if_best_so_far(proposal);
+        }
+
+        Ok(!self.attempts.is_empty())
+    }
+
+    async fn finish(mut self) -> Result<BundleProposal> {
+        if self.best_proposal.is_none() {
+            self.advance(1.try_into().expect("not zero")).await?;
+        }
+
+        let best_proposal = self
+            .best_proposal
+            .take()
+            .expect("advance should have set the best proposal");
+
+        let compressed_data_size = best_proposal.compressed_data.len_nonzero();
+        let fragments = self
+            .fragment_encoder
+            .encode(best_proposal.compressed_data)?;
+
+        let num_attempts = self
+            .blocks
+            .len()
+            .saturating_sub(self.attempts.len())
+            .try_into()
+            .map_err(|_| crate::Error::Other("too many attempts".to_string()))?;
+
+        Ok(BundleProposal {
+            metadata: Metadata {
+                block_heights: best_proposal.block_heights,
+                known_to_be_optimal: self.attempts.is_empty(),
+                uncompressed_data_size: best_proposal.uncompressed_data_size,
+                compressed_data_size,
+                gas_usage: best_proposal.gas_usage,
+                optimization_attempts: num_attempts,
+                num_fragments: fragments.len_nonzero(),
+            },
+            fragments,
+        })
+    }
+}
+
+// The step sizes are progressively halved, starting from the largest step, in order to explore
+// bundling opportunities more efficiently. Larger steps attempt to bundle more blocks together,
+// which can result in significant gas savings or better compression ratios early on.
+// By starting with the largest step, we cover more ground and are more likely to encounter
+// major improvements quickly. As the step size decreases, the search becomes more fine-tuned,
+// focusing on incremental changes. This ensures that even if we stop optimizing early, we will
+// have tested configurations that likely provide substantial benefits to the overall gas per byte.
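+//
+// For example, `generate_attempts(10, 4)` yields the block counts
+// [10, 6, 2, 8, 4, 9, 7, 5, 3, 1]: the pass with step 4 probes 10, 6 and 2 blocks,
+// the step-2 pass fills in 8 and 4, and the final step-1 pass covers the remaining
+// counts 9, 7, 5, 3 and 1 (duplicates are dropped).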
+fn generate_attempts(
+    max_blocks: NonZeroUsize,
+    initial_step: NonZeroUsize,
+) -> VecDeque<NonZeroUsize> {
+    std::iter::successors(Some(min(initial_step, max_blocks).get()), |&step| {
+        (step > 1).then_some(step / 2)
+    })
+    .flat_map(|step| (1..=max_blocks.get()).rev().step_by(step))
+    .filter_map(NonZeroUsize::new)
+    .unique()
+    .collect()
+}
+
+fn merge_block_data(blocks: NonEmpty<ports::storage::FuelBlock>) -> NonEmpty<u8> {
+    blocks
+        .into_iter()
+        .flat_map(|b| b.data)
+        .collect_nonempty()
+        .expect("non-empty")
+}
+
+fn create_proposal(
+    compressor: Compressor,
+    fragment_encoder: impl FragmentEncoder,
+    bundle_blocks: NonEmpty<ports::storage::FuelBlock>,
+) -> Result<Proposal> {
+    let block_heights = bundle_blocks.first().height..=bundle_blocks.last().height;
+
+    let uncompressed_data = merge_block_data(bundle_blocks);
+    let uncompressed_data_size = uncompressed_data.len_nonzero();
+
+    let compressed_data = compressor.compress(uncompressed_data)?;
+
+    let gas_usage = fragment_encoder.gas_usage(compressed_data.len_nonzero());
+
+    Ok(Proposal {
+        uncompressed_data_size,
+        compressed_data,
+        gas_usage,
+        block_heights,
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use std::num::NonZeroUsize;
+
+    use eth::Eip4844BlobEncoder;
+    use fuel_crypto::SecretKey;
+    use ports::{l1::FragmentEncoder, types::nonempty};
+
+    use super::*;
+    use crate::test_utils::mocks::fuel::{generate_storage_block, generate_storage_block_sequence};
+
+    #[test]
+    fn can_disable_compression() {
+        // given
+        let compressor = Compressor::new(CompressionLevel::Disabled);
+        let data = nonempty!(1, 2, 3);
+
+        // when
+        let compressed = compressor.compress(data.clone()).unwrap();
+
+        // then
+        assert_eq!(data, compressed);
+    }
+
+    #[test]
+    fn all_compression_levels_work() {
+        let data = nonempty!(1, 2, 3);
+        for level in CompressionLevel::levels() {
+            let compressor = Compressor::new(level);
+            compressor.compress(data.clone()).unwrap();
+        }
+    }
+
+    #[tokio::test]
+    async fn finishing_will_advance_if_not_called_at_least_once() {
+        // given
+        let secret_key = SecretKey::random(&mut rand::thread_rng());
+        let blocks = generate_storage_block_sequence(0..=10, &secret_key, 10, 100);
+
+        let bundler = Bundler::new(
+            Eip4844BlobEncoder,
+            blocks.clone(),
+            Compressor::no_compression(),
+            NonZeroUsize::new(1).unwrap(),
+        );
+
+        // when
+        let bundle = bundler.finish().await.unwrap();
+
+        // then
+        let merged = blocks.into_inner().flat_map(|b| b.data.clone());
+        let expected_fragments = Eip4844BlobEncoder.encode(merged).unwrap();
+        assert!(!bundle.metadata.known_to_be_optimal);
+        assert_eq!(bundle.metadata.block_heights, 0..=10);
+        assert_eq!(bundle.fragments, expected_fragments);
+    }
+
+    #[tokio::test]
+    async fn will_provide_a_suboptimal_bundle_if_not_advanced_enough() -> Result<()> {
+        // given
+        let secret_key = SecretKey::random(&mut rand::thread_rng());
+
+        let stops_at_blob_boundary =
+            generate_storage_block(0, &secret_key, 1, enough_bytes_to_almost_fill_a_blob());
+
+        let requires_new_blob_but_doesnt_utilize_it =
+            generate_storage_block(1, &secret_key, 1, enough_bytes_to_almost_fill_a_blob() / 3);
+
+        let blocks: SequentialFuelBlocks = nonempty![
+            stops_at_blob_boundary,
+            requires_new_blob_but_doesnt_utilize_it
+        ]
+        .try_into()
+        .unwrap();
+
+        let mut bundler = Bundler::new(
+            Eip4844BlobEncoder,
+            blocks.clone(),
+            Compressor::no_compression(),
+            NonZeroUsize::new(1).unwrap(),
+        );
+
+        bundler.advance(1.try_into().unwrap()).await?;
+
+        let non_optimal_bundle = bundler.clone().finish().await?;
+        bundler.advance(1.try_into().unwrap()).await?;
+
+        // when
+        let optimal_bundle =
bundler.finish().await?; + + // then + // Non-optimal bundle should include both blocks + assert_eq!(non_optimal_bundle.metadata.block_heights, 0..=1); + assert!(!non_optimal_bundle.metadata.known_to_be_optimal); + + // Optimal bundle should include only the first block + assert_eq!(optimal_bundle.metadata.block_heights, 0..=0); + assert!(optimal_bundle.metadata.known_to_be_optimal); + + Ok(()) + } + + #[tokio::test] + async fn tolerates_step_too_large() -> Result<()> { + // given + let secret_key = SecretKey::random(&mut rand::thread_rng()); + + let blocks = generate_storage_block_sequence(0..=2, &secret_key, 3, 100); + + let step_size = NonZeroUsize::new(5).unwrap(); // Step size larger than number of blocks + + let mut bundler = Bundler::new( + Eip4844BlobEncoder, + blocks.clone(), + Compressor::no_compression(), + step_size, + ); + + while bundler.advance(1.try_into().unwrap()).await? {} + + // when + let bundle = bundler.finish().await?; + + // then + assert!(bundle.metadata.known_to_be_optimal); + assert_eq!(bundle.metadata.block_heights, 0..=2); + assert_eq!(bundle.metadata.optimization_attempts, 3); // 3 then 1 then 2 + Ok(()) + } + + // when the smaller bundle doesn't utilize the whole blob, for example + #[tokio::test] + async fn bigger_bundle_will_have_same_storage_gas_usage() -> Result<()> { + // given + let secret_key = SecretKey::random(&mut rand::thread_rng()); + + let blocks = nonempty![ + generate_storage_block(0, &secret_key, 0, 100), + generate_storage_block(1, &secret_key, 1, enough_bytes_to_almost_fill_a_blob()) + ]; + + let mut bundler = Bundler::new( + Eip4844BlobEncoder, + blocks.clone().try_into().unwrap(), + Compressor::no_compression(), + NonZeroUsize::new(1).unwrap(), // Default step size + ); + while bundler.advance(1.try_into().unwrap()).await? 
{} + + // when + let bundle = bundler.finish().await?; + + // then + assert!(bundle.metadata.known_to_be_optimal); + assert_eq!(bundle.metadata.block_heights, 0..=1); + Ok(()) + } + + fn enough_bytes_to_almost_fill_a_blob() -> usize { + let encoding_overhead = Eip4844BlobEncoder::FRAGMENT_SIZE as f64 * 0.04; + Eip4844BlobEncoder::FRAGMENT_SIZE - encoding_overhead as usize + } + #[test] + fn generates_steps_as_expected() { + // given + let max_steps = 100; + let max_step = 20; + + // when + let steps = generate_attempts( + NonZeroUsize::new(max_steps).unwrap(), + NonZeroUsize::new(max_step).unwrap(), + ); + + // then + let actual_steps = steps.into_iter().map(|s| s.get()).collect::>(); + let expected_steps = vec![ + 100, 80, 60, 40, 20, 90, 70, 50, 30, 10, 95, 85, 75, 65, 55, 45, 35, 25, 15, 5, 98, 96, + 94, 92, 88, 86, 84, 82, 78, 76, 74, 72, 68, 66, 64, 62, 58, 56, 54, 52, 48, 46, 44, 42, + 38, 36, 34, 32, 28, 26, 24, 22, 18, 16, 14, 12, 8, 6, 4, 2, 99, 97, 93, 91, 89, 87, 83, + 81, 79, 77, 73, 71, 69, 67, 63, 61, 59, 57, 53, 51, 49, 47, 43, 41, 39, 37, 33, 31, 29, + 27, 23, 21, 19, 17, 13, 11, 9, 7, 3, 1, + ]; + + assert_eq!(actual_steps, expected_steps); + } +} diff --git a/packages/services/src/block_committer.rs b/packages/services/src/block_committer.rs index 76cbeada..d2894783 100644 --- a/packages/services/src/block_committer.rs +++ b/packages/services/src/block_committer.rs @@ -1,19 +1,14 @@ use std::num::NonZeroU32; -use async_trait::async_trait; use metrics::{ prometheus::{core::Collector, IntGauge, Opts}, RegistersMetrics, }; -use ports::{ - storage::Storage, - types::{BlockSubmission, ValidatedFuelBlock}, -}; +use ports::{fuel::FuelBlock, storage::Storage, types::BlockSubmission}; use tracing::info; -use validator::Validator; use super::Runner; -use crate::{Error, Result}; +use crate::{validator::Validator, Error, Result}; pub struct BlockCommitter { l1_adapter: L1, @@ -74,12 +69,12 @@ where BlockValidator: Validator, Fuel: ports::fuel::Api, { - async fn submit_block(&self, fuel_block: ValidatedFuelBlock) -> Result<()> { + async fn submit_block(&self, fuel_block: FuelBlock) -> Result<()> { let submittal_height = self.l1_adapter.get_block_number().await?; let submission = BlockSubmission { - block_hash: fuel_block.hash(), - block_height: fuel_block.height(), + block_hash: *fuel_block.id, + block_height: fuel_block.header.height, submittal_height, completed: false, }; @@ -87,20 +82,26 @@ where self.storage.insert(submission).await?; // if we have a network failure the DB entry will be left at completed:false. 
- self.l1_adapter.submit(fuel_block).await?; + self.l1_adapter + .submit(*fuel_block.id, fuel_block.header.height) + .await?; Ok(()) } - async fn fetch_latest_block(&self) -> Result { + async fn fetch_latest_block(&self) -> Result { let latest_block = self.fuel_adapter.latest_block().await?; - let validated_block = self.block_validator.validate(&latest_block)?; + self.block_validator.validate( + latest_block.id, + &latest_block.header, + &latest_block.consensus, + )?; self.metrics .latest_fuel_block - .set(i64::from(validated_block.height())); + .set(i64::from(latest_block.header.height)); - Ok(validated_block) + Ok(latest_block) } async fn check_if_stale(&self, block_height: u32) -> Result { @@ -123,7 +124,7 @@ where .map(|submission| submission.block_height)) } - async fn fetch_block(&self, height: u32) -> Result { + async fn fetch_block(&self, height: u32) -> Result { let fuel_block = self .fuel_adapter .block_at_height(height) @@ -134,11 +135,12 @@ where )) })?; - Ok(self.block_validator.validate(&fuel_block)?) + self.block_validator + .validate(fuel_block.id, &fuel_block.header, &fuel_block.consensus)?; + Ok(fuel_block) } } -#[async_trait] impl Runner for BlockCommitter where L1: ports::l1::Contract + ports::l1::Api, @@ -148,19 +150,20 @@ where { async fn run(&mut self) -> Result<()> { let current_block = self.fetch_latest_block().await?; - let current_epoch_block_height = self.current_epoch_block_height(current_block.height()); + let current_epoch_block_height = + self.current_epoch_block_height(current_block.header.height); if self.check_if_stale(current_epoch_block_height).await? { return Ok(()); } - let block = if current_block.height() == current_epoch_block_height { + let block = if current_block.header.height == current_epoch_block_height { current_block } else { self.fetch_block(current_epoch_block_height).await? 
}; - self.submit_block(block) + self.submit_block(block.clone()) .await .map_err(|e| Error::Other(e.to_string()))?; info!("submitted {block:?}!"); @@ -171,82 +174,31 @@ where #[cfg(test)] mod tests { - use std::sync::Arc; use fuel_crypto::{Message, SecretKey, Signature}; use metrics::prometheus::{proto::Metric, Registry}; - use mockall::predicate::{self, eq}; - use ports::{ - fuel::{FuelBlock, FuelBlockId, FuelConsensus, FuelHeader, FuelPoAConsensus}, - l1::{Contract, EventStreamer, MockContract}, - types::{L1Height, TransactionResponse, U256}, - }; + use mockall::predicate::eq; + use ports::fuel::{FuelBlock, FuelBlockId, FuelConsensus, FuelHeader, FuelPoAConsensus}; use rand::{rngs::StdRng, Rng, SeedableRng}; - use storage::{Postgres, PostgresProcess}; - use validator::BlockValidator; + use storage::{DbWithProcess, PostgresProcess}; use super::*; + use crate::{test_utils::mocks::l1::FullL1Mock, validator::BlockValidator}; - struct MockL1 { - api: ports::l1::MockApi, - contract: MockContract, - } - impl MockL1 { - fn new() -> Self { - Self { - api: ports::l1::MockApi::new(), - contract: MockContract::new(), - } - } - } - - #[async_trait::async_trait] - impl Contract for MockL1 { - async fn submit(&self, block: ValidatedFuelBlock) -> ports::l1::Result<()> { - self.contract.submit(block).await - } - fn event_streamer(&self, height: L1Height) -> Box { - self.contract.event_streamer(height) - } - - fn commit_interval(&self) -> NonZeroU32 { - self.contract.commit_interval() - } - } - - #[async_trait::async_trait] - impl ports::l1::Api for MockL1 { - async fn submit_l2_state(&self, state_data: Vec) -> ports::l1::Result<[u8; 32]> { - self.api.submit_l2_state(state_data).await - } - - async fn get_block_number(&self) -> ports::l1::Result { - self.api.get_block_number().await - } - - async fn balance(&self) -> ports::l1::Result { - self.api.balance().await - } - - async fn get_transaction_response( - &self, - _tx_hash: [u8; 32], - ) -> ports::l1::Result> { - Ok(None) - } - } - - fn given_l1_that_expects_submission(block: ValidatedFuelBlock) -> MockL1 { - let mut l1 = MockL1::new(); + fn given_l1_that_expects_submission( + expected_hash: [u8; 32], + expected_height: u32, + ) -> FullL1Mock { + let mut l1 = FullL1Mock::default(); l1.contract .expect_submit() - .with(predicate::eq(block)) - .return_once(move |_| Ok(())); + .withf(move |hash, height| *hash == expected_hash && *height == expected_height) + .return_once(move |_, _| Box::pin(async { Ok(()) })); l1.api .expect_get_block_number() - .return_once(move || Ok(0u32.into())); + .return_once(move || Box::pin(async { Ok(0u32.into()) })); l1 } @@ -260,10 +212,8 @@ mod tests { let latest_block = given_a_block(5, &secret_key); let fuel_adapter = given_fetcher(vec![latest_block, missed_block.clone()]); - let validated_missed_block = ValidatedFuelBlock::new(*missed_block.id, 4); - let l1 = given_l1_that_expects_submission(validated_missed_block); - let process = PostgresProcess::shared().await.unwrap(); - let db = db_with_submissions(&process, vec![0, 2]).await; + let l1 = given_l1_that_expects_submission(*missed_block.id, 4); + let db = db_with_submissions(vec![0, 2]).await; let mut block_committer = BlockCommitter::new(l1, db, fuel_adapter, block_validator, 2.try_into().unwrap()); @@ -283,10 +233,9 @@ mod tests { let latest_block = given_a_block(5, &secret_key); let fuel_adapter = given_fetcher(vec![latest_block, missed_block]); - let process = PostgresProcess::shared().await.unwrap(); - let db = db_with_submissions(&process, vec![0, 2, 4]).await; + 
let db = db_with_submissions(vec![0, 2, 4]).await; - let mut l1 = MockL1::new(); + let mut l1 = FullL1Mock::default(); l1.contract.expect_submit().never(); let mut block_committer = @@ -307,10 +256,9 @@ mod tests { let latest_block = given_a_block(6, &secret_key); let fuel_adapter = given_fetcher(vec![latest_block]); - let process = PostgresProcess::shared().await.unwrap(); - let db = db_with_submissions(&process, vec![0, 2, 4, 6]).await; + let db = db_with_submissions(vec![0, 2, 4, 6]).await; - let mut l1 = MockL1::new(); + let mut l1 = FullL1Mock::default(); l1.contract.expect_submit().never(); let mut block_committer = @@ -331,9 +279,8 @@ mod tests { let block = given_a_block(4, &secret_key); let fuel_adapter = given_fetcher(vec![block.clone()]); - let process = PostgresProcess::shared().await.unwrap(); - let db = db_with_submissions(&process, vec![0, 2]).await; - let l1 = given_l1_that_expects_submission(ValidatedFuelBlock::new(*block.id, 4)); + let db = db_with_submissions(vec![0, 2]).await; + let l1 = given_l1_that_expects_submission(*block.id, 4); let mut block_committer = BlockCommitter::new(l1, db, fuel_adapter, block_validator, 2.try_into().unwrap()); @@ -352,10 +299,9 @@ mod tests { let block = given_a_block(5, &secret_key); let fuel_adapter = given_fetcher(vec![block]); - let process = PostgresProcess::shared().await.unwrap(); - let db = db_with_submissions(&process, vec![0, 2, 4]).await; + let db = db_with_submissions(vec![0, 2, 4]).await; - let mut l1 = MockL1::new(); + let mut l1 = FullL1Mock::default(); l1.contract.expect_submit().never(); let mut block_committer = @@ -379,11 +325,13 @@ mod tests { assert_eq!(latest_block_metric.get_value(), 5f64); } - async fn db_with_submissions( - process: &Arc, - pending_submissions: Vec, - ) -> Postgres { - let db = process.create_random_db().await.unwrap(); + async fn db_with_submissions(pending_submissions: Vec) -> DbWithProcess { + let db = PostgresProcess::shared() + .await + .unwrap() + .create_random_db() + .await + .unwrap(); for height in pending_submissions { db.insert(given_a_pending_submission(height)).await.unwrap(); } @@ -397,15 +345,19 @@ mod tests { fetcher .expect_block_at_height() .with(eq(block.header.height)) - .returning(move |_| Ok(Some(block.clone()))); + .returning(move |_| { + let block = block.clone(); + Box::pin(async move { Ok(Some(block)) }) + }); } if let Some(block) = available_blocks .into_iter() .max_by_key(|el| el.header.height) { - fetcher - .expect_latest_block() - .returning(move || Ok(block.clone())); + fetcher.expect_latest_block().returning(move || { + let block = block.clone(); + Box::pin(async { Ok(block) }) + }); } fetcher diff --git a/packages/services/src/block_importer.rs b/packages/services/src/block_importer.rs new file mode 100644 index 00000000..3c00e070 --- /dev/null +++ b/packages/services/src/block_importer.rs @@ -0,0 +1,500 @@ +use futures::TryStreamExt; +use itertools::chain; +use ports::{ + fuel::{Consensus, FuelPoAConsensus, FullFuelBlock, Genesis}, + storage::Storage, + types::{CollectNonEmpty, NonEmpty}, +}; +use tracing::info; + +use crate::{validator::Validator, Result, Runner}; + +/// The `BlockImporter` is responsible for importing blocks from the Fuel blockchain +/// into local storage. It fetches blocks from the Fuel API, validates them, +/// and stores them if they are not already present. 
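+///
+/// A minimal wiring sketch (the concrete storage, API and validator values here are
+/// illustrative; see the tests below for real construction):
+///
+/// ```ignore
+/// let importer = BlockImporter::new(db, fuel_api, block_validator, 1000);
+/// importer.run().await?; // imports any blocks missing within the lookback window
+/// ```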
+pub struct BlockImporter<Db, FuelApi, BlockValidator> {
+    storage: Db,
+    fuel_api: FuelApi,
+    block_validator: BlockValidator,
+    lookback_window: u32,
+}
+
+impl<Db, FuelApi, BlockValidator> BlockImporter<Db, FuelApi, BlockValidator> {
+    /// Creates a new `BlockImporter`.
+    pub fn new(
+        storage: Db,
+        fuel_api: FuelApi,
+        block_validator: BlockValidator,
+        lookback_window: u32,
+    ) -> Self {
+        Self {
+            storage,
+            fuel_api,
+            block_validator,
+            lookback_window,
+        }
+    }
+}
+
+impl<Db, FuelApi, BlockValidator> BlockImporter<Db, FuelApi, BlockValidator>
+where
+    Db: Storage,
+    FuelApi: ports::fuel::Api,
+    BlockValidator: Validator,
+{
+    async fn import_blocks(&self, blocks: NonEmpty<FullFuelBlock>) -> Result<()> {
+        let db_blocks = encode_blocks(blocks);
+
+        let starting_height = db_blocks.first().height;
+        let ending_height = db_blocks.last().height;
+
+        self.storage.insert_blocks(db_blocks).await?;
+
+        info!("Imported blocks: {starting_height}..={ending_height}");
+
+        Ok(())
+    }
+
+    fn validate_blocks(&self, blocks: &[FullFuelBlock]) -> Result<()> {
+        for block in blocks {
+            self.block_validator
+                .validate(block.id, &block.header, &block.consensus)?;
+        }
+
+        Ok(())
+    }
+}
+
+pub(crate) fn encode_blocks(
+    blocks: NonEmpty<FullFuelBlock>,
+) -> NonEmpty<ports::storage::FuelBlock> {
+    blocks
+        .into_iter()
+        .map(|full_block| ports::storage::FuelBlock {
+            hash: *full_block.id,
+            height: full_block.header.height,
+            data: encode_block_data(full_block),
+        })
+        .collect_nonempty()
+        .expect("should be non-empty")
+}
+
+fn serialize_header(header: ports::fuel::FuelHeader) -> NonEmpty<u8> {
+    chain!(
+        *header.id,
+        header.da_height.to_be_bytes(),
+        header.consensus_parameters_version.to_be_bytes(),
+        header.state_transition_bytecode_version.to_be_bytes(),
+        header.transactions_count.to_be_bytes(),
+        header.message_receipt_count.to_be_bytes(),
+        *header.transactions_root,
+        *header.message_outbox_root,
+        *header.event_inbox_root,
+        header.height.to_be_bytes(),
+        *header.prev_root,
+        header.time.0.to_be_bytes(),
+        *header.application_hash,
+    )
+    .collect_nonempty()
+    .expect("should be non-empty")
+}
+
+fn serialize_consensus(consensus: Consensus) -> NonEmpty<u8> {
+    let mut buf = vec![];
+    match consensus {
+        Consensus::Genesis(Genesis {
+            chain_config_hash,
+            coins_root,
+            contracts_root,
+            messages_root,
+            transactions_root,
+        }) => {
+            let variant = 0u8;
+            buf.extend(chain!(
+                variant.to_be_bytes(),
+                *chain_config_hash,
+                *coins_root,
+                *contracts_root,
+                *messages_root,
+                *transactions_root,
+            ));
+        }
+        Consensus::PoAConsensus(FuelPoAConsensus { signature }) => {
+            let variant = 1u8;
+
+            buf.extend(chain!(variant.to_be_bytes(), *signature));
+        }
+        Consensus::Unknown => {
+            let variant = 2u8;
+            buf.extend(variant.to_be_bytes());
+        }
+    }
+
+    NonEmpty::from_vec(buf).expect("should be non-empty")
+}
+
+fn encode_block_data(block: FullFuelBlock) -> NonEmpty<u8> {
+    // We don't handle fwd/bwd compatibility, that should be handled once the DA compression on the core is incorporated
+    chain!(
+        serialize_header(block.header),
+        serialize_consensus(block.consensus),
+        block.raw_transactions.into_iter().flatten()
+    )
+    .collect_nonempty()
+    .expect("should be non-empty")
+}
+
+impl<Db, FuelApi, BlockValidator> Runner for BlockImporter<Db, FuelApi, BlockValidator>
+where
+    Db: Storage + Send + Sync,
+    FuelApi: ports::fuel::Api + Send + Sync,
+    BlockValidator: Validator + Send + Sync,
+{
+    async fn run(&mut self) -> Result<()> {
+        let chain_height = self.fuel_api.latest_height().await?;
+        let starting_height = chain_height.saturating_sub(self.lookback_window);
+
+        for range in self
+            .storage
+            .missing_blocks(starting_height, chain_height)
+            .await?
+ { + self.fuel_api + .full_blocks_in_height_range(range) + .map_err(crate::Error::from) + .try_for_each(|blocks| async { + self.validate_blocks(&blocks)?; + + if let Some(blocks) = NonEmpty::from_vec(blocks) { + self.import_blocks(blocks).await?; + } + + Ok(()) + }) + .await?; + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + + use fuel_crypto::SecretKey; + use futures::StreamExt; + use itertools::Itertools; + use mockall::{predicate::eq, Sequence}; + use ports::types::nonempty; + use rand::{rngs::StdRng, SeedableRng}; + + use super::*; + use crate::{ + test_utils::{self, Blocks, ImportedBlocks}, + BlockValidator, Error, + }; + + #[tokio::test] + async fn imports_first_block_when_db_is_empty() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + + let mut rng = StdRng::from_seed([0; 32]); + let secret_key = SecretKey::random(&mut rng); + let block = test_utils::mocks::fuel::generate_block(0, &secret_key, 1, 100); + + let fuel_mock = test_utils::mocks::fuel::these_blocks_exist(vec![block.clone()], true); + let block_validator = BlockValidator::new(*secret_key.public_key().hash()); + + let mut importer = BlockImporter::new(setup.db(), fuel_mock, block_validator, 0); + + // when + importer.run().await?; + + // then + let all_blocks = setup + .db() + .lowest_sequence_of_unbundled_blocks(0, 10) + .await? + .unwrap(); + + let expected_block = encode_blocks(nonempty![block]); + + assert_eq!(all_blocks.into_inner(), expected_block); + + Ok(()) + } + + #[tokio::test] + async fn wont_import_invalid_blocks() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + + let mut rng = StdRng::from_seed([0; 32]); + let correct_secret_key = SecretKey::random(&mut rng); + let block_validator = BlockValidator::new(*correct_secret_key.public_key().hash()); + + let incorrect_secret_key = SecretKey::random(&mut rng); + let block = test_utils::mocks::fuel::generate_block(0, &incorrect_secret_key, 1, 100); + + let fuel_mock = test_utils::mocks::fuel::these_blocks_exist(vec![block.clone()], true); + + let mut importer = BlockImporter::new(setup.db(), fuel_mock, block_validator, 0); + + // when + let result = importer.run().await; + + // then + let Err(Error::BlockValidation(msg)) = result else { + panic!("expected a validation error, got: {:?}", result); + }; + + assert_eq!( + msg, + r#"recovered producer addr `13d5eed3c6132bcf8dc2f92944d11fb3dc32df5ed183ab4716914eb21fd2b318` does not match expected addr`4747f47fb79e2b73b2f3c3ca1ea69d9b2b0caf8ac2d3480da6e750664f40914b`."# + ); + + Ok(()) + } + + #[tokio::test] + async fn does_not_request_or_import_blocks_already_in_db() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + + let ImportedBlocks { + fuel_blocks: existing_blocks, + secret_key, + .. 
+ } = setup + .import_blocks(Blocks::WithHeights { + range: 0..=2, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + + let new_blocks = (3..=5) + .map(|height| test_utils::mocks::fuel::generate_block(height, &secret_key, 1, 100)) + .collect_vec(); + + let all_blocks = existing_blocks + .into_iter() + .chain(new_blocks.clone()) + .collect_nonempty() + .unwrap(); + + let fuel_mock = test_utils::mocks::fuel::these_blocks_exist(new_blocks.clone(), true); + let block_validator = BlockValidator::new(*secret_key.public_key().hash()); + + let mut importer = BlockImporter::new(setup.db(), fuel_mock, block_validator, 1000); + + // when + importer.run().await?; + + // then + let stored_blocks = setup + .db() + .lowest_sequence_of_unbundled_blocks(0, 100) + .await? + .unwrap(); + + let expected_blocks = encode_blocks(all_blocks); + + pretty_assertions::assert_eq!(stored_blocks.into_inner(), expected_blocks); + + Ok(()) + } + + #[tokio::test] + async fn respects_height_even_if_blocks_before_are_missing() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + + let ImportedBlocks { secret_key, .. } = setup + .import_blocks(Blocks::WithHeights { + range: 0..=2, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + + let starting_height = 8; + let new_blocks = (starting_height..=13) + .map(|height| test_utils::mocks::fuel::generate_block(height, &secret_key, 1, 100)) + .collect_nonempty() + .unwrap(); + + let fuel_mock = test_utils::mocks::fuel::these_blocks_exist(new_blocks.clone(), true); + let block_validator = BlockValidator::new(*secret_key.public_key().hash()); + + let mut importer = BlockImporter::new(setup.db(), fuel_mock, block_validator, 5); + + // when + importer.run().await?; + + // then + let stored_new_blocks = setup + .db() + .lowest_sequence_of_unbundled_blocks(starting_height, 100) + .await? + .unwrap(); + let expected_blocks = encode_blocks(new_blocks); + + pretty_assertions::assert_eq!(stored_new_blocks.into_inner(), expected_blocks); + + Ok(()) + } + + #[tokio::test] + async fn handles_chain_with_no_new_blocks() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + + let ImportedBlocks { + fuel_blocks, + storage_blocks, + secret_key, + .. + } = setup + .import_blocks(Blocks::WithHeights { + range: 0..=2, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + + let fuel_mock = test_utils::mocks::fuel::these_blocks_exist(fuel_blocks, true); + let block_validator = BlockValidator::new(*secret_key.public_key().hash()); + + let mut importer = BlockImporter::new(setup.db(), fuel_mock, block_validator, 0); + + // when + importer.run().await?; + + // then + // Database should remain unchanged + let stored_blocks = setup + .db() + .lowest_sequence_of_unbundled_blocks(0, 10) + .await? 
+ .unwrap(); + + assert_eq!(stored_blocks.into_inner(), storage_blocks); + + Ok(()) + } + + #[tokio::test] + async fn skips_blocks_outside_lookback_window() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + let lookback_window = 2; + + let secret_key = SecretKey::random(&mut StdRng::from_seed([0; 32])); + let blocks_to_import = (3..=5) + .map(|height| test_utils::mocks::fuel::generate_block(height, &secret_key, 1, 100)); + + let fuel_mock = test_utils::mocks::fuel::these_blocks_exist(blocks_to_import, true); + let block_validator = BlockValidator::new(*secret_key.public_key().hash()); + + let mut importer = + BlockImporter::new(setup.db(), fuel_mock, block_validator, lookback_window); + + // when + importer.run().await?; + + // then + let unbundled_blocks = setup + .db() + .lowest_sequence_of_unbundled_blocks(0, 10) + .await? + .unwrap(); + + let unbundled_block_heights: Vec<_> = unbundled_blocks + .into_inner() + .iter() + .map(|b| b.height) + .collect(); + + assert_eq!( + unbundled_block_heights, + vec![3, 4, 5], + "Blocks outside the lookback window should remain unbundled" + ); + + Ok(()) + } + + #[tokio::test] + async fn fills_in_missing_blocks_inside_lookback_window() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; + + let secret_key = SecretKey::random(&mut StdRng::from_seed([0; 32])); + + for range in [(3..=10), (40..=60)] { + setup + .import_blocks(Blocks::WithHeights { + range, + tx_per_block: 1, + size_per_tx: 100, + }) + .await; + } + + let mut fuel_mock = ports::fuel::MockApi::new(); + + let mut sequence = Sequence::new(); + + for range in [0..=2, 11..=39, 61..=100] { + fuel_mock + .expect_full_blocks_in_height_range() + .with(eq(range)) + .once() + .in_sequence(&mut sequence) + .return_once(move |range| { + let blocks = range + .map(|height| { + test_utils::mocks::fuel::generate_block(height, &secret_key, 1, 100) + }) + .collect(); + + futures::stream::once(async { Ok(blocks) }).boxed() + }); + } + + fuel_mock + .expect_latest_height() + .once() + .return_once(|| Box::pin(async { Ok(100) })); + + let block_validator = BlockValidator::new(*secret_key.public_key().hash()); + + let mut importer = BlockImporter::new(setup.db(), fuel_mock, block_validator, 101); + + // when + importer.run().await?; + + // then + let unbundled_blocks = setup + .db() + .lowest_sequence_of_unbundled_blocks(0, 101) + .await? 
+ .unwrap(); + + let unbundled_block_heights: Vec<_> = unbundled_blocks + .into_inner() + .iter() + .map(|b| b.height) + .collect(); + + assert_eq!( + unbundled_block_heights, + (0..=100).collect_vec(), + "Blocks outside the lookback window should remain unbundled" + ); + + Ok(()) + } +} diff --git a/packages/services/src/commit_listener.rs b/packages/services/src/commit_listener.rs index c68f82ea..40b32b72 100644 --- a/packages/services/src/commit_listener.rs +++ b/packages/services/src/commit_listener.rs @@ -1,4 +1,3 @@ -use async_trait::async_trait; use futures::{StreamExt, TryStreamExt}; use metrics::{ prometheus::{core::Collector, IntGauge, Opts}, @@ -69,7 +68,6 @@ where } } -#[async_trait] impl Runner for CommitListener where C: ports::l1::Contract, @@ -131,7 +129,7 @@ mod tests { types::{BlockSubmission, FuelBlockCommittedOnL1, L1Height, U256}, }; use rand::Rng; - use storage::{Postgres, PostgresProcess}; + use storage::{DbWithProcess, PostgresProcess}; use tokio_util::sync::CancellationToken; use crate::{CommitListener, Runner}; @@ -149,8 +147,7 @@ mod tests { let contract = given_contract_with_events(vec![block_hash], submission.submittal_height); - let process = PostgresProcess::shared().await.unwrap(); - let db = db_with_submission(&process, submission).await; + let db = db_with_submission(submission).await; let mut commit_listener = CommitListener::new(contract, db.clone(), CancellationToken::default()); @@ -177,8 +174,7 @@ mod tests { let contract = given_contract_with_events(vec![block_hash], submission.submittal_height); - let process = PostgresProcess::shared().await.unwrap(); - let db = db_with_submission(&process, submission).await; + let db = db_with_submission(submission).await; let mut commit_listener = CommitListener::new(contract, db, CancellationToken::default()); @@ -218,8 +214,7 @@ mod tests { incoming_block.submittal_height, ); - let process = PostgresProcess::shared().await.unwrap(); - let db = db_with_submission(&process, incoming_block.clone()).await; + let db = db_with_submission(incoming_block.clone()).await; let mut commit_listener = CommitListener::new(contract, db.clone(), CancellationToken::default()); @@ -238,11 +233,13 @@ mod tests { ); } - async fn db_with_submission( - process: &PostgresProcess, - submission: BlockSubmission, - ) -> Postgres { - let db = process.create_random_db().await.unwrap(); + async fn db_with_submission(submission: BlockSubmission) -> DbWithProcess { + let db = PostgresProcess::shared() + .await + .unwrap() + .create_random_db() + .await + .unwrap(); db.insert(submission).await.unwrap(); diff --git a/packages/services/src/lib.rs b/packages/services/src/lib.rs index ea4ab129..6f48bbba 100644 --- a/packages/services/src/lib.rs +++ b/packages/services/src/lib.rs @@ -1,20 +1,26 @@ -#![deny(unused_crate_dependencies)] +mod block_bundler; mod block_committer; +mod block_importer; mod commit_listener; mod health_reporter; mod state_committer; -mod state_importer; mod state_listener; mod status_reporter; +mod validator; mod wallet_balance_tracker; +pub use block_bundler::{ + bundler::{CompressionLevel, Factory as BundlerFactory}, + BlockBundler, Config as BlockBundlerConfig, +}; pub use block_committer::BlockCommitter; +pub use block_importer::BlockImporter; pub use commit_listener::CommitListener; pub use health_reporter::HealthReporter; -pub use state_committer::StateCommitter; -pub use state_importer::StateImporter; +pub use state_committer::{Config as StateCommitterConfig, StateCommitter}; pub use state_listener::StateListener; pub 
use status_reporter::StatusReporter; +pub use validator::BlockValidator; pub use wallet_balance_tracker::WalletBalanceTracker; #[derive(thiserror::Error, Debug)] @@ -63,7 +69,503 @@ impl From for Error { pub type Result = std::result::Result; -#[async_trait::async_trait] +#[trait_variant::make(Send)] pub trait Runner: Send + Sync { async fn run(&mut self) -> Result<()>; } + +#[cfg(test)] +pub(crate) mod test_utils { + + pub fn encode_and_merge(blocks: NonEmpty) -> NonEmpty { + block_importer::encode_blocks(blocks) + .into_iter() + .flat_map(|b| b.data) + .collect_nonempty() + .expect("is not empty") + } + + pub fn random_data(size: impl Into) -> NonEmpty { + let size = size.into(); + if size == 0 { + panic!("random data size must be greater than 0"); + } + + let mut buffer = vec![0; size]; + rand::thread_rng().fill_bytes(&mut buffer[..]); + NonEmpty::collect(buffer).expect("checked size, not empty") + } + + use std::{ops::RangeInclusive, time::Duration}; + + use clock::TestClock; + use eth::Eip4844BlobEncoder; + use fuel_crypto::SecretKey; + use mocks::l1::TxStatus; + use ports::{ + storage::Storage, + types::{CollectNonEmpty, DateTime, Fragment, NonEmpty, Utc}, + }; + use rand::RngCore; + use storage::{DbWithProcess, PostgresProcess}; + + use super::Runner; + use crate::{ + block_bundler::bundler::Factory, + block_importer::{self, encode_blocks}, + BlockBundler, BlockBundlerConfig, BlockImporter, BlockValidator, StateCommitter, + StateListener, + }; + + pub mod mocks { + pub mod l1 { + + use std::cmp::min; + + use delegate::delegate; + use mockall::{predicate::eq, Sequence}; + use ports::{ + l1::FragmentsSubmitted, + types::{Fragment, L1Height, NonEmpty, TransactionResponse, U256}, + }; + + pub struct FullL1Mock { + pub api: ports::l1::MockApi, + pub contract: ports::l1::MockContract, + } + + impl Default for FullL1Mock { + fn default() -> Self { + Self::new() + } + } + + impl FullL1Mock { + pub fn new() -> Self { + Self { + api: ports::l1::MockApi::new(), + contract: ports::l1::MockContract::new(), + } + } + } + + impl ports::l1::Contract for FullL1Mock { + delegate! { + to self.contract { + async fn submit(&self, hash: [u8;32], height: u32) -> ports::l1::Result<()>; + fn event_streamer(&self, height: L1Height) -> Box; + fn commit_interval(&self) -> std::num::NonZeroU32; + } + } + } + + impl ports::l1::Api for FullL1Mock { + delegate! 
{ + to self.api { + async fn submit_state_fragments( + &self, + fragments: NonEmpty, + ) -> ports::l1::Result; + async fn get_block_number(&self) -> ports::l1::Result; + async fn balance(&self) -> ports::l1::Result; + async fn get_transaction_response(&self, tx_hash: [u8; 32]) -> ports::l1::Result>; + } + } + } + + pub enum TxStatus { + Success, + Failure, + } + + pub fn expects_state_submissions( + expectations: impl IntoIterator>, [u8; 32])>, + ) -> ports::l1::MockApi { + let mut sequence = Sequence::new(); + + let mut l1_mock = ports::l1::MockApi::new(); + + for (fragment, tx_id) in expectations { + l1_mock + .expect_submit_state_fragments() + .withf(move |data| { + if let Some(fragment) = &fragment { + data == fragment + } else { + true + } + }) + .once() + .return_once(move |fragments| { + Box::pin(async move { + Ok(FragmentsSubmitted { + tx: tx_id, + num_fragments: min(fragments.len(), 6).try_into().unwrap(), + }) + }) + }) + .in_sequence(&mut sequence); + } + + l1_mock + } + + pub fn txs_finished( + current_height: u32, + tx_height: u32, + statuses: impl IntoIterator, + ) -> ports::l1::MockApi { + let mut l1_mock = ports::l1::MockApi::new(); + + let height = L1Height::from(current_height); + l1_mock + .expect_get_block_number() + .returning(move || Box::pin(async move { Ok(height) })); + + for expectation in statuses { + let (tx_id, status) = expectation; + + let height = L1Height::from(tx_height); + l1_mock + .expect_get_transaction_response() + .with(eq(tx_id)) + .return_once(move |_| { + Box::pin(async move { + Ok(Some(TransactionResponse::new( + height.into(), + matches!(status, TxStatus::Success), + ))) + }) + }); + } + l1_mock + } + } + + pub mod fuel { + use std::{iter, ops::RangeInclusive}; + + use fuel_crypto::{Message, SecretKey, Signature}; + use futures::{stream, StreamExt}; + use itertools::Itertools; + use ports::{ + fuel::{FuelBlockId, FuelConsensus, FuelHeader, FuelPoAConsensus, FullFuelBlock}, + storage::SequentialFuelBlocks, + types::{nonempty, CollectNonEmpty, NonEmpty}, + }; + use rand::{RngCore, SeedableRng}; + + use crate::block_importer; + + pub fn generate_block( + height: u32, + secret_key: &SecretKey, + num_tx: usize, + tx_size: usize, + ) -> ports::fuel::FullFuelBlock { + let header = given_header(height); + + let mut hasher = fuel_crypto::Hasher::default(); + hasher.input(header.prev_root.as_ref()); + hasher.input(header.height.to_be_bytes()); + hasher.input(header.time.0.to_be_bytes()); + hasher.input(header.application_hash.as_ref()); + + let id = FuelBlockId::from(hasher.digest()); + let id_message = Message::from_bytes(*id); + let signature = Signature::sign(secret_key, &id_message); + + let mut small_rng = rand::rngs::SmallRng::from_seed([0; 32]); + let raw_transactions = std::iter::repeat_with(|| { + let mut buf = vec![0; tx_size]; + small_rng.fill_bytes(&mut buf); + NonEmpty::collect(buf).unwrap() + }) + .take(num_tx) + .collect::>(); + + FullFuelBlock { + id, + header, + consensus: FuelConsensus::PoAConsensus(FuelPoAConsensus { signature }), + raw_transactions, + } + } + + pub fn generate_storage_block_sequence( + heights: RangeInclusive, + secret_key: &SecretKey, + num_tx: usize, + tx_size: usize, + ) -> SequentialFuelBlocks { + heights + .map(|height| generate_storage_block(height, secret_key, num_tx, tx_size)) + .collect_nonempty() + .unwrap() + .try_into() + .unwrap() + } + + pub fn generate_storage_block( + height: u32, + secret_key: &SecretKey, + num_tx: usize, + tx_size: usize, + ) -> ports::storage::FuelBlock { + let block = 
generate_block(height, secret_key, num_tx, tx_size); + block_importer::encode_blocks(nonempty![block]) + .first() + .clone() + } + + fn given_header(height: u32) -> FuelHeader { + let application_hash = + "0x8b96f712e293e801d53da77113fec3676c01669c6ea05c6c92a5889fce5f649d" + .parse() + .unwrap(); + + ports::fuel::FuelHeader { + id: Default::default(), + da_height: Default::default(), + consensus_parameters_version: Default::default(), + state_transition_bytecode_version: Default::default(), + transactions_count: 1, + message_receipt_count: Default::default(), + transactions_root: Default::default(), + message_outbox_root: Default::default(), + event_inbox_root: Default::default(), + height, + prev_root: Default::default(), + time: tai64::Tai64(0), + application_hash, + } + } + + pub fn these_blocks_exist( + blocks: impl IntoIterator, + enforce_tight_range: bool, + ) -> ports::fuel::MockApi { + let mut fuel_mock = ports::fuel::MockApi::default(); + + let blocks = blocks + .into_iter() + .sorted_by_key(|b| b.header.height) + .collect::>(); + + let latest_block = blocks.last().expect("Must have at least one block").clone(); + + let lowest_height = blocks + .first() + .expect("Must have at least one block") + .header + .height; + let highest_height = latest_block.header.height; + + fuel_mock + .expect_latest_height() + .return_once(move || Box::pin(async move { Ok(highest_height) })); + + fuel_mock + .expect_full_blocks_in_height_range() + .returning(move |range| { + let expected_range = lowest_height..=highest_height; + if enforce_tight_range && range != expected_range { + panic!("range of requested blocks {range:?} is not as tight as expected: {expected_range:?}"); + } + + let blocks_batch = blocks + .iter() + .filter(move |b| range.contains(&b.header.height)) + .cloned().collect(); + + stream::iter(iter::once(Ok(blocks_batch))).boxed() + }); + + fuel_mock + } + + pub fn latest_height_is(height: u32) -> ports::fuel::MockApi { + let mut fuel_mock = ports::fuel::MockApi::default(); + fuel_mock + .expect_latest_height() + .returning(move || Box::pin(async move { Ok(height) })); + fuel_mock + } + } + } + + #[derive(Debug)] + pub struct ImportedBlocks { + pub fuel_blocks: NonEmpty, + pub storage_blocks: NonEmpty, + pub secret_key: SecretKey, + } + + pub struct Setup { + db: DbWithProcess, + } + + impl Setup { + pub async fn send_fragments(&self, eth_tx: [u8; 32]) { + StateCommitter::new( + mocks::l1::expects_state_submissions(vec![(None, eth_tx)]), + mocks::fuel::latest_height_is(0), + self.db(), + crate::StateCommitterConfig::default(), + TestClock::default(), + ) + .run() + .await + .unwrap(); + } + + pub async fn init() -> Self { + let db = PostgresProcess::shared() + .await + .unwrap() + .create_random_db() + .await + .unwrap(); + Self { db } + } + + pub fn db(&self) -> DbWithProcess { + self.db.clone() + } + + pub async fn commit_single_block_bundle(&self, finalization_time: DateTime) { + self.insert_fragments(0, 6).await; + + let clock = TestClock::default(); + clock.set_time(finalization_time); + + let tx = [1; 32]; + let l1_mock = mocks::l1::expects_state_submissions(vec![(None, tx)]); + let fuel_mock = mocks::fuel::latest_height_is(0); + let mut committer = StateCommitter::new( + l1_mock, + fuel_mock, + self.db(), + crate::StateCommitterConfig::default(), + TestClock::default(), + ); + committer.run().await.unwrap(); + + let l1_mock = mocks::l1::txs_finished(0, 0, [(tx, TxStatus::Success)]); + + StateListener::new(l1_mock, self.db(), 0, clock.clone()) + .run() + .await + .unwrap(); + } + 
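+        /// Imports a single block at `height` carrying `amount` transactions, each sized
+        /// to nearly fill a blob, bundles it, and returns the resulting unsubmitted
+        /// fragments (the asserts below check one fragment per transaction).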
+ pub async fn insert_fragments(&self, height: u32, amount: usize) -> Vec { + let max_per_blob = (Eip4844BlobEncoder::FRAGMENT_SIZE as f64 * 0.96) as usize; + let ImportedBlocks { fuel_blocks, .. } = self + .import_blocks(Blocks::WithHeights { + range: height..=height, + tx_per_block: amount, + size_per_tx: max_per_blob, + }) + .await; + + let factory = Factory::new( + Eip4844BlobEncoder, + crate::CompressionLevel::Level6, + 1.try_into().unwrap(), + ); + + let mut fuel_api = ports::fuel::MockApi::new(); + let latest_height = fuel_blocks.last().header.height; + fuel_api + .expect_latest_height() + .returning(move || Box::pin(async move { Ok(latest_height) })); + + let mut bundler = BlockBundler::new( + fuel_api, + self.db(), + TestClock::default(), + factory, + BlockBundlerConfig { + optimization_time_limit: Duration::ZERO, + block_accumulation_time_limit: Duration::ZERO, + num_blocks_to_accumulate: 1.try_into().unwrap(), + lookback_window: 100, + ..Default::default() + }, + ); + + bundler.run().await.unwrap(); + + let fragments = self + .db + .oldest_nonfinalized_fragments(0, amount) + .await + .unwrap(); + assert_eq!(fragments.len(), amount); + + fragments.into_iter().map(|f| f.fragment).collect() + } + + pub async fn import_blocks(&self, blocks: Blocks) -> ImportedBlocks { + let (mut block_importer, blocks) = self.block_importer(blocks); + + block_importer.run().await.unwrap(); + + blocks + } + + pub fn block_importer( + &self, + blocks: Blocks, + ) -> ( + BlockImporter, + ImportedBlocks, + ) { + match blocks { + Blocks::WithHeights { + range, + tx_per_block, + size_per_tx, + } => { + let secret_key = SecretKey::random(&mut rand::thread_rng()); + + let block_validator = BlockValidator::new(*secret_key.public_key().hash()); + + let fuel_blocks = range + .map(|height| { + mocks::fuel::generate_block( + height, + &secret_key, + tx_per_block, + size_per_tx, + ) + }) + .collect_nonempty() + .unwrap(); + + let storage_blocks = encode_blocks(fuel_blocks.clone()); + + let mock = mocks::fuel::these_blocks_exist(fuel_blocks.clone(), false); + + ( + BlockImporter::new(self.db(), mock, block_validator, 1000), + ImportedBlocks { + fuel_blocks, + secret_key, + storage_blocks, + }, + ) + } + } + } + } + + pub enum Blocks { + WithHeights { + range: RangeInclusive, + tx_per_block: usize, + size_per_tx: usize, + }, + } +} diff --git a/packages/services/src/state_committer.rs b/packages/services/src/state_committer.rs index a1d7f9ef..c1ddd9ff 100644 --- a/packages/services/src/state_committer.rs +++ b/packages/services/src/state_committer.rs @@ -1,75 +1,192 @@ -use async_trait::async_trait; -use ports::storage::Storage; +use std::{num::NonZeroUsize, time::Duration}; + +use itertools::Itertools; +use ports::{ + clock::Clock, + storage::{BundleFragment, Storage}, + types::{CollectNonEmpty, DateTime, NonEmpty, Utc}, +}; use tracing::info; use crate::{Result, Runner}; -pub struct StateCommitter { +// src/config.rs +#[derive(Debug, Clone)] +pub struct Config { + /// The lookback window in blocks to determine the starting height. + pub lookback_window: u32, + pub fragment_accumulation_timeout: Duration, + pub fragments_to_accumulate: NonZeroUsize, +} + +#[cfg(test)] +impl Default for Config { + fn default() -> Self { + Self { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(0), + fragments_to_accumulate: 1.try_into().unwrap(), + } + } +} + +/// The `StateCommitter` is responsible for committing state fragments to L1. 
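+/// Fragments are submitted once at least `fragments_to_accumulate` of them are
+/// pending, or once `fragment_accumulation_timeout` has elapsed since the last
+/// finalized fragment (measured from startup if none has been finalized yet).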
+pub struct StateCommitter { l1_adapter: L1, - storage: Db, + fuel_api: F, + storage: Storage, + config: Config, + clock: C, + startup_time: DateTime, } -impl StateCommitter { - pub fn new(l1: L1, storage: Db) -> Self { +impl StateCommitter +where + C: Clock, +{ + /// Creates a new `StateCommitter`. + pub fn new(l1_adapter: L1, fuel_api: F, storage: S, config: Config, clock: C) -> Self { + let startup_time = clock.now(); Self { - l1_adapter: l1, + l1_adapter, + fuel_api, storage, + config, + clock, + startup_time, } } } -impl StateCommitter +impl StateCommitter where L1: ports::l1::Api, + F: ports::fuel::Api, Db: Storage, + C: Clock, { - async fn prepare_fragments(&self) -> Result<(Vec, Vec)> { - let fragments = self.storage.get_unsubmitted_fragments().await?; - - let num_fragments = fragments.len(); - let mut fragment_ids = Vec::with_capacity(num_fragments); - let mut data = Vec::with_capacity(num_fragments); - for fragment in fragments { - fragment_ids.push(fragment.id.expect("fragments from DB must have `id`")); - data.extend(fragment.data); - } + async fn get_reference_time(&self) -> Result> { + Ok(self + .storage + .last_time_a_fragment_was_finalized() + .await? + .unwrap_or(self.startup_time)) + } - Ok((fragment_ids, data)) + async fn is_timeout_expired(&self) -> Result { + let reference_time = self.get_reference_time().await?; + let elapsed = self.clock.now() - reference_time; + let std_elapsed = elapsed + .to_std() + .map_err(|e| crate::Error::Other(format!("Failed to convert time: {}", e)))?; + Ok(std_elapsed >= self.config.fragment_accumulation_timeout) } - async fn submit_state(&self) -> Result<()> { - let (fragment_ids, data) = self.prepare_fragments().await?; - if fragment_ids.is_empty() { - return Ok(()); + async fn submit_fragments(&self, fragments: NonEmpty) -> Result<()> { + info!("about to send at most {} fragments", fragments.len()); + + let data = fragments.clone().map(|f| f.fragment); + + match self.l1_adapter.submit_state_fragments(data).await { + Ok(submittal_report) => { + let fragment_ids = fragments + .iter() + .map(|f| f.id) + .take(submittal_report.num_fragments.get()) + .collect_nonempty() + .expect("non-empty vec"); + + let ids = fragment_ids + .iter() + .map(|id| id.as_u32().to_string()) + .join(", "); + + self.storage + .record_pending_tx(submittal_report.tx, fragment_ids) + .await?; + + tracing::info!( + "Submitted fragments {ids} with tx {}", + hex::encode(submittal_report.tx) + ); + Ok(()) + } + Err(e) => { + let ids = fragments + .iter() + .map(|f| f.id.as_u32().to_string()) + .join(", "); + + tracing::error!("Failed to submit fragments {ids}: {e}"); + Err(e.into()) + } } + } + + async fn has_pending_transactions(&self) -> Result { + self.storage.has_pending_txs().await.map_err(|e| e.into()) + } + + async fn next_fragments_to_submit(&self) -> Result>> { + let latest_height = self.fuel_api.latest_height().await?; + let starting_height = latest_height.saturating_sub(self.config.lookback_window); - let tx_hash = self.l1_adapter.submit_l2_state(data).await?; - self.storage - .record_pending_tx(tx_hash, fragment_ids) + // although we shouldn't know at this layer how many fragments the L1 can accept, we ignore + // this for now and put the eth value of max blobs per block (6). 
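+        // (EIP-4844 currently allows at most 6 blobs per block: 786,432 blob gas per
+        // block at 131,072 gas per blob.)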
+ let existing_fragments = self + .storage + .oldest_nonfinalized_fragments(starting_height, 6) .await?; - info!("submitted blob tx {}", hex::encode(tx_hash)); + Ok(NonEmpty::collect(existing_fragments)) + } - Ok(()) + async fn should_submit_fragments(&self, fragment_count: NonZeroUsize) -> Result { + if fragment_count >= self.config.fragments_to_accumulate { + return Ok(true); + } + info!( + "have only {} out of the target {} fragments per tx", + fragment_count, self.config.fragments_to_accumulate + ); + + let expired = self.is_timeout_expired().await?; + if expired { + info!( + "fragment accumulation timeout expired, proceeding with {} fragments", + fragment_count + ); + } + + Ok(expired) } - async fn is_tx_pending(&self) -> Result { - self.storage.has_pending_txs().await.map_err(|e| e.into()) + async fn submit_fragments_if_ready(&self) -> Result<()> { + if let Some(fragments) = self.next_fragments_to_submit().await? { + if self + .should_submit_fragments(fragments.len_nonzero()) + .await? + { + self.submit_fragments(fragments).await?; + } + } + Ok(()) } } -#[async_trait] -impl Runner for StateCommitter +impl Runner for StateCommitter where + F: ports::fuel::Api + Send + Sync, L1: ports::l1::Api + Send + Sync, - Db: Storage, + Db: Storage + Clone + Send + Sync, + C: Clock + Send + Sync, { async fn run(&mut self) -> Result<()> { - if self.is_tx_pending().await? { + if self.has_pending_transactions().await? { return Ok(()); - }; + } - self.submit_state().await?; + self.submit_fragments_if_ready().await?; Ok(()) } @@ -77,90 +194,230 @@ where #[cfg(test)] mod tests { - use mockall::predicate; - use ports::types::{L1Height, StateFragment, StateSubmission, TransactionResponse, U256}; - use storage::PostgresProcess; + use std::time::Duration; + + use clock::TestClock; use super::*; + use crate::{test_utils, Runner, StateCommitter}; - struct MockL1 { - api: ports::l1::MockApi, - } - impl MockL1 { - fn new() -> Self { - Self { - api: ports::l1::MockApi::new(), - } - } - } + #[tokio::test] + async fn submits_fragments_when_required_count_accumulated() -> Result<()> { + // given + let setup = test_utils::Setup::init().await; - #[async_trait::async_trait] - impl ports::l1::Api for MockL1 { - async fn submit_l2_state(&self, state_data: Vec) -> ports::l1::Result<[u8; 32]> { - self.api.submit_l2_state(state_data).await - } + let fragments = setup.insert_fragments(0, 4).await; - async fn get_block_number(&self) -> ports::l1::Result { - Ok(0.into()) - } + let tx_hash = [0; 32]; + let l1_mock_submit = test_utils::mocks::l1::expects_state_submissions([( + Some(NonEmpty::from_vec(fragments.clone()).unwrap()), + tx_hash, + )]); - async fn balance(&self) -> ports::l1::Result { - Ok(U256::ZERO) - } + let fuel_mock = test_utils::mocks::fuel::latest_height_is(0); + let mut state_committer = StateCommitter::new( + l1_mock_submit, + fuel_mock, + setup.db(), + Config { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 4.try_into().unwrap(), + }, + TestClock::default(), + ); - async fn get_transaction_response( - &self, - _tx_hash: [u8; 32], - ) -> ports::l1::Result> { - Ok(None) - } + // when + state_committer.run().await?; + + // then + // Mocks validate that the fragments have been sent + Ok(()) } - fn given_l1_that_expects_submission(fragment: StateFragment) -> MockL1 { - let mut l1 = MockL1::new(); + #[tokio::test] + async fn submits_fragments_on_timeout_before_accumulation() -> Result<()> { + // given + let clock = TestClock::default(); + let setup = 
test_utils::Setup::init().await; + + let fragments = setup.insert_fragments(0, 5).await; // Only 5 fragments, less than required + + let tx_hash = [1; 32]; + let l1_mock_submit = test_utils::mocks::l1::expects_state_submissions([( + Some(NonEmpty::from_vec(fragments.clone()).unwrap()), + tx_hash, + )]); + + let fuel_mock = test_utils::mocks::fuel::latest_height_is(0); + let mut state_committer = StateCommitter::new( + l1_mock_submit, + fuel_mock, + setup.db(), + Config { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 10.try_into().unwrap(), + }, + clock.clone(), + ); + + // Advance time beyond the timeout + clock.advance_time(Duration::from_secs(61)); - l1.api - .expect_submit_l2_state() - .with(predicate::eq(fragment.data)) - .return_once(move |_| Ok([1u8; 32])); + // when + state_committer.run().await?; - l1 + // then + // Mocks validate that the fragments have been sent despite insufficient accumulation + Ok(()) } - fn given_state() -> (StateSubmission, StateFragment) { - ( - StateSubmission { - id: None, - block_hash: [0u8; 32], - block_height: 1, + #[tokio::test] + async fn does_not_submit_fragments_before_required_count_or_timeout() -> Result<()> { + // given + let clock = TestClock::default(); + let setup = test_utils::Setup::init().await; + + let _fragments = setup.insert_fragments(0, 5).await; // Only 5 fragments, less than required + + let l1_mock_submit = test_utils::mocks::l1::expects_state_submissions([]); // Expect no submissions + + let fuel_mock = test_utils::mocks::fuel::latest_height_is(0); + let mut state_committer = StateCommitter::new( + l1_mock_submit, + fuel_mock, + setup.db(), + Config { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 10.try_into().unwrap(), }, - StateFragment { - id: None, - submission_id: None, - fragment_idx: 0, - data: vec![1, 2, 3], - created_at: ports::types::Utc::now(), + clock.clone(), + ); + + // Advance time less than the timeout + clock.advance_time(Duration::from_secs(30)); + + // when + state_committer.run().await?; + + // then + // Mocks validate that no fragments have been sent + Ok(()) + } + + #[tokio::test] + async fn submits_fragments_when_required_count_before_timeout() -> Result<()> { + // given + let clock = TestClock::default(); + let setup = test_utils::Setup::init().await; + + let fragments = setup.insert_fragments(0, 5).await; + + let tx_hash = [3; 32]; + let l1_mock_submit = test_utils::mocks::l1::expects_state_submissions([( + Some(NonEmpty::from_vec(fragments).unwrap()), + tx_hash, + )]); + + let fuel_mock = test_utils::mocks::fuel::latest_height_is(0); + let mut state_committer = StateCommitter::new( + l1_mock_submit, + fuel_mock, + setup.db(), + Config { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 5.try_into().unwrap(), }, - ) + clock.clone(), + ); + + // when + state_committer.run().await?; + + // then + // Mocks validate that the fragments have been sent + Ok(()) } #[tokio::test] - async fn test_submit_state() -> Result<()> { + async fn timeout_measured_from_last_finalized_fragment() -> Result<()> { // given - let (state, fragment) = given_state(); - let l1_mock = given_l1_that_expects_submission(fragment.clone()); + let clock = TestClock::default(); + let setup = test_utils::Setup::init().await; + + // Insert initial fragments + setup.commit_single_block_bundle(clock.now()).await; + + let fragments_to_submit = 
setup.insert_fragments(1, 2).await; + + let tx_hash = [4; 32]; + let l1_mock_submit = test_utils::mocks::l1::expects_state_submissions([( + Some(NonEmpty::from_vec(fragments_to_submit).unwrap()), + tx_hash, + )]); + + let fuel_mock = test_utils::mocks::fuel::latest_height_is(1); + let mut state_committer = StateCommitter::new( + l1_mock_submit, + fuel_mock, + setup.db(), + Config { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 10.try_into().unwrap(), + }, + clock.clone(), + ); - let process = PostgresProcess::shared().await.unwrap(); - let db = process.create_random_db().await?; - db.insert_state_submission(state, vec![fragment]).await?; - let mut committer = StateCommitter::new(l1_mock, db.clone()); + // Advance time to exceed the timeout since last finalized fragment + clock.advance_time(Duration::from_secs(60)); // when - committer.run().await.unwrap(); + state_committer.run().await?; // then - assert!(db.has_pending_txs().await?); + // Mocks validate that the fragments were sent even though the accumulation target was not reached + Ok(()) + } + + #[tokio::test] + async fn timeout_measured_from_startup_if_no_finalized_fragment() -> Result<()> { + // given + let clock = TestClock::default(); + let setup = test_utils::Setup::init().await; + + let fragments = setup.insert_fragments(0, 5).await; // Only 5 fragments, less than required + + let tx_hash = [5; 32]; + let l1_mock_submit = test_utils::mocks::l1::expects_state_submissions([( + Some(NonEmpty::from_vec(fragments.clone()).unwrap()), + tx_hash, + )]); + let fuel_mock = test_utils::mocks::fuel::latest_height_is(0); + let mut state_committer = StateCommitter::new( + l1_mock_submit, + fuel_mock, + setup.db(), + Config { + lookback_window: 1000, + fragment_accumulation_timeout: Duration::from_secs(60), + fragments_to_accumulate: 10.try_into().unwrap(), + }, + clock.clone(), + ); + + // Advance time beyond the timeout from startup + clock.advance_time(Duration::from_secs(61)); + + // when + state_committer.run().await?; + + // then + // Mocks validate that the fragments have been sent despite insufficient accumulation Ok(()) } } diff --git a/packages/services/src/state_importer.rs b/packages/services/src/state_importer.rs deleted file mode 100644 index 3fb707ce..00000000 --- a/packages/services/src/state_importer.rs +++ /dev/null @@ -1,223 +0,0 @@ -use async_trait::async_trait; -use ports::{ - fuel::FuelBlock, - storage::Storage, - types::{StateFragment, StateSubmission}, -}; -use tracing::info; -use validator::Validator; - -use crate::{Result, Runner}; - -pub struct StateImporter { - storage: Db, - fuel_adapter: A, - block_validator: BlockValidator, -} - -impl StateImporter { - pub fn new(storage: Db, fuel_adapter: A, block_validator: BlockValidator) -> Self { - Self { - storage, - fuel_adapter, - block_validator, - } - } -} - -impl StateImporter -where - Db: Storage, - A: ports::fuel::Api, - BlockValidator: Validator, -{ - async fn fetch_latest_block(&self) -> Result { - let latest_block = self.fuel_adapter.latest_block().await?; - - self.block_validator.validate(&latest_block)?; - - Ok(latest_block) - } - - async fn check_if_stale(&self, block_height: u32) -> Result { - let Some(submitted_height) = self.last_submitted_block_height().await? else { - return Ok(false); - }; - - Ok(submitted_height >= block_height) - } - - async fn last_submitted_block_height(&self) -> Result> { - Ok(self - .storage - .state_submission_w_latest_block() - .await? 
- .map(|submission| submission.block_height)) - } - - fn block_to_state_submission( - &self, - block: FuelBlock, - ) -> Result<(StateSubmission, Vec)> { - use itertools::Itertools; - - // Serialize the block into bytes - let fragments = block - .transactions - .iter() - .flat_map(|tx| tx.iter()) - .chunks(StateFragment::MAX_FRAGMENT_SIZE) - .into_iter() - .enumerate() - .map(|(index, chunk)| StateFragment { - id: None, - submission_id: None, - fragment_idx: index as u32, - data: chunk.copied().collect(), - created_at: ports::types::Utc::now(), - }) - .collect(); - - let submission = StateSubmission { - id: None, - block_hash: *block.id, - block_height: block.header.height, - }; - - Ok((submission, fragments)) - } - - async fn import_state(&self, block: FuelBlock) -> Result<()> { - let (submission, fragments) = self.block_to_state_submission(block)?; - self.storage - .insert_state_submission(submission, fragments) - .await?; - - Ok(()) - } -} - -#[async_trait] -impl Runner for StateImporter -where - Db: Storage, - Fuel: ports::fuel::Api, - BlockValidator: Validator, -{ - async fn run(&mut self) -> Result<()> { - let block = self.fetch_latest_block().await?; - - if self.check_if_stale(block.header.height).await? { - return Ok(()); - } - - if block.transactions.is_empty() { - return Ok(()); - } - - let block_id = block.id; - let block_height = block.header.height; - self.import_state(block).await?; - info!( - "imported state from fuel block: height: {}, id: {}", - block_height, block_id - ); - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use fuel_crypto::{Message, SecretKey, Signature}; - use ports::fuel::{FuelBlock, FuelBlockId, FuelConsensus, FuelHeader, FuelPoAConsensus}; - use rand::{rngs::StdRng, SeedableRng}; - use storage::PostgresProcess; - use validator::BlockValidator; - - use super::*; - - fn given_secret_key() -> SecretKey { - let mut rng = StdRng::seed_from_u64(42); - - SecretKey::random(&mut rng) - } - - fn given_a_block(height: u32, secret_key: &SecretKey) -> FuelBlock { - let header = given_header(height); - - let mut hasher = fuel_crypto::Hasher::default(); - hasher.input(header.prev_root.as_ref()); - hasher.input(header.height.to_be_bytes()); - hasher.input(header.time.0.to_be_bytes()); - hasher.input(header.application_hash.as_ref()); - - let id = FuelBlockId::from(hasher.digest()); - let id_message = Message::from_bytes(*id); - let signature = Signature::sign(secret_key, &id_message); - - FuelBlock { - id, - header, - consensus: FuelConsensus::PoAConsensus(FuelPoAConsensus { signature }), - transactions: vec![[2u8; 32].into()], - block_producer: Some(secret_key.public_key()), - } - } - - fn given_header(height: u32) -> FuelHeader { - let application_hash = "0x8b96f712e293e801d53da77113fec3676c01669c6ea05c6c92a5889fce5f649d" - .parse() - .unwrap(); - - ports::fuel::FuelHeader { - id: Default::default(), - da_height: Default::default(), - consensus_parameters_version: Default::default(), - state_transition_bytecode_version: Default::default(), - transactions_count: 1, - message_receipt_count: Default::default(), - transactions_root: Default::default(), - message_outbox_root: Default::default(), - event_inbox_root: Default::default(), - height, - prev_root: Default::default(), - time: tai64::Tai64(0), - application_hash, - } - } - - fn given_fetcher(block: FuelBlock) -> ports::fuel::MockApi { - let mut fetcher = ports::fuel::MockApi::new(); - - fetcher - .expect_latest_block() - .returning(move || Ok(block.clone())); - - fetcher - } - - #[tokio::test] - async fn 
test_import_state() -> Result<()> { - // given - let secret_key = given_secret_key(); - let block = given_a_block(1, &secret_key); - let fuel_mock = given_fetcher(block); - let block_validator = BlockValidator::new(*secret_key.public_key().hash()); - - let process = PostgresProcess::shared().await.unwrap(); - let db = process.create_random_db().await?; - let mut importer = StateImporter::new(db.clone(), fuel_mock, block_validator); - - // when - importer.run().await.unwrap(); - - // then - let fragments = db.get_unsubmitted_fragments().await?; - let latest_submission = db.state_submission_w_latest_block().await?.unwrap(); - assert_eq!(fragments.len(), 1); - assert_eq!(fragments[0].submission_id, latest_submission.id); - - Ok(()) - } -} diff --git a/packages/services/src/state_listener.rs b/packages/services/src/state_listener.rs index 05f59068..966787c7 100644 --- a/packages/services/src/state_listener.rs +++ b/packages/services/src/state_listener.rs @@ -1,40 +1,43 @@ -use async_trait::async_trait; use metrics::{ prometheus::{core::Collector, IntGauge, Opts}, RegistersMetrics, }; use ports::{ + clock::Clock, storage::Storage, - types::{SubmissionTx, TransactionState}, + types::{L1Tx, TransactionState}, }; use tracing::info; use super::Runner; -pub struct StateListener { +pub struct StateListener { l1_adapter: L1, storage: Db, num_blocks_to_finalize: u64, metrics: Metrics, + clock: C, } -impl StateListener { - pub fn new(l1_adapter: L1, storage: Db, num_blocks_to_finalize: u64) -> Self { +impl StateListener { + pub fn new(l1_adapter: L1, storage: Db, num_blocks_to_finalize: u64, clock: C) -> Self { Self { l1_adapter, storage, num_blocks_to_finalize, metrics: Metrics::default(), + clock, } } } -impl StateListener +impl StateListener where L1: ports::l1::Api, Db: Storage, + C: Clock, { - async fn check_pending_txs(&mut self, pending_txs: Vec) -> crate::Result<()> { + async fn check_pending_txs(&mut self, pending_txs: Vec) -> crate::Result<()> { let current_block_number: u64 = self.l1_adapter.get_block_number().await?.into(); for tx in pending_txs { @@ -45,7 +48,7 @@ where if !tx_response.succeeded() { self.storage - .update_submission_tx_state(tx_hash, TransactionState::Failed) + .update_tx_state(tx_hash, TransactionState::Failed) .await?; info!("failed blob tx {}", hex::encode(tx_hash)); @@ -59,25 +62,25 @@ where } self.storage - .update_submission_tx_state(tx_hash, TransactionState::Finalized) + .update_tx_state(tx_hash, TransactionState::Finalized(self.clock.now())) .await?; info!("finalized blob tx {}", hex::encode(tx_hash)); self.metrics .last_eth_block_w_blob - .set(tx_response.block_number() as i64); // TODO: conversion + .set(i64::try_from(tx_response.block_number()).unwrap_or(i64::MAX)) } Ok(()) } } -#[async_trait] -impl Runner for StateListener +impl Runner for StateListener where L1: ports::l1::Api + Send + Sync, Db: Storage, + C: Clock + Send + Sync, { async fn run(&mut self) -> crate::Result<()> { let pending_txs = self.storage.get_pending_txs().await?; @@ -97,7 +100,7 @@ struct Metrics { last_eth_block_w_blob: IntGauge, } -impl RegistersMetrics for StateListener { +impl RegistersMetrics for StateListener { fn metrics(&self) -> Vec> { vec![Box::new(self.metrics.last_eth_block_w_blob.clone())] } @@ -119,129 +122,52 @@ impl Default for Metrics { #[cfg(test)] mod tests { - use mockall::predicate; - use ports::types::{L1Height, StateFragment, StateSubmission, TransactionResponse, U256}; - use storage::PostgresProcess; + use clock::TestClock; use super::*; + use crate::test_utils::{ + 
self, + mocks::{self, l1::TxStatus}, + }; - struct MockL1 { - api: ports::l1::MockApi, - } - impl MockL1 { - fn new() -> Self { - Self { - api: ports::l1::MockApi::new(), - } - } - } - - #[async_trait::async_trait] - impl ports::l1::Api for MockL1 { - async fn submit_l2_state(&self, _state_data: Vec) -> ports::l1::Result<[u8; 32]> { - Ok([0; 32]) - } - - async fn get_block_number(&self) -> ports::l1::Result { - self.api.get_block_number().await - } - - async fn balance(&self) -> ports::l1::Result { - Ok(U256::ZERO) - } - - async fn get_transaction_response( - &self, - tx_hash: [u8; 32], - ) -> ports::l1::Result> { - self.api.get_transaction_response(tx_hash).await - } - } - - fn given_l1_that_expects_get_transaction_receipt( - tx_hash: [u8; 32], - current_block_number: u32, - block_number: u64, - ) -> MockL1 { - let mut l1 = MockL1::new(); - - l1.api - .expect_get_block_number() - .return_once(move || Ok(current_block_number.into())); - - let transaction_response = TransactionResponse::new(block_number, true); - l1.api - .expect_get_transaction_response() - .with(predicate::eq(tx_hash)) - .return_once(move |_| Ok(Some(transaction_response))); - - l1 - } - - fn given_l1_that_returns_failed_transaction(tx_hash: [u8; 32]) -> MockL1 { - let mut l1 = MockL1::new(); - - l1.api - .expect_get_block_number() - .return_once(move || Ok(0u32.into())); - - let transaction_response = TransactionResponse::new(0, false); + #[tokio::test] + async fn state_listener_will_update_tx_state_if_finalized() -> crate::Result<()> { + // given + let setup = test_utils::Setup::init().await; - l1.api - .expect_get_transaction_response() - .with(predicate::eq(tx_hash)) - .return_once(move |_| Ok(Some(transaction_response))); + let _ = setup.insert_fragments(0, 1).await; - l1 - } + let tx_hash = [0; 32]; + setup.send_fragments(tx_hash).await; - fn given_state() -> (StateSubmission, StateFragment, Vec) { - let submission = StateSubmission { - id: None, - block_hash: [0u8; 32], - block_height: 1, - }; - let fragment_id = 1; - let fragment = StateFragment { - id: Some(fragment_id), - submission_id: None, - fragment_idx: 0, - data: vec![1, 2, 3], - created_at: ports::types::Utc::now(), - }; - let fragment_ids = vec![fragment_id]; - - (submission, fragment, fragment_ids) - } + let num_blocks_to_finalize = 1u64; + let current_height = 1; - #[tokio::test] - async fn state_listener_will_update_tx_state_if_finalized() -> crate::Result<()> { - // given - let (state, fragment, fragment_ids) = given_state(); - let tx_hash = [1; 32]; - - let process = PostgresProcess::shared().await.unwrap(); - let db = process.create_random_db().await?; - db.insert_state_submission(state, vec![fragment]).await?; - db.record_pending_tx(tx_hash, fragment_ids).await?; - - let current_block_number = 34; - let tx_block_number = 32; - let l1_mock = given_l1_that_expects_get_transaction_receipt( - tx_hash, - current_block_number, - tx_block_number, + let tx_height = current_height - num_blocks_to_finalize; + let l1_mock = mocks::l1::txs_finished( + current_height as u32, + tx_height as u32, + [(tx_hash, TxStatus::Success)], ); - let num_blocks_to_finalize = 1; - let mut listener = StateListener::new(l1_mock, db.clone(), num_blocks_to_finalize); - assert!(db.has_pending_txs().await?); + let test_clock = TestClock::default(); + let now = test_clock.now(); + let mut listener = + StateListener::new(l1_mock, setup.db(), num_blocks_to_finalize, test_clock); // when listener.run().await.unwrap(); // then - assert!(!db.has_pending_txs().await?); + 
assert!(!setup.db().has_pending_txs().await?); + assert_eq!( + setup + .db() + .last_time_a_fragment_was_finalized() + .await? + .unwrap(), + now + ); Ok(()) } @@ -249,31 +175,42 @@ mod tests { #[tokio::test] async fn state_listener_will_not_update_tx_state_if_not_finalized() -> crate::Result<()> { // given - let (state, fragment, fragment_ids) = given_state(); - let tx_hash = [1; 32]; - - let process = PostgresProcess::shared().await.unwrap(); - let db = process.create_random_db().await?; - db.insert_state_submission(state, vec![fragment]).await?; - db.record_pending_tx(tx_hash, fragment_ids).await?; - - let current_block_number = 34; - let tx_block_number = 32; - let l1_mock = given_l1_that_expects_get_transaction_receipt( - tx_hash, - current_block_number, - tx_block_number, + let setup = test_utils::Setup::init().await; + + let _ = setup.insert_fragments(0, 1).await; + + let tx_hash = [0; 32]; + setup.send_fragments(tx_hash).await; + + let num_blocks_to_finalize = 5u64; + let current_height = 5; + + let tx_height = current_height - 2; + assert!(current_height - tx_height < num_blocks_to_finalize); + + let l1_mock = mocks::l1::txs_finished( + current_height as u32, + tx_height as u32, + [(tx_hash, TxStatus::Success)], ); - let num_blocks_to_finalize = 4; - let mut listener = StateListener::new(l1_mock, db.clone(), num_blocks_to_finalize); - assert!(db.has_pending_txs().await?); + let mut listener = StateListener::new( + l1_mock, + setup.db(), + num_blocks_to_finalize, + TestClock::default(), + ); // when listener.run().await.unwrap(); // then - assert!(db.has_pending_txs().await?); + assert!(setup.db().has_pending_txs().await?); + assert!(setup + .db() + .last_time_a_fragment_was_finalized() + .await? + .is_none()); Ok(()) } @@ -281,25 +218,45 @@ mod tests { #[tokio::test] async fn state_listener_will_update_tx_state_if_failed() -> crate::Result<()> { // given - let (state, fragment, fragment_ids) = given_state(); - let tx_hash = [1; 32]; + let setup = test_utils::Setup::init().await; + + let _ = setup.insert_fragments(0, 1).await; - let process = PostgresProcess::shared().await.unwrap(); - let db = process.create_random_db().await?; - db.insert_state_submission(state, vec![fragment]).await?; - db.record_pending_tx(tx_hash, fragment_ids).await?; + let tx_hash = [0; 32]; + setup.send_fragments(tx_hash).await; - let l1_mock = given_l1_that_returns_failed_transaction(tx_hash); + let num_blocks_to_finalize = 5u64; + let current_height = 5; - let num_blocks_to_finalize = 4; - let mut listener = StateListener::new(l1_mock, db.clone(), num_blocks_to_finalize); - assert!(db.has_pending_txs().await?); + let tx_height = current_height - 2; + assert!( + current_height - tx_height < num_blocks_to_finalize, + "we should choose the tx height such that it's not finalized to showcase that we don't wait for finalization for failed txs" + ); + + let l1_mock = mocks::l1::txs_finished( + current_height as u32, + tx_height as u32, + [(tx_hash, TxStatus::Failure)], + ); + + let mut listener = StateListener::new( + l1_mock, + setup.db(), + num_blocks_to_finalize, + TestClock::default(), + ); // when listener.run().await.unwrap(); // then - assert!(!db.has_pending_txs().await?); + assert!(!setup.db().has_pending_txs().await?); + assert!(setup + .db() + .last_time_a_fragment_was_finalized() + .await? 
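+                // grounded in this test's intent: a failed tx is not a finalization, so no timestamp is recorded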
+ .is_none()); Ok(()) } diff --git a/packages/validator/src/validator.rs b/packages/services/src/validator.rs similarity index 52% rename from packages/validator/src/validator.rs rename to packages/services/src/validator.rs index bd6c0ed2..422ff5cb 100644 --- a/packages/validator/src/validator.rs +++ b/packages/services/src/validator.rs @@ -1,13 +1,19 @@ -use fuel_core_client::client::types::{ - block::{ - Block as FuelBlock, Consensus as FuelConsensus, Header as FuelHeader, - PoAConsensus as FuelPoAConsensus, - }, - primitives::{BlockId as FuelBlockId, Bytes32 as FuelBytes32}, -}; -use fuel_crypto::{Hasher, Message}; +use ports::fuel::{Consensus, FuelBytes32, FuelHeader}; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("{0}")] + BlockValidation(String), +} -use crate::{block::ValidatedFuelBlock, Error, Result, Validator}; +pub type Result = std::result::Result; + +#[cfg_attr(feature = "test-helpers", mockall::automock)] +pub trait Validator: Send + Sync { + fn validate(&self, id: FuelBytes32, header: &FuelHeader, consensus: &Consensus) -> Result<()>; +} +use fuel_crypto::{Hasher, Message}; +use ports::fuel::{FuelBlockId, FuelPoAConsensus}; #[derive(Debug)] pub struct BlockValidator { @@ -15,8 +21,8 @@ pub struct BlockValidator { } impl Validator for BlockValidator { - fn validate(&self, fuel_block: &FuelBlock) -> Result { - self._validate(fuel_block) + fn validate(&self, id: FuelBytes32, header: &FuelHeader, consensus: &Consensus) -> Result<()> { + self._validate(id, header, consensus) } } @@ -25,58 +31,40 @@ impl BlockValidator { Self { producer_addr } } - fn _validate(&self, fuel_block: &FuelBlock) -> Result { - self.validate_producer_addr(fuel_block)?; - Self::validate_block_id(fuel_block)?; - self.validate_block_signature(fuel_block)?; - - Ok(ValidatedFuelBlock { - hash: *fuel_block.id, - height: fuel_block.header.height, - }) - } - - fn validate_producer_addr(&self, fuel_block: &FuelBlock) -> Result<()> { - let Some(producer_addr) = fuel_block.block_producer().map(|key| key.hash()) else { - return Err(Error::BlockValidation( - "producer public key not found in fuel block".to_string(), - )); - }; - - if *producer_addr != self.producer_addr { - return Err(Error::BlockValidation(format!( - "producer addr '{}' does not match expected addr '{}'.", - hex::encode(producer_addr), - hex::encode(self.producer_addr) - ))); + fn _validate(&self, id: FuelBytes32, header: &FuelHeader, consensus: &Consensus) -> Result<()> { + // Genesis block is a special case. It does not have a producer address or a signature. 
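+        // `Consensus::Genesis` carries no PoA signature, so the signature check
+        // below would reject it; skip validation and accept the block as-is.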
+ if let Consensus::Genesis(_) = consensus { + return Ok(()); } + Self::validate_block_id(id, header)?; + self.validate_block_signature(id, consensus)?; + Ok(()) } - fn validate_block_id(fuel_block: &FuelBlock) -> Result<()> { - let calculated_block_id = Self::calculate_block_id(fuel_block); - if fuel_block.id != calculated_block_id { + fn validate_block_id(id: FuelBytes32, header: &FuelHeader) -> Result<()> { + let calculated_block_id = Self::calculate_block_id(header); + if id != calculated_block_id { return Err(Error::BlockValidation(format!( "fuel block id `{:x}` does not match \ calculated block id `{calculated_block_id:x}`.", - fuel_block.id, + id, ))); } Ok(()) } - fn validate_block_signature(&self, fuel_block: &FuelBlock) -> Result<()> { - let FuelConsensus::PoAConsensus(FuelPoAConsensus { signature }) = fuel_block.consensus - else { + fn validate_block_signature(&self, id: FuelBytes32, consensus: &Consensus) -> Result<()> { + let Consensus::PoAConsensus(FuelPoAConsensus { signature }) = consensus else { return Err(Error::BlockValidation( "PoAConsensus signature not found in fuel block".to_string(), )); }; - let recovered_producer_addr = *signature - .recover(&Message::from_bytes(*fuel_block.id)) + let recovered_producer_addr = signature + .recover(&Message::from_bytes(*id)) .map_err(|e| { Error::BlockValidation(format!( "failed to recover public key from PoAConsensus signature: {e:?}", @@ -84,7 +72,7 @@ impl BlockValidator { })? .hash(); - if recovered_producer_addr != self.producer_addr { + if *recovered_producer_addr != self.producer_addr { return Err(Error::BlockValidation(format!( "recovered producer addr `{}` does not match \ expected addr`{}`.", @@ -96,8 +84,8 @@ impl BlockValidator { Ok(()) } - fn calculate_block_id(fuel_block: &FuelBlock) -> FuelBlockId { - let application_hash = Self::application_hash(&fuel_block.header); + fn calculate_block_id(header: &FuelHeader) -> FuelBlockId { + let application_hash = Self::application_hash(header); let mut hasher = Hasher::default(); let FuelHeader { @@ -105,7 +93,7 @@ impl BlockValidator { height, time, .. 
- } = &fuel_block.header; + } = &header; hasher.input(prev_root.as_ref()); hasher.input(height.to_be_bytes()); @@ -144,20 +132,13 @@ impl BlockValidator { #[cfg(test)] mod tests { - use fuel_crypto::{SecretKey, Signature}; + use fuel_crypto::{PublicKey, SecretKey, Signature}; + use ports::fuel::{FuelBlock, FuelGenesis}; use rand::{rngs::StdRng, SeedableRng}; + use tai64::Tai64; use super::*; - #[test] - #[should_panic(expected = "producer public key not found in fuel block")] - fn validate_public_key_missing() { - let fuel_block = given_a_block(None); - let validator = BlockValidator::new([0; 32]); - - validator.validate(&fuel_block).unwrap(); - } - #[test] #[should_panic(expected = "does not match expected addr")] fn validate_public_key_mistmach() { @@ -165,7 +146,9 @@ mod tests { let fuel_block = given_a_block(Some(secret_key)); let validator = BlockValidator::new([0; 32]); - validator.validate(&fuel_block).unwrap(); + validator + .validate(fuel_block.id, &fuel_block.header, &fuel_block.consensus) + .unwrap(); } #[test] @@ -176,7 +159,9 @@ mod tests { fuel_block.header.height = 42; // Change a value to get a different block id let validator = BlockValidator::new(*secret_key.public_key().hash()); - validator.validate(&fuel_block).unwrap(); + validator + .validate(fuel_block.id, &fuel_block.header, &fuel_block.consensus) + .unwrap(); } #[test] @@ -184,10 +169,12 @@ mod tests { fn validate_block_consensus_not_poa() { let secret_key = given_secret_key(); let mut fuel_block = given_a_block(Some(secret_key)); - fuel_block.consensus = FuelConsensus::Unknown; + fuel_block.consensus = Consensus::Unknown; let validator = BlockValidator::new(*secret_key.public_key().hash()); - validator.validate(&fuel_block).unwrap(); + validator + .validate(fuel_block.id, &fuel_block.header, &fuel_block.consensus) + .unwrap(); } #[test] @@ -204,12 +191,14 @@ mod tests { Signature::sign(&different_secret_key, &id_message) }; - fuel_block.consensus = FuelConsensus::PoAConsensus(FuelPoAConsensus { + fuel_block.consensus = Consensus::PoAConsensus(FuelPoAConsensus { signature: invalid_signature, }); let validator = BlockValidator::new(*correct_secret_key.public_key().hash()); - validator.validate(&fuel_block).unwrap(); + validator + .validate(fuel_block.id, &fuel_block.header, &fuel_block.consensus) + .unwrap(); } #[test] @@ -218,7 +207,83 @@ mod tests { let fuel_block = given_a_block(Some(secret_key)); let validator = BlockValidator::new(*secret_key.public_key().hash()); - validator.validate(&fuel_block).unwrap(); + validator + .validate(fuel_block.id, &fuel_block.header, &fuel_block.consensus) + .unwrap(); + } + + #[test] + fn treats_genesis_block_differently() { + let zeroed_producer_pubkey: PublicKey = Default::default(); + let block = FuelBlock { + id: "0xdd87728ce9c2539af61d6c5326c234c5cb0722b14a8c059f5126ca2a8ca3b4e2" + .parse() + .unwrap(), + header: FuelHeader { + id: "0xdd87728ce9c2539af61d6c5326c234c5cb0722b14a8c059f5126ca2a8ca3b4e2" + .parse() + .unwrap(), + da_height: 5827607, + consensus_parameters_version: 0, + state_transition_bytecode_version: 0, + transactions_count: 0, + message_receipt_count: 0, + transactions_root: + "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + .parse() + .unwrap(), + message_outbox_root: + "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + .parse() + .unwrap(), + event_inbox_root: + "0x0000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap(), + height: 0, + prev_root: 
"0x0000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap(), + time: Tai64(4611686018427387914), + application_hash: + "0x7cb9d322996c4efb45f92aa67a0cb351530bc320eb2db91758a8f4b23f8428c5" + .parse() + .unwrap(), + }, + consensus: Consensus::Genesis(FuelGenesis { + chain_config_hash: + "0xd0df79ce0a5e69a88735306dcc9259d9c1d6b060f14cabe4df2b8afdeea8693b" + .parse() + .unwrap(), + coins_root: "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + .parse() + .unwrap(), + contracts_root: + "0x70e4e3384ffe470a3802f0c1ff5fbb59fcea42329ef5bb9ef439d1db8853f438" + .parse() + .unwrap(), + messages_root: "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + .parse() + .unwrap(), + transactions_root: + "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + .parse() + .unwrap(), + }), + transactions: vec![], + block_producer: Some(zeroed_producer_pubkey), + }; + + let actual_producer_address = [8; 32]; + assert_ne!(actual_producer_address, *zeroed_producer_pubkey.hash()); + + let validator = BlockValidator::new(actual_producer_address); + + // when + let res = validator.validate(block.id, &block.header, &block.consensus); + + // then + res.unwrap(); } fn given_secret_key() -> SecretKey { @@ -229,7 +294,7 @@ mod tests { fn given_a_block(secret_key: Option) -> FuelBlock { let header = given_header(); - let id: FuelBytes32 = "0x57131ec6e99caafc08803aa946093e02c4303a305e5cc959ad84b775e668a5c3" + let id: FuelBytes32 = "0ae93c231f7f348f803d5f2d1fc4d7b6ada596e72c06f8c6c2387c32735969f7" .parse() .unwrap(); @@ -240,7 +305,7 @@ mod tests { FuelBlock { id, header, - consensus: FuelConsensus::PoAConsensus(FuelPoAConsensus { signature }), + consensus: Consensus::PoAConsensus(FuelPoAConsensus { signature }), transactions: vec![], block_producer: Some(secret_key.public_key()), } @@ -248,7 +313,7 @@ mod tests { FuelBlock { id, header, - consensus: FuelConsensus::Unknown, + consensus: Consensus::Unknown, transactions: vec![], block_producer: None, } @@ -270,7 +335,7 @@ mod tests { transactions_root: Default::default(), message_outbox_root: Default::default(), event_inbox_root: Default::default(), - height: Default::default(), + height: 1, prev_root: Default::default(), time: tai64::Tai64(0), application_hash, diff --git a/packages/services/src/wallet_balance_tracker.rs b/packages/services/src/wallet_balance_tracker.rs index 1d75e82c..1c9c152c 100644 --- a/packages/services/src/wallet_balance_tracker.rs +++ b/packages/services/src/wallet_balance_tracker.rs @@ -64,7 +64,6 @@ impl Default for Metrics { } } -#[async_trait::async_trait] impl Runner for WalletBalanceTracker where Api: Send + Sync + ports::l1::Api, @@ -114,7 +113,7 @@ mod tests { let mut eth_adapter = l1::MockApi::new(); eth_adapter .expect_balance() - .return_once(move || Ok(balance)); + .return_once(move || Box::pin(async move { Ok(balance) })); eth_adapter } diff --git a/packages/storage/Cargo.toml b/packages/storage/Cargo.toml index 05719b8f..796b95f9 100644 --- a/packages/storage/Cargo.toml +++ b/packages/storage/Cargo.toml @@ -10,8 +10,9 @@ publish = { workspace = true } rust-version = { workspace = true } [dependencies] -async-trait = { workspace = true } +delegate = { workspace = true, optional = true } hex = { workspace = true } +itertools = { workspace = true, features = ["use_alloc"] } ports = { workspace = true, features = ["storage"] } rand = { workspace = true, optional = true } serde = { workspace = true } @@ -24,13 +25,16 @@ sqlx = { workspace = true, features = [ 
"time", "chrono", ] } -testcontainers = { workspace = true, optional = true } +testcontainers = { workspace = true, optional = true, features = [ + "signal-hook", + "watchdog", +] } thiserror = { workspace = true } tokio = { workspace = true, optional = true } [dev-dependencies] ports = { workspace = true, features = ["storage"] } -rand = { workspace = true } +rand = { workspace = true, features = ["small_rng"] } storage = { workspace = true, features = ["test-helpers"] } tokio = { workspace = true } @@ -39,5 +43,6 @@ test-helpers = [ "dep:testcontainers", "tokio/sync", "dep:rand", + "dep:delegate", "ports/test-helpers", ] diff --git a/packages/storage/migrations/0002_better_fragmentation.up.sql b/packages/storage/migrations/0002_better_fragmentation.up.sql new file mode 100644 index 00000000..9a20d074 --- /dev/null +++ b/packages/storage/migrations/0002_better_fragmentation.up.sql @@ -0,0 +1,51 @@ +BEGIN; + +DELETE FROM l1_transaction_fragments; +DELETE FROM l1_fragments; + +CREATE TABLE IF NOT EXISTS fuel_blocks ( + hash BYTEA PRIMARY KEY NOT NULL, + height BIGINT NOT NULL UNIQUE CHECK (height >= 0), + CHECK (octet_length(hash) = 32), + data BYTEA NOT NULL +); + +-- Create new 'bundles' table to represent groups of blocks +CREATE TABLE IF NOT EXISTS bundles ( + id SERIAL PRIMARY KEY, + start_height BIGINT NOT NULL CHECK (start_height >= 0), + end_height BIGINT NOT NULL CHECK (end_height >= start_height) -- Ensure valid range +); + +CREATE INDEX idx_bundles_start_end ON bundles (start_height, end_height); + + +ALTER TABLE l1_fragments +DROP COLUMN submission_id, +DROP COLUMN created_at, +ADD COLUMN total_bytes BIGINT NOT NULL CHECK (total_bytes > 0), +ADD COLUMN unused_bytes BIGINT NOT NULL CHECK (unused_bytes >= 0), +ADD COLUMN bundle_id INTEGER REFERENCES bundles(id) NOT NULL, +ADD CONSTRAINT check_data_not_empty CHECK (octet_length(data) > 0), +ALTER COLUMN fragment_idx TYPE INTEGER; + +ALTER TABLE l1_fragments +RENAME COLUMN fragment_idx TO idx; + + +-- Add the new finalized_at column with UTC timestamp, allowing NULL values initially +ALTER TABLE l1_transactions +ADD COLUMN finalized_at TIMESTAMPTZ; + +-- Update rows where state is 1 and set finalized_at to the current timestamp +UPDATE l1_transactions +SET finalized_at = NOW() +WHERE state = 1; + +-- Add a check constraint to ensure finalized_at is not null when state is 1 +ALTER TABLE l1_transactions +ADD CONSTRAINT state_finalized_check +CHECK (state != 1 OR finalized_at IS NOT NULL); + + +COMMIT; diff --git a/packages/storage/src/lib.rs b/packages/storage/src/lib.rs index 05ccb4e3..26745a3b 100644 --- a/packages/storage/src/lib.rs +++ b/packages/storage/src/lib.rs @@ -1,24 +1,65 @@ -#![deny(unused_crate_dependencies)] -mod tables; +mod mappings; #[cfg(feature = "test-helpers")] mod test_instance; + +use std::ops::RangeInclusive; + #[cfg(feature = "test-helpers")] pub use test_instance::*; mod error; mod postgres; use ports::{ - storage::{Result, Storage}, - types::{BlockSubmission, StateFragment, StateSubmission, SubmissionTx, TransactionState}, + storage::{BundleFragment, Result, SequentialFuelBlocks, Storage}, + types::{ + BlockSubmission, DateTime, Fragment, L1Tx, NonEmpty, NonNegative, TransactionState, Utc, + }, }; pub use postgres::{DbConfig, Postgres}; -#[async_trait::async_trait] impl Storage for Postgres { async fn insert(&self, submission: BlockSubmission) -> Result<()> { Ok(self._insert(submission).await?) 
} + async fn oldest_nonfinalized_fragments( + &self, + starting_height: u32, + limit: usize, + ) -> Result> { + Ok(self + ._oldest_nonfinalized_fragments(starting_height, limit) + .await?) + } + + async fn missing_blocks( + &self, + starting_height: u32, + current_height: u32, + ) -> Result>> { + self._missing_blocks(starting_height, current_height) + .await + .map_err(Into::into) + } + + async fn insert_blocks(&self, blocks: NonEmpty) -> Result<()> { + Ok(self._insert_blocks(blocks).await?) + } + + async fn insert_bundle_and_fragments( + &self, + block_range: RangeInclusive, + fragments: NonEmpty, + ) -> Result<()> { + Ok(self + ._insert_bundle_and_fragments(block_range, fragments) + .await?) + } + + async fn last_time_a_fragment_was_finalized(&self) -> Result>> { + Ok(self._last_time_a_fragment_was_finalized().await?) + } + async fn submission_w_latest_block(&self) -> Result> { Ok(self._submission_w_latest_block().await?) } @@ -27,23 +68,25 @@ impl Storage for Postgres { Ok(self._set_submission_completed(fuel_block_hash).await?) } - async fn insert_state_submission( + async fn lowest_sequence_of_unbundled_blocks( &self, - submission: StateSubmission, - fragments: Vec, - ) -> Result<()> { - Ok(self._insert_state_submission(submission, fragments).await?) + starting_height: u32, + limit: usize, + ) -> Result> { + Ok(self + ._lowest_unbundled_blocks(starting_height, limit) + .await?) } - async fn get_unsubmitted_fragments(&self) -> Result> { - Ok(self._get_unsubmitted_fragments().await?) - } - - async fn record_pending_tx(&self, tx_hash: [u8; 32], fragment_ids: Vec) -> Result<()> { + async fn record_pending_tx( + &self, + tx_hash: [u8; 32], + fragment_ids: NonEmpty>, + ) -> Result<()> { Ok(self._record_pending_tx(tx_hash, fragment_ids).await?) } - async fn get_pending_txs(&self) -> Result> { + async fn get_pending_txs(&self) -> Result> { Ok(self._get_pending_txs().await?) } @@ -51,50 +94,60 @@ impl Storage for Postgres { Ok(self._has_pending_txs().await?) } - async fn state_submission_w_latest_block(&self) -> Result> { - Ok(self._state_submission_w_latest_block().await?) - } - - async fn update_submission_tx_state( - &self, - hash: [u8; 32], - state: TransactionState, - ) -> Result<()> { - Ok(self._update_submission_tx_state(hash, state).await?) + async fn update_tx_state(&self, hash: [u8; 32], state: TransactionState) -> Result<()> { + Ok(self._update_tx_state(hash, state).await?) 
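+        // note: `TransactionState::Finalized` carries the finalization timestamp
+        // that `last_time_a_fragment_was_finalized` above reports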
} } #[cfg(test)] mod tests { + use itertools::Itertools; use ports::{ - storage::{Error, Result, Storage}, - types::{BlockSubmission, StateFragment, StateSubmission, TransactionState}, + storage::{Error, Storage}, + types::{nonempty, CollectNonEmpty}, }; - use rand::{thread_rng, Rng}; - use storage as _; - - use crate::PostgresProcess; + use rand::{thread_rng, Rng, SeedableRng}; + + use super::*; + + // Helper function to create a storage instance for testing + async fn start_db() -> DbWithProcess { + PostgresProcess::shared() + .await + .unwrap() + .create_random_db() + .await + .unwrap() + } fn random_non_zero_height() -> u32 { let mut rng = thread_rng(); rng.gen_range(1..u32::MAX) } + fn given_incomplete_submission(fuel_block_height: u32) -> BlockSubmission { + BlockSubmission { + block_hash: rand::random(), + block_height: fuel_block_height, + completed: false, + submittal_height: 0.into(), + } + } + #[tokio::test] - async fn can_insert_and_find_latest_block() { + async fn can_insert_and_find_latest_block_submission() { // given - let process = PostgresProcess::shared().await.unwrap(); - let db = process.create_random_db().await.unwrap(); + let storage = start_db().await; let latest_height = random_non_zero_height(); let latest_submission = given_incomplete_submission(latest_height); - db.insert(latest_submission.clone()).await.unwrap(); + storage.insert(latest_submission.clone()).await.unwrap(); let older_submission = given_incomplete_submission(latest_height - 1); - db.insert(older_submission).await.unwrap(); + storage.insert(older_submission).await.unwrap(); // when - let actual = db.submission_w_latest_block().await.unwrap().unwrap(); + let actual = storage.submission_w_latest_block().await.unwrap().unwrap(); // then assert_eq!(actual, latest_submission); @@ -103,16 +156,15 @@ mod tests { #[tokio::test] async fn can_update_completion_status() { // given - let process = PostgresProcess::shared().await.unwrap(); - let db = process.create_random_db().await.unwrap(); + let storage = start_db().await; let height = random_non_zero_height(); let submission = given_incomplete_submission(height); let block_hash = submission.block_hash; - db.insert(submission).await.unwrap(); + storage.insert(submission).await.unwrap(); // when - let submission = db.set_submission_completed(block_hash).await.unwrap(); + let submission = storage.set_submission_completed(block_hash).await.unwrap(); // then assert!(submission.completed); @@ -121,188 +173,466 @@ mod tests { #[tokio::test] async fn updating_a_missing_submission_causes_an_error() { // given - let process = PostgresProcess::shared().await.unwrap(); - let db = process.create_random_db().await.unwrap(); + let storage = start_db().await; let height = random_non_zero_height(); let submission = given_incomplete_submission(height); let block_hash = submission.block_hash; // when - let result = db.set_submission_completed(block_hash).await; + let result = storage.set_submission_completed(block_hash).await; + + // then + if let Err(Error::Database(msg)) = result { + let block_hash_hex = hex::encode(block_hash); + assert_eq!( + msg, + format!( + "Cannot set submission to completed! 
Submission of block: `{}` not found in DB.", + block_hash_hex + ) + ); + } else { + panic!("Expected storage error"); + } + } + + async fn ensure_some_fragments_exists_in_the_db( + storage: impl Storage, + ) -> NonEmpty> { + storage + .insert_bundle_and_fragments( + 0..=0, + nonempty!( + Fragment { + data: nonempty![0], + unused_bytes: 1000, + total_bytes: 100.try_into().unwrap() + }, + Fragment { + data: nonempty![1], + unused_bytes: 1000, + total_bytes: 100.try_into().unwrap() + } + ), + ) + .await + .unwrap(); + + storage + .oldest_nonfinalized_fragments(0, 2) + .await + .unwrap() + .into_iter() + .map(|f| f.id) + .collect_nonempty() + .unwrap() + } + + #[tokio::test] + async fn can_record_and_get_pending_txs() { + // given + let storage = start_db().await; + + let fragment_ids = ensure_some_fragments_exists_in_the_db(&storage).await; + + let tx_hash = rand::random::<[u8; 32]>(); + storage + .record_pending_tx(tx_hash, fragment_ids) + .await + .unwrap(); + + // when + let has_pending = storage.has_pending_txs().await.unwrap(); + let pending_txs = storage.get_pending_txs().await.unwrap(); + + // then + assert!(has_pending); + assert_eq!(pending_txs.len(), 1); + assert_eq!(pending_txs[0].hash, tx_hash); + assert_eq!(pending_txs[0].state, TransactionState::Pending); + } + + #[tokio::test] + async fn can_update_tx_state() { + // given + let storage = start_db().await; + + let fragment_ids = ensure_some_fragments_exists_in_the_db(&storage).await; + let tx_hash = rand::random::<[u8; 32]>(); + storage + .record_pending_tx(tx_hash, fragment_ids) + .await + .unwrap(); + + // when + storage + .update_tx_state(tx_hash, TransactionState::Finalized(Utc::now())) + .await + .unwrap(); // then - let Err(Error::Database(msg)) = result else { - panic!("should be storage error"); + let has_pending = storage.has_pending_txs().await.unwrap(); + let pending_txs = storage.get_pending_txs().await.unwrap(); + + assert!(!has_pending); + assert!(pending_txs.is_empty()); + } + + #[tokio::test] + async fn can_insert_bundle_and_fragments() { + // given + let storage = start_db().await; + + let block_range = 1..=5; + let fragment_1 = Fragment { + data: nonempty![1u8, 2, 3], + unused_bytes: 1000, + total_bytes: 100.try_into().unwrap(), + }; + let fragment_2 = Fragment { + data: nonempty![4u8, 5, 6], + unused_bytes: 1000, + total_bytes: 100.try_into().unwrap(), }; + let fragments = nonempty![fragment_1.clone(), fragment_2.clone()]; - let block_hash = hex::encode(block_hash); - assert_eq!(msg, format!("Cannot set submission to completed! 
Submission of block: `{block_hash}` not found in DB.")); + // when + storage + .insert_bundle_and_fragments(block_range.clone(), fragments.clone()) + .await + .unwrap(); + + // then + let inserted_fragments = storage + .oldest_nonfinalized_fragments(0, 2) + .await + .unwrap() + .into_iter() + .collect_vec(); + + assert_eq!(inserted_fragments.len(), 2); + for (inserted_fragment, given_fragment) in inserted_fragments.iter().zip(fragments.iter()) { + assert_eq!(inserted_fragment.fragment, *given_fragment); + } } - fn given_incomplete_submission(fuel_block_height: u32) -> BlockSubmission { - let mut submission = rand::thread_rng().gen::(); - submission.block_height = fuel_block_height; + fn round_to_millis(date: DateTime) -> DateTime { + DateTime::from_timestamp_millis(date.timestamp_millis()).unwrap() + } + + #[tokio::test] + async fn can_get_last_time_a_fragment_was_finalized() { + // given + let storage = start_db().await; + + let fragment_ids = ensure_some_fragments_exists_in_the_db(&storage).await; + let tx_hash = rand::random::<[u8; 32]>(); + storage + .record_pending_tx(tx_hash, fragment_ids) + .await + .unwrap(); + + let finalization_time = Utc::now(); + + // when + storage + .update_tx_state(tx_hash, TransactionState::Finalized(finalization_time)) + .await + .unwrap(); - submission + // then + let last_time = storage + .last_time_a_fragment_was_finalized() + .await + .unwrap() + .unwrap(); + + assert_eq!( + round_to_millis(last_time), + round_to_millis(finalization_time) + ); + } + + async fn insert_sequence_of_unbundled_blocks( + storage: impl Storage, + range: RangeInclusive, + ) { + let mut rng = rand::rngs::SmallRng::from_entropy(); + let blocks = range + .clone() + .map(|height| { + let block_hash: [u8; 32] = rng.gen(); + let block_data = nonempty![height as u8]; + ports::storage::FuelBlock { + hash: block_hash, + height, + data: block_data, + } + }) + .collect_nonempty() + .expect("shouldn't be empty"); + + storage.insert_blocks(blocks).await.unwrap(); + } + + async fn insert_sequence_of_bundled_blocks( + storage: impl Storage, + range: RangeInclusive, + num_fragments: usize, + ) { + insert_sequence_of_unbundled_blocks(&storage, range.clone()).await; + + let fragments = std::iter::repeat(Fragment { + data: nonempty![0], + unused_bytes: 1000, + total_bytes: 100.try_into().unwrap(), + }) + .take(num_fragments) + .collect_nonempty() + .unwrap(); + + storage + .insert_bundle_and_fragments(range, fragments) + .await + .unwrap(); + } + + async fn lowest_unbundled_sequence( + storage: impl Storage, + starting_height: u32, + limit: usize, + ) -> RangeInclusive { + storage + .lowest_sequence_of_unbundled_blocks(starting_height, limit) + .await + .unwrap() + .unwrap() + .height_range() } #[tokio::test] - async fn insert_state_submission() -> Result<()> { + async fn can_get_lowest_sequence_of_unbundled_blocks() { // given - let process = PostgresProcess::shared().await?; - let db = process.create_random_db().await?; + let storage = start_db().await; - let (state, fragments) = given_state_and_fragments(); + // Insert blocks 1 to 10 + insert_sequence_of_unbundled_blocks(&storage, 1..=10).await; // when - db.insert_state_submission(state, fragments.clone()).await?; + let height_range = lowest_unbundled_sequence(&storage, 0, usize::MAX).await; // then - let db_fragments = db.get_unsubmitted_fragments().await?; + assert_eq!(height_range, 1..=10); + } + + #[tokio::test] + async fn handles_holes_in_sequences() { + // given + let storage = start_db().await; - assert_eq!(db_fragments.len(), 
fragments.len()); + insert_sequence_of_unbundled_blocks(&storage, 0..=2).await; + insert_sequence_of_unbundled_blocks(&storage, 4..=6).await; - Ok(()) + // when + let height_range = lowest_unbundled_sequence(&storage, 0, usize::MAX).await; + + // then + assert_eq!(height_range, 0..=2); } #[tokio::test] - async fn record_pending_tx() -> Result<()> { + async fn respects_starting_height() { // given - let process = PostgresProcess::shared().await?; - let db = process.create_random_db().await?; + let storage = start_db().await; - let (state, fragments) = given_state_and_fragments(); - db.insert_state_submission(state, fragments.clone()).await?; - let tx_hash = [1; 32]; - let fragment_ids = vec![1]; + insert_sequence_of_unbundled_blocks(&storage, 0..=10).await; // when - db.record_pending_tx(tx_hash, fragment_ids).await?; + let height_range = lowest_unbundled_sequence(&storage, 2, usize::MAX).await; // then - let has_pending_tx = db.has_pending_txs().await?; - let pending_tx = db.get_pending_txs().await?; + assert_eq!(height_range, 2..=10); + } - assert!(has_pending_tx); + #[tokio::test] + async fn respects_limit() { + // given + let storage = start_db().await; - assert_eq!(pending_tx.len(), 1); - assert_eq!(pending_tx[0].hash, tx_hash); - assert_eq!(pending_tx[0].state, TransactionState::Pending); + insert_sequence_of_unbundled_blocks(&storage, 0..=10).await; - Ok(()) + // when + let height_range = lowest_unbundled_sequence(&storage, 0, 2).await; + + // then + assert_eq!(height_range, 0..=1); } #[tokio::test] - async fn update_submission_tx_state() -> Result<()> { + async fn ignores_bundled_blocks() { // given - let process = PostgresProcess::shared().await?; - let db = process.create_random_db().await?; + let storage = start_db().await; - let (state, fragments) = given_state_and_fragments(); - db.insert_state_submission(state, fragments.clone()).await?; - let tx_hash = [1; 32]; - let fragment_ids = vec![1]; - db.record_pending_tx(tx_hash, fragment_ids).await?; + insert_sequence_of_bundled_blocks(&storage, 0..=2, 1).await; + insert_sequence_of_unbundled_blocks(&storage, 3..=4).await; // when - db.update_submission_tx_state(tx_hash, TransactionState::Finalized) - .await?; + let height_range = lowest_unbundled_sequence(&storage, 0, usize::MAX).await; // then - let has_pending_tx = db.has_pending_txs().await?; - let pending_tx = db.get_pending_txs().await?; + assert_eq!(height_range, 3..=4); + } - assert!(!has_pending_tx); - assert!(pending_tx.is_empty()); + /// This can happen if we change the lookback config a couple of times in a short period of time + #[tokio::test] + async fn can_handle_bundled_blocks_appearing_after_unbundled_ones() { + // given + let storage = start_db().await; - Ok(()) + insert_sequence_of_unbundled_blocks(&storage, 0..=2).await; + insert_sequence_of_bundled_blocks(&storage, 7..=10, 1).await; + insert_sequence_of_unbundled_blocks(&storage, 11..=15).await; + + // when + let height_range = lowest_unbundled_sequence(&storage, 0, usize::MAX).await; + + // then + assert_eq!(height_range, 0..=2); + } + + // Important because sqlx panics if the bundle is too big + #[tokio::test] + async fn can_insert_big_batches() { + let storage = start_db().await; + + // u16::MAX because of implementation details + insert_sequence_of_bundled_blocks(&storage, 0..=u16::MAX as u32 * 2, u16::MAX as usize * 2) + .await; } #[tokio::test] - async fn unsbumitted_fragments_are_not_in_pending_or_finalized_tx() -> Result<()> { + async fn excludes_fragments_from_bundles_ending_before_starting_height() { 
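+        // Fragments belong to a bundle, and a bundle ending below `starting_height`
+        // falls outside the requested window, so its fragments must be skipped.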
// given - let process = PostgresProcess::shared().await?; - let db = process.create_random_db().await?; + let storage = start_db().await; + let starting_height = 10; + + // Insert a bundle that ends before the starting_height + storage + .insert_bundle_and_fragments( + 1..=5, // Bundle ends at 5 + nonempty!(Fragment { + data: nonempty![0], + unused_bytes: 1000, + total_bytes: 100.try_into().unwrap() + }), + ) + .await + .unwrap(); + + // Insert a bundle that ends after the starting_height + let fragment = Fragment { + data: nonempty![1], + unused_bytes: 1000, + total_bytes: 100.try_into().unwrap(), + }; + storage + .insert_bundle_and_fragments( + 10..=15, // Bundle ends at 15 + nonempty!(fragment.clone()), + ) + .await + .unwrap(); - let (state, fragments) = given_state_and_fragments(); - db.insert_state_submission(state, fragments.clone()).await?; + // when + let fragments = storage + .oldest_nonfinalized_fragments(starting_height, 10) + .await + .unwrap(); + + // then + assert_eq!(fragments.len(), 1); + assert_eq!(fragments[0].fragment, fragment); + } + + #[tokio::test] + async fn includes_fragments_from_bundles_ending_at_starting_height() { + // given + let storage = start_db().await; + let starting_height = 10; + + // Insert a bundle that ends exactly at the starting_height + let fragment = Fragment { + data: nonempty![2], + unused_bytes: 1000, + total_bytes: 100.try_into().unwrap(), + }; + storage + .insert_bundle_and_fragments( + 5..=10, // Bundle ends at 10 + nonempty!(fragment.clone()), + ) + .await + .unwrap(); // when - // tx failed - let tx_hash = [1; 32]; - let fragment_ids = vec![1, 2]; - db.record_pending_tx(tx_hash, fragment_ids).await?; - db.update_submission_tx_state(tx_hash, TransactionState::Failed) - .await?; - - // tx is finalized - let tx_hash = [2; 32]; - let fragment_ids = vec![2]; - db.record_pending_tx(tx_hash, fragment_ids).await?; - db.update_submission_tx_state(tx_hash, TransactionState::Finalized) - .await?; - - // tx is pending - let tx_hash = [3; 32]; - let fragment_ids = vec![3]; - db.record_pending_tx(tx_hash, fragment_ids).await?; + let fragments = storage + .oldest_nonfinalized_fragments(starting_height, 10) + .await + .unwrap(); // then - let db_fragments = db.get_unsubmitted_fragments().await?; + assert_eq!(fragments.len(), 1); + assert_eq!(fragments[0].fragment, fragment); + } + + #[tokio::test] + async fn empty_db_reports_missing_heights() -> Result<()> { + // given + let current_height = 10; + let storage = start_db().await; + + // when + let missing_blocks = storage.missing_blocks(0, current_height).await?; + + // then + assert_eq!(missing_blocks, vec![0..=current_height]); + + Ok(()) + } - let db_fragment_id: Vec<_> = db_fragments.iter().map(|f| f.id.expect("has id")).collect(); + #[tokio::test] + async fn missing_blocks_no_holes() -> Result<()> { + // given + let current_height = 10; + let storage = start_db().await; - // unsubmitted fragments are not associated to any finalized or pending tx - assert_eq!(db_fragment_id, vec![1, 4, 5]); + insert_sequence_of_unbundled_blocks(&storage, 0..=5).await; + + // when + let missing_blocks = storage.missing_blocks(0, current_height).await?; + + // then + assert_eq!(missing_blocks, vec![6..=current_height]); Ok(()) } - fn given_state_and_fragments() -> (StateSubmission, Vec) { - ( - StateSubmission { - id: None, - block_hash: [0u8; 32], - block_height: 1, - }, - vec![ - StateFragment { - id: None, - submission_id: None, - fragment_idx: 0, - data: vec![1, 2], - created_at: ports::types::Utc::now(), - }, - 
StateFragment { - id: None, - submission_id: None, - fragment_idx: 1, - data: vec![3, 4], - created_at: ports::types::Utc::now(), - }, - StateFragment { - id: None, - submission_id: None, - fragment_idx: 2, - data: vec![5, 6], - created_at: ports::types::Utc::now(), - }, - StateFragment { - id: None, - submission_id: None, - fragment_idx: 3, - data: vec![7, 8], - created_at: ports::types::Utc::now(), - }, - StateFragment { - id: None, - submission_id: None, - fragment_idx: 4, - data: vec![9, 10], - created_at: ports::types::Utc::now(), - }, - ], - ) + #[tokio::test] + async fn reports_holes_in_blocks() -> Result<()> { + // given + let current_height = 15; + let storage = start_db().await; + + insert_sequence_of_unbundled_blocks(&storage, 3..=5).await; + insert_sequence_of_unbundled_blocks(&storage, 8..=10).await; + + // when + let missing_blocks = storage.missing_blocks(0, current_height).await?; + + // then + assert_eq!(missing_blocks, vec![0..=2, 6..=7, 11..=current_height]); + + Ok(()) } } diff --git a/packages/storage/src/mappings.rs b/packages/storage/src/mappings.rs new file mode 100644 index 00000000..b8b42bef --- /dev/null +++ b/packages/storage/src/mappings.rs @@ -0,0 +1 @@ +pub(crate) mod tables; diff --git a/packages/storage/src/mappings/tables.rs b/packages/storage/src/mappings/tables.rs new file mode 100644 index 00000000..0b61dcd5 --- /dev/null +++ b/packages/storage/src/mappings/tables.rs @@ -0,0 +1,285 @@ +use std::num::NonZeroU32; + +use ports::types::{DateTime, NonEmpty, NonNegative, TransactionState, Utc}; + +macro_rules! bail { + ($msg: literal, $($args: expr),*) => { + return Err($crate::error::Error::Conversion(format!($msg, $($args),*))) + }; +} + +#[derive(sqlx::FromRow)] +pub struct L1FuelBlockSubmission { + pub fuel_block_hash: Vec, + pub fuel_block_height: i64, + pub completed: bool, + pub submittal_height: i64, +} + +impl TryFrom for ports::types::BlockSubmission { + type Error = crate::error::Error; + + fn try_from(value: L1FuelBlockSubmission) -> Result { + let block_hash = value.fuel_block_hash.as_slice(); + let Ok(block_hash) = block_hash.try_into() else { + bail!("Expected 32 bytes for `fuel_block_hash`, but got: {block_hash:?} from db",); + }; + + let Ok(block_height) = value.fuel_block_height.try_into() else { + bail!( + "`fuel_block_height` as read from the db cannot fit in a `u32` as expected. Got: {:?} from db", + value.fuel_block_height + + ); + }; + + let Ok(submittal_height) = value.submittal_height.try_into() else { + bail!("`submittal_height` as read from the db cannot fit in a `u64` as expected. Got: {} from db", value.submittal_height); + }; + + Ok(Self { + block_hash, + block_height, + completed: value.completed, + submittal_height, + }) + } +} + +impl From for L1FuelBlockSubmission { + fn from(value: ports::types::BlockSubmission) -> Self { + Self { + fuel_block_hash: value.block_hash.to_vec(), + fuel_block_height: i64::from(value.block_height), + completed: value.completed, + submittal_height: value.submittal_height.into(), + } + } +} + +#[derive(sqlx::FromRow)] +pub struct BundleFragment { + pub id: i32, + pub idx: i32, + pub bundle_id: i32, + pub data: Vec, + pub unused_bytes: i64, + pub total_bytes: i64, +} + +impl TryFrom for ports::storage::BundleFragment { + type Error = crate::error::Error; + + fn try_from(value: BundleFragment) -> Result { + let idx = value.idx.try_into().map_err(|e| { + crate::error::Error::Conversion(format!( + "Invalid db `idx` ({}). 
Reason: {e}", + value.idx + )) + })?; + let bundle_id = value.bundle_id.try_into().map_err(|e| { + crate::error::Error::Conversion(format!( + "Invalid db `bundle_id` ({}). Reason: {e}", + value.bundle_id + )) + })?; + + let data = NonEmpty::collect(value.data).ok_or_else(|| { + crate::error::Error::Conversion("db fragment data is invalid".to_owned()) + })?; + + let id = value.id.try_into().map_err(|e| { + crate::error::Error::Conversion(format!("Invalid db `id` ({}). Reason: {e}", value.id)) + })?; + + let unused_bytes: NonNegative = value.unused_bytes.try_into().map_err(|e| { + crate::error::Error::Conversion(format!( + "Invalid db `unused_bytes` ({}). Reason: {e}", + value.unused_bytes + )) + })?; + + let unused_bytes: u32 = unused_bytes.as_u64().try_into().map_err(|e| { + crate::error::Error::Conversion(format!( + "Invalid db `unused_bytes` ({}). Reason: {e}", + value.unused_bytes + )) + })?; + + let total_bytes: NonNegative = value.total_bytes.try_into().map_err(|e| { + crate::error::Error::Conversion(format!( + "Invalid db `total_bytes` ({}). Reason: {e}", + value.total_bytes + )) + })?; + + let total_bytes: u32 = total_bytes.as_u64().try_into().map_err(|e| { + crate::error::Error::Conversion(format!( + "Invalid db `total_bytes` ({}). Reason: {e}", + value.total_bytes + )) + })?; + + let total_bytes: NonZeroU32 = total_bytes.try_into().map_err(|e| { + crate::error::Error::Conversion(format!( + "Invalid db `total_bytes` ({}). Reason: {e}", + value.total_bytes + )) + })?; + + let fragment = ports::types::Fragment { + data, + unused_bytes, + total_bytes, + }; + + Ok(Self { + id, + idx, + bundle_id, + fragment, + }) + } +} + +#[derive(sqlx::FromRow)] +pub struct FuelBlock { + pub hash: Vec, + pub height: i64, + pub data: Vec, +} + +impl From for FuelBlock { + fn from(value: ports::storage::FuelBlock) -> Self { + Self { + hash: value.hash.to_vec(), + height: value.height.into(), + data: value.data.into(), + } + } +} + +impl TryFrom for ports::storage::FuelBlock { + type Error = crate::error::Error; + + fn try_from(value: FuelBlock) -> Result { + let hash = value.hash.as_slice(); + let Ok(block_hash) = hash.try_into() else { + bail!("Expected 32 bytes for `hash`, but got: {hash:?} from db",); + }; + + let height = value.height.try_into().map_err(|e| { + crate::error::Error::Conversion(format!( + "Invalid db `height` ({}). Reason: {e}", + value.height + )) + })?; + + let data = NonEmpty::collect(value.data) + .ok_or_else(|| crate::error::Error::Conversion("Invalid db `data`.".to_string()))?; + + Ok(Self { + height, + hash: block_hash, + data, + }) + } +} + +pub struct L1Tx { + pub id: i64, + pub hash: Vec, + pub state: i16, + pub finalized_at: Option>, +} + +impl L1Tx { + pub fn parse_state(&self) -> Result { + match (self.state, self.finalized_at) { + (0, _) => Ok(TransactionState::Pending), + (1, Some(finalized_at)) => Ok(TransactionState::Finalized(finalized_at)), + (1, None) => { + bail!( + "L1SubmissionTx(id={}) is missing finalized_at field. 
Must not happen since there should have been a constraint on the table!", self.id + ) + } + (2, _) => Ok(TransactionState::Failed), + _ => { + bail!( + "L1SubmissionTx(id={}) has invalid state {}", + self.id, + self.state + ) + } + } + } +} + +impl From for L1Tx { + fn from(value: ports::types::L1Tx) -> Self { + let state = L1TxState::from(&value.state).into(); + let finalized_at = match value.state { + TransactionState::Finalized(finalized_at) => Some(finalized_at), + _ => None, + }; + + Self { + // if not present use placeholder as id is given by db + id: value.id.unwrap_or_default() as i64, + hash: value.hash.to_vec(), + state, + finalized_at, + } + } +} + +impl TryFrom for ports::types::L1Tx { + type Error = crate::error::Error; + + fn try_from(value: L1Tx) -> Result { + let hash = value.hash.as_slice(); + let Ok(hash) = hash.try_into() else { + bail!("Expected 32 bytes for transaction hash, but got: {hash:?} from db",); + }; + let state = value.parse_state()?; + + let id = value.id.try_into().map_err(|_| { + Self::Error::Conversion(format!( + "Could not convert `id` to u64. Got: {} from db", + value.id + )) + })?; + + Ok(Self { + id: Some(id), + hash, + state, + }) + } +} + +pub enum L1TxState { + Pending, + Finalized, + Failed, +} + +impl From for i16 { + fn from(value: L1TxState) -> Self { + match value { + L1TxState::Pending => 0, + L1TxState::Finalized => 1, + L1TxState::Failed => 2, + } + } +} + +impl From<&TransactionState> for L1TxState { + fn from(value: &TransactionState) -> Self { + match value { + TransactionState::Pending => Self::Pending, + TransactionState::Finalized(_) => Self::Finalized, + TransactionState::Failed => Self::Failed, + } + } +} diff --git a/packages/storage/src/postgres.rs b/packages/storage/src/postgres.rs index e06c67f9..30e8deea 100644 --- a/packages/storage/src/postgres.rs +++ b/packages/storage/src/postgres.rs @@ -1,10 +1,20 @@ -use ports::types::{ - BlockSubmission, StateFragment, StateSubmission, SubmissionTx, TransactionState, +use std::ops::RangeInclusive; + +use itertools::Itertools; +use ports::{ + storage::SequentialFuelBlocks, + types::{ + BlockSubmission, DateTime, Fragment, NonEmpty, NonNegative, TransactionState, + TryCollectNonEmpty, Utc, + }, +}; +use sqlx::{ + postgres::{PgConnectOptions, PgPoolOptions}, + QueryBuilder, }; -use sqlx::postgres::{PgConnectOptions, PgPoolOptions}; use super::error::{Error, Result}; -use crate::tables; +use crate::mappings::tables::{self, L1TxState}; #[derive(Clone)] pub struct Postgres { @@ -83,13 +93,13 @@ impl Postgres { } #[cfg(feature = "test-helpers")] - pub(crate) async fn execute(&self, query: &str) -> Result<()> { - sqlx::query(query).execute(&self.connection_pool).await?; - Ok(()) + pub(crate) fn pool(&self) -> sqlx::Pool { + self.connection_pool.clone() } - pub(crate) async fn _insert(&self, submission: BlockSubmission) -> crate::error::Result<()> { + pub(crate) async fn _insert(&self, submission: BlockSubmission) -> Result<()> { let row = tables::L1FuelBlockSubmission::from(submission); + sqlx::query!( "INSERT INTO l1_fuel_block_submission (fuel_block_hash, fuel_block_height, completed, submittal_height) VALUES ($1, $2, $3, $4)", row.fuel_block_hash, @@ -97,6 +107,110 @@ impl Postgres { row.completed, row.submittal_height ).execute(&self.connection_pool).await?; + + Ok(()) + } + + pub(crate) async fn _oldest_nonfinalized_fragments( + &self, + starting_height: u32, + limit: usize, + ) -> Result> { + let limit: i64 = limit.try_into().unwrap_or(i64::MAX); + let fragments = sqlx::query_as!( + 
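+        // A fragment counts as non-finalized here if it has no associated tx at all,
+        // or only failed ones.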
+        let fragments = sqlx::query_as!(
+            tables::BundleFragment,
+            r#"
+            SELECT f.*
+            FROM l1_fragments f
+            LEFT JOIN l1_transaction_fragments tf ON tf.fragment_id = f.id
+            LEFT JOIN l1_transactions t ON t.id = tf.transaction_id
+            JOIN bundles b ON b.id = f.bundle_id
+            WHERE (t.id IS NULL OR t.state = $1)
+            AND b.end_height >= $2 -- Exclude bundles ending before starting_height
+            ORDER BY b.start_height ASC, f.idx ASC
+            LIMIT $3;
+            "#,
+            i16::from(L1TxState::Failed),
+            i64::from(starting_height),
+            limit
+        )
+        .fetch_all(&self.connection_pool)
+        .await?
+        .into_iter()
+        .map(TryFrom::try_from)
+        .try_collect()?;
+
+        Ok(fragments)
+    }
+
+    pub(crate) async fn _missing_blocks(
+        &self,
+        starting_height: u32,
+        current_height: u32,
+    ) -> crate::error::Result<Vec<RangeInclusive<u32>>> {
+        let heights: Vec<_> = sqlx::query!(
+            r#"WITH all_heights AS (SELECT generate_series($1::BIGINT, $2::BIGINT) AS height)
+            SELECT ah.height
+            FROM all_heights ah
+            LEFT JOIN fuel_blocks fb ON fb.height = ah.height
+            WHERE fb.height IS NULL
+            ORDER BY ah.height;"#,
+            i64::from(starting_height),
+            i64::from(current_height)
+        )
+        .fetch_all(&self.connection_pool)
+        .await
+        .map_err(Error::from)?
+        .into_iter()
+        .flat_map(|row| row.height)
+        .map(|height| {
+            u32::try_from(height).map_err(|_| {
+                crate::error::Error::Conversion(format!("invalid block height: {height}"))
+            })
+        })
+        .try_collect()?;
+
+        Ok(create_ranges(heights))
+    }
+
+    pub(crate) async fn _insert_blocks(
+        &self,
+        blocks: NonEmpty<ports::storage::FuelBlock>,
+    ) -> Result<()> {
+        // Currently: hash, height and data
+        const FIELDS_PER_BLOCK: u16 = 3;
+        /// The maximum number of bind parameters that can be passed to a single postgres query is
+        /// u16::MAX. Sqlx panics if this limit is exceeded.
+        const MAX_BLOCKS_PER_QUERY: usize = (u16::MAX / FIELDS_PER_BLOCK) as usize;
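+        // i.e. at most 65_535 / 3 = 21_845 blocks can go into a single INSERT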
+
+        let mut tx = self.connection_pool.begin().await?;
+
+        let queries = blocks
+            .into_iter()
+            .map(tables::FuelBlock::from)
+            .chunks(MAX_BLOCKS_PER_QUERY)
+            .into_iter()
+            .map(|chunk| {
+                let mut query_builder =
+                    QueryBuilder::new("INSERT INTO fuel_blocks (hash, height, data)");
+
+                query_builder.push_values(chunk, |mut b, block| {
+                    // update the constants above if you add/remove bindings
+                    b.push_bind(block.hash)
+                        .push_bind(block.height)
+                        .push_bind(block.data);
+                });
+
+                query_builder
+            })
+            .collect_vec();
+
+        for mut query in queries {
+            query.build().execute(&mut *tx).await?;
+        }
+
+        tx.commit().await?;
+
         Ok(())
     }
 
@@ -113,6 +227,59 @@ impl Postgres {
         .transpose()
     }
 
+    pub(crate) async fn _last_time_a_fragment_was_finalized(
+        &self,
+    ) -> crate::error::Result<Option<DateTime<Utc>>> {
+        let response = sqlx::query!(
+            r#"SELECT
+            MAX(l1_transactions.finalized_at) AS last_fragment_time
+        FROM
+            l1_transaction_fragments
+        JOIN
+            l1_transactions ON l1_transactions.id = l1_transaction_fragments.transaction_id
+        WHERE
+            l1_transactions.state = $1;
+        "#,
+            i16::from(L1TxState::Finalized)
+        )
+        .fetch_optional(&self.connection_pool)
+        .await?
+        .and_then(|response| response.last_fragment_time);
+        Ok(response)
+    }
+
+    pub(crate) async fn _lowest_unbundled_blocks(
+        &self,
+        starting_height: u32,
+        limit: usize,
+    ) -> Result<Option<SequentialFuelBlocks>> {
+        let limit = i64::try_from(limit).unwrap_or(i64::MAX);
+
+        let response = sqlx::query_as!(
+            tables::FuelBlock,
+            r#"
+            SELECT fb.*
+            FROM fuel_blocks fb WHERE fb.height >= $1
+            AND NOT EXISTS (
+                SELECT 1
+                FROM bundles b
+                WHERE fb.height BETWEEN b.start_height AND b.end_height
+            )
+            ORDER BY fb.height LIMIT $2"#,
+            i64::from(starting_height), // Parameter $1
+            limit                       // Parameter $2
+        )
+        .fetch_all(&self.connection_pool)
+        .await
+        .map_err(Error::from)?;
+
+        Ok(response
+            .into_iter()
+            .map(ports::storage::FuelBlock::try_from)
+            .try_collect_nonempty()?
+            .map(SequentialFuelBlocks::from_first_sequence))
+    }
+
     pub(crate) async fn _set_submission_completed(
         &self,
         fuel_block_hash: [u8; 32],
@@ -131,156 +298,74 @@ impl Postgres {
         }
     }
 
-    pub(crate) async fn _insert_state_submission(
-        &self,
-        state: StateSubmission,
-        fragments: Vec<StateFragment>,
-    ) -> Result<()> {
-        if fragments.is_empty() {
-            return Err(Error::Database(
-                "cannot insert state with no fragments".to_string(),
-            ));
-        }
-
-        let state_row = tables::L1StateSubmission::from(state);
-        let fragment_rows = fragments
-            .into_iter()
-            .map(tables::L1StateFragment::from)
-            .collect::<Vec<_>>();
-
-        let mut transaction = self.connection_pool.begin().await?;
-
-        // Insert the state submission
-        let submission_id = sqlx::query!(
-            "INSERT INTO l1_submissions (fuel_block_hash, fuel_block_height) VALUES ($1, $2) RETURNING id",
-            state_row.fuel_block_hash,
-            state_row.fuel_block_height
-        )
-        .fetch_one(&mut *transaction)
-        .await?.id;
-
-        // Insert the state fragments
-        // TODO: optimize this
-        for fragment_row in fragment_rows {
-            sqlx::query!(
-                "INSERT INTO l1_fragments (fragment_idx, submission_id, data, created_at) VALUES ($1, $2, $3, $4)",
-                fragment_row.fragment_idx,
-                submission_id,
-                fragment_row.data,
-                fragment_row.created_at
-            )
-            .execute(&mut *transaction)
-            .await?;
-        }
-
-        transaction.commit().await?;
-
-        Ok(())
-    }
-
-    pub(crate) async fn _get_unsubmitted_fragments(&self) -> Result<Vec<StateFragment>> {
-        const BLOB_LIMIT: i64 = 6;
-        let rows = sqlx::query_as!(
-            // all fragments that are not associated to any pending or finalized tx
-            tables::L1StateFragment,
-            "SELECT l1_fragments.*
-            FROM l1_fragments
-            WHERE l1_fragments.id NOT IN (
-                SELECT l1_fragments.id
-                FROM l1_fragments
-                JOIN l1_transaction_fragments ON l1_fragments.id = l1_transaction_fragments.fragment_id
-                JOIN l1_transactions ON l1_transaction_fragments.transaction_id = l1_transactions.id
-                WHERE l1_transactions.state IN ($1, $2)
-            )
-            ORDER BY l1_fragments.created_at
-            LIMIT $3;",
-            TransactionState::Finalized.into_i16(),
-            TransactionState::Pending.into_i16(),
-            BLOB_LIMIT
-        )
-        .fetch_all(&self.connection_pool)
-        .await?
-        .into_iter()
-        .map(StateFragment::try_from);
-
-        rows.collect::<Result<Vec<_>>>()
-    }
-
     pub(crate) async fn _record_pending_tx(
         &self,
         tx_hash: [u8; 32],
-        fragment_ids: Vec<u32>,
+        fragment_ids: NonEmpty<NonNegative<i32>>,
     ) -> Result<()> {
-        let mut transaction = self.connection_pool.begin().await?;
+        let mut tx = self.connection_pool.begin().await?;
 
-        let transaction_id = sqlx::query!(
+        let tx_id = sqlx::query!(
             "INSERT INTO l1_transactions (hash, state) VALUES ($1, $2) RETURNING id",
             tx_hash.as_slice(),
-            TransactionState::Pending.into_i16(),
+            i16::from(L1TxState::Pending)
         )
-        .fetch_one(&mut *transaction)
+        .fetch_one(&mut *tx)
         .await?
         .id;
 
-        for fragment_id in fragment_ids {
+        for id in fragment_ids {
             sqlx::query!(
-                "INSERT INTO l1_transaction_fragments (transaction_id, fragment_id) VALUES ($1, $2)",
-                transaction_id,
-                fragment_id as i64
+                "INSERT INTO l1_transaction_fragments (transaction_id, fragment_id) VALUES ($1, $2)",
+                tx_id,
+                id.as_i32()
             )
-            .execute(&mut *transaction)
+            .execute(&mut *tx)
             .await?;
         }
 
-        transaction.commit().await?;
-
+        tx.commit().await?;
         Ok(())
     }
 
     pub(crate) async fn _has_pending_txs(&self) -> Result<bool> {
         Ok(sqlx::query!(
             "SELECT EXISTS (SELECT 1 FROM l1_transactions WHERE state = $1) AS has_pending_transactions;",
-            TransactionState::Pending.into_i16()
+            i16::from(L1TxState::Pending)
         )
         .fetch_one(&self.connection_pool)
         .await?
         .has_pending_transactions.unwrap_or(false))
     }
 
-    pub(crate) async fn _get_pending_txs(&self) -> Result<Vec<SubmissionTx>> {
+    pub(crate) async fn _get_pending_txs(&self) -> Result<Vec<ports::types::L1Tx>> {
         sqlx::query_as!(
-            tables::L1SubmissionTx,
+            tables::L1Tx,
             "SELECT * FROM l1_transactions WHERE state = $1",
-            TransactionState::Pending.into_i16()
+            i16::from(L1TxState::Pending)
         )
         .fetch_all(&self.connection_pool)
         .await?
         .into_iter()
-        .map(SubmissionTx::try_from)
+        .map(TryFrom::try_from)
         .collect::<Result<Vec<_>>>()
     }
 
-    pub(crate) async fn _state_submission_w_latest_block(
-        &self,
-    ) -> crate::error::Result<Option<StateSubmission>> {
-        sqlx::query_as!(
-            tables::L1StateSubmission,
-            "SELECT * FROM l1_submissions ORDER BY fuel_block_height DESC LIMIT 1"
-        )
-        .fetch_optional(&self.connection_pool)
-        .await?
-        .map(StateSubmission::try_from)
-        .transpose()
-    }
-
-    pub(crate) async fn _update_submission_tx_state(
+    pub(crate) async fn _update_tx_state(
         &self,
         hash: [u8; 32],
         state: TransactionState,
     ) -> Result<()> {
+        let finalized_at = match &state {
+            TransactionState::Finalized(date_time) => Some(*date_time),
+            _ => None,
+        };
+        let state = i16::from(L1TxState::from(&state));
+
         sqlx::query!(
-            "UPDATE l1_transactions SET state = $1 WHERE hash = $2",
-            state.into_i16(),
+            "UPDATE l1_transactions SET state = $1, finalized_at = $2 WHERE hash = $3",
+            state,
+            finalized_at,
             hash.as_slice(),
         )
         .execute(&self.connection_pool)
@@ -288,4 +373,376 @@ impl Postgres {
 
         Ok(())
     }
+
+    pub(crate) async fn _insert_bundle_and_fragments(
+        &self,
+        block_range: RangeInclusive<u32>,
+        fragments: NonEmpty<Fragment>,
+    ) -> Result<()> {
+        let mut tx = self.connection_pool.begin().await?;
+
+        let start = *block_range.start();
+        let end = *block_range.end();
+
+        // Insert a new bundle and retrieve its ID
+        let bundle_id = sqlx::query!(
+            "INSERT INTO bundles(start_height, end_height) VALUES ($1, $2) RETURNING id",
+            i64::from(start),
+            i64::from(end)
+        )
+        .fetch_one(&mut *tx)
+        .await?
+        .id;
+
+        let bundle_id: NonNegative<i32> = bundle_id.try_into().map_err(|e| {
+            crate::error::Error::Conversion(format!("invalid bundle id received from db: {}", e))
+        })?;
+
+        // Define constants for batching
+        const FIELDS_PER_FRAGMENT: u16 = 5; // idx, data, bundle_id, unused_bytes, total_bytes
+        const MAX_FRAGMENTS_PER_QUERY: usize = (u16::MAX / FIELDS_PER_FRAGMENT) as usize;
+
+        // Prepare fragments for insertion
+        let fragment_rows = fragments
+            .into_iter()
+            .enumerate()
+            .map(|(idx, fragment)| {
+                let idx = i32::try_from(idx).map_err(|_| {
+                    crate::error::Error::Conversion(format!("invalid idx for fragment: {}", idx))
+                })?;
+                Ok((
+                    idx,
+                    Vec::from(fragment.data),
+                    bundle_id.as_i32(),
+                    i64::from(fragment.unused_bytes),
+                    i64::from(fragment.total_bytes.get()),
+                ))
+            })
+            .collect::<Result<Vec<_>>>()?;
+
+        // Batch insert fragments
+        let queries = fragment_rows
+            .into_iter()
+            .chunks(MAX_FRAGMENTS_PER_QUERY)
+            .into_iter()
+            .map(|chunk| {
+                let mut query_builder = QueryBuilder::new(
+                    "INSERT INTO l1_fragments (idx, data, bundle_id, unused_bytes, total_bytes)",
+                );
+
+                query_builder.push_values(chunk, |mut b, values| {
+                    b.push_bind(values.0);
+                    b.push_bind(values.1);
+                    b.push_bind(values.2);
+                    b.push_bind(values.3);
+                    b.push_bind(values.4);
+                });
+
+                query_builder
+            })
+            .collect::<Vec<_>>();
+
+        // Execute all fragment insertion queries
+        for mut query in queries {
+            query.build().execute(&mut *tx).await?;
+        }
+
+        // Commit the transaction
+        tx.commit().await?;
+
+        Ok(())
+    }
+}
+
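+// Collapses sorted, deduplicated heights into inclusive ranges,
+// e.g. [0, 1, 2, 5, 6, 9] becomes [0..=2, 5..=6, 9..=9].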
+fn create_ranges(heights: Vec<u32>) -> Vec<RangeInclusive<u32>> {
+    // db should take care of it always being ASC sorted and unique, but just in case it doesn't
+    // hurt to dedupe and sort here
+    heights
+        .into_iter()
+        .unique()
+        .sorted_unstable()
+        .enumerate()
+        .chunk_by(|(i, height)| {
+            // consecutive heights will give the same number when subtracted from their indexes
+            // heights( 5, 6, 7) -> ( 5-0, 6-1, 7-2) = ( 5, 5, 5 )
+            height
+                .checked_sub(*i as u32)
+                .expect("cannot underflow since elements are sorted and `height` is always >= `i` ")
+        })
+        .into_iter()
+        .map(|(_key, group)| {
+            let mut group_iter = group.map(|(_, h)| h);
+            let start = group_iter.next().expect("group cannot be empty");
+            let end = group_iter.last().unwrap_or(start);
+            start..=end
+        })
+        .collect()
+}
+
+#[cfg(test)]
+mod tests {
+    use std::{env, fs, path::Path};
+
+    use sqlx::{Executor, PgPool, Row};
+
+    use crate::test_instance;
+
+    #[tokio::test]
+    async fn test_second_migration_applies_successfully() {
+        let db = test_instance::PostgresProcess::shared()
+            .await
+            .expect("Failed to initialize PostgresProcess")
+            .create_noschema_random_db()
+            .await
+            .expect("Failed to create random test database");
+
+        let manifest_dir = env!("CARGO_MANIFEST_DIR");
+        let migrations_path = Path::new(manifest_dir).join("migrations");
+
+        async fn apply_migration(pool: &sqlx::Pool<sqlx::Postgres>, path: &Path) {
+            let sql = fs::read_to_string(path)
+                .map_err(|e| format!("Failed to read migration file {:?}: {}", path, e))
+                .unwrap();
+            pool.execute(sqlx::raw_sql(&sql)).await.unwrap();
+        }
+
+        // -----------------------
+        // Apply Initial Migration
+        // -----------------------
+        let initial_migration_path = migrations_path.join("0001_initial.up.sql");
+        apply_migration(&db.db.pool(), &initial_migration_path).await;
+
+        // Insert sample data into initial tables
+
+        let fuel_block_hash = vec![0u8; 32];
+        let insert_l1_submissions = r#"
+            INSERT INTO l1_submissions (fuel_block_hash, fuel_block_height)
+            VALUES ($1, $2)
+            RETURNING id
+        "#;
+        let row = sqlx::query(insert_l1_submissions)
+            .bind(&fuel_block_hash)
+            .bind(1000i64)
+            .fetch_one(&db.db.pool())
+            .await
+            .unwrap();
+        let submission_id: i32 = row.try_get("id").unwrap();
+
+        let insert_l1_fuel_block_submission = r#"
+            INSERT INTO l1_fuel_block_submission (fuel_block_hash, fuel_block_height, completed, submittal_height)
+            VALUES ($1, $2, $3, $4)
+        "#;
+        sqlx::query(insert_l1_fuel_block_submission)
+            .bind(&fuel_block_hash)
+            .bind(1000i64)
+            .bind(true)
+            .bind(950i64)
+            .execute(&db.db.pool())
+            .await
+            .unwrap();
+
+        // Insert into l1_transactions
+        let tx_hash = vec![1u8; 32];
+        let insert_l1_transactions = r#"
+            INSERT INTO l1_transactions (hash, state)
+            VALUES ($1, $2)
+            RETURNING id
+        "#;
+        let row = sqlx::query(insert_l1_transactions)
+            .bind(&tx_hash)
+            .bind(0i16)
+            .fetch_one(&db.db.pool())
+            .await
+            .unwrap();
+        let transaction_id: i32 = row.try_get("id").unwrap();
+
+        // Insert into l1_fragments
+        let fragment_data = vec![2u8; 10];
+        let insert_l1_fragments = r#"
+            INSERT INTO l1_fragments (fragment_idx, submission_id, data)
+            VALUES ($1, $2, $3)
+            RETURNING id
+        "#;
+        let row = sqlx::query(insert_l1_fragments)
+            .bind(0i64)
+            .bind(submission_id)
+            .bind(&fragment_data)
+            .fetch_one(&db.db.pool())
+            .await
+            .unwrap();
+        let fragment_id: i32 = row.try_get("id").unwrap();
+
+        // Insert into l1_transaction_fragments
+        let insert_l1_transaction_fragments = r#"
+            INSERT INTO l1_transaction_fragments (transaction_id, fragment_id)
+            VALUES ($1, $2)
+        "#;
+        sqlx::query(insert_l1_transaction_fragments)
+            .bind(transaction_id)
+            .bind(fragment_id)
+            .execute(&db.db.pool())
+            .await
+            .unwrap();
+
+        // ------------------------
+        // Apply Second Migration
+        // ------------------------
+        let second_migration_path = migrations_path.join("0002_better_fragmentation.up.sql");
+        apply_migration(&db.db.pool(), &second_migration_path).await;
+
+        // ------------------------
+        // Verification Steps
+        // ------------------------
+
+        // Function to check table existence
+        async fn table_exists(pool: &PgPool, table_name: &str) -> bool {
+            let query = r#"
+                SELECT EXISTS (
+                    SELECT FROM information_schema.tables
+                    WHERE table_schema = 'public'
+                    AND table_name = $1
+                )
+            "#;
+            let row = sqlx::query(query)
+                .bind(table_name)
+                .fetch_one(pool)
+                .await
+                .expect("Failed to execute table_exists query");
+            row.try_get::<bool, _>(0).unwrap_or(false)
+        }
+
+        // Function to check column existence and type
+        async fn column_info(pool: &PgPool, table_name: &str, column_name: &str) -> Option<String> {
+            let query = r#"
+                SELECT data_type
+                FROM information_schema.columns
+                WHERE table_name = $1 AND column_name = $2
+            "#;
+            let row = sqlx::query(query)
+                .bind(table_name)
+                .bind(column_name)
+                .fetch_optional(pool)
+                .await
+                .expect("Failed to execute column_info query");
+            row.map(|row| row.try_get("data_type").unwrap_or_default())
+        }
+
+        let fuel_blocks_exists = table_exists(&db.db.pool(), "fuel_blocks").await;
+        assert!(fuel_blocks_exists, "fuel_blocks table does not exist");
+
+        let bundles_exists = table_exists(&db.db.pool(), "bundles").await;
+        assert!(bundles_exists, "bundles table does not exist");
+
+        async fn check_columns(pool: &PgPool, table: &str, column: &str, expected_type: &str) {
+            let info = column_info(pool, table, column).await;
+            assert!(
+                info.is_some(),
+                "Column '{}' does not exist in table '{}'",
+                column,
+                table
+            );
+            let data_type = info.unwrap();
+            assert_eq!(
+                data_type, expected_type,
+                "Column '{}' in table '{}' has type '{}', expected '{}'",
+                column, table, data_type, expected_type
+            );
+        }
+
+        // Check that 'l1_fragments' has new columns
+        check_columns(&db.db.pool(), "l1_fragments", "idx", "integer").await;
+        check_columns(&db.db.pool(), "l1_fragments", "total_bytes", "bigint").await;
+        check_columns(&db.db.pool(), "l1_fragments", "unused_bytes", "bigint").await;
+        check_columns(&db.db.pool(), "l1_fragments", "bundle_id", "integer").await;
+
+        // Verify 'l1_transactions' has 'finalized_at' column
+        check_columns(
+            &db.db.pool(),
+            "l1_transactions",
+            "finalized_at",
+            "timestamp with time zone",
+        )
+        .await;
+
+        // Verify that l1_fragments and l1_transaction_fragments are empty after migration
+        let count_l1_fragments = sqlx::query_scalar::<_, i64>(
+            r#"
+            SELECT COUNT(*) FROM l1_fragments
+            "#,
+        )
+        .fetch_one(&db.db.pool())
+        .await
+        .unwrap();
+        assert_eq!(
+            count_l1_fragments, 0,
+            "l1_fragments table is not empty after migration"
+        );
+
+        let count_l1_transaction_fragments = sqlx::query_scalar::<_, i64>(
+            r#"
+            SELECT COUNT(*) FROM l1_transaction_fragments
+            "#,
+        )
+        .fetch_one(&db.db.pool())
+        .await
+        .unwrap();
+        assert_eq!(
+            count_l1_transaction_fragments, 0,
+            "l1_transaction_fragments table is not empty after migration"
+        );
+
+        // Insert a default bundle to satisfy the foreign key constraint for future inserts
+        let insert_default_bundle = r#"
+            INSERT INTO bundles (start_height, end_height)
+            VALUES ($1, $2)
+            RETURNING id
+        "#;
+        let row = sqlx::query(insert_default_bundle)
+            .bind(0i64)
+            .bind(0i64)
+            .fetch_one(&db.db.pool())
+            .await
+            .unwrap();
+        let bundle_id: i32 = row.try_get("id").unwrap();
+        assert_eq!(bundle_id, 1, "Default bundle ID is not 1");
+
+        // Attempt to insert a fragment with empty data
+        let insert_invalid_fragment = r#"
+            INSERT INTO l1_fragments (idx, data, total_bytes, unused_bytes, bundle_id)
+            VALUES ($1, $2, $3, $4, $5)
+        "#;
+        let result = sqlx::query(insert_invalid_fragment)
+            .bind(1i32)
+            .bind::<&[u8]>(&[]) // Empty data should fail due to check constraint
+            .bind(10i64)
+            .bind(5i64)
+            .bind(1i32) // Valid bundle_id
+            .execute(&db.db.pool())
+            .await;
+
+        assert!(
+            result.is_err(),
+            "Inserting empty data should fail due to check constraint"
+        );
+
+        // Insert a valid fragment
+        let fragment_data_valid = vec![3u8; 15];
+        let insert_valid_fragment = r#"
+            INSERT INTO l1_fragments (idx, data, total_bytes, unused_bytes, bundle_id)
+            VALUES ($1, $2, $3, $4, $5)
+            RETURNING id
+        "#;
+        let row = sqlx::query(insert_valid_fragment)
+            .bind(1i32)
+            .bind(&fragment_data_valid)
+            .bind(15i64)
+            .bind(0i64)
+            .bind(1i32)
+            .fetch_one(&db.db.pool())
+            .await
+            .unwrap();
+
+        let new_fragment_id: i32 = row.try_get("id").unwrap();
+        assert!(new_fragment_id > 0, "Failed to insert a valid fragment");
+    }
+}
diff --git a/packages/storage/src/tables.rs b/packages/storage/src/tables.rs
deleted file mode 100644
index 9dde848e..00000000
--- a/packages/storage/src/tables.rs
+++ /dev/null
@@ -1,181 +0,0 @@
-use ports::types::{
-    BlockSubmission, StateFragment, StateSubmission, SubmissionTx, TransactionState,
-};
-use sqlx::types::chrono;
-
-macro_rules! bail {
-    ($msg: literal, $($args: expr),*) => {
-        return Err(Self::Error::Conversion(format!($msg, $($args),*)));
-    };
-}
-
-#[derive(sqlx::FromRow)]
-pub struct L1FuelBlockSubmission {
-    pub fuel_block_hash: Vec<u8>,
-    pub fuel_block_height: i64,
-    pub completed: bool,
-    pub submittal_height: i64,
-}
-
-impl TryFrom<L1FuelBlockSubmission> for BlockSubmission {
-    type Error = crate::error::Error;
-
-    fn try_from(value: L1FuelBlockSubmission) -> Result<Self, Self::Error> {
-        let block_hash = value.fuel_block_hash.as_slice();
-        let Ok(block_hash) = block_hash.try_into() else {
-            bail!("Expected 32 bytes for `fuel_block_hash`, but got: {block_hash:?} from db",);
-        };
-
-        let Ok(block_height) = value.fuel_block_height.try_into() else {
-            bail!(
-                "`fuel_block_height` as read from the db cannot fit in a `u32` as expected. Got: {:?} from db",
-                value.fuel_block_height
-            );
-        };
-
-        let Ok(submittal_height) = value.submittal_height.try_into() else {
-            bail!("`submittal_height` as read from the db cannot fit in a `u64` as expected. Got: {} from db", value.submittal_height);
-        };
-
-        Ok(Self {
-            block_hash,
-            block_height,
-            completed: value.completed,
-            submittal_height,
-        })
-    }
-}
-
-impl From<BlockSubmission> for L1FuelBlockSubmission {
-    fn from(value: BlockSubmission) -> Self {
-        Self {
-            fuel_block_hash: value.block_hash.to_vec(),
-            fuel_block_height: i64::from(value.block_height),
-            completed: value.completed,
-            submittal_height: value.submittal_height.into(),
-        }
-    }
-}
-
-#[derive(sqlx::FromRow)]
-pub struct L1StateSubmission {
-    pub id: i64,
-    pub fuel_block_hash: Vec<u8>,
-    pub fuel_block_height: i64,
-}
-
-impl TryFrom<L1StateSubmission> for StateSubmission {
-    type Error = crate::error::Error;
-
-    fn try_from(value: L1StateSubmission) -> Result<Self, Self::Error> {
-        let block_hash = value.fuel_block_hash.as_slice();
-        let Ok(block_hash) = block_hash.try_into() else {
-            bail!("Expected 32 bytes for `fuel_block_hash`, but got: {block_hash:?} from db",);
-        };
-
-        let Ok(block_height) = value.fuel_block_height.try_into() else {
-            bail!(
-                "`fuel_block_height` as read from the db cannot fit in a `u32` as expected. Got: {:?} from db",
-                value.fuel_block_height
-            );
-        };
-
-        Ok(Self {
-            id: Some(value.id as u32),
-            block_height,
-            block_hash,
-        })
-    }
-}
-
-impl From<StateSubmission> for L1StateSubmission {
-    fn from(value: StateSubmission) -> Self {
-        Self {
-            // if not present use placeholder as id is given by db
-            id: value.id.unwrap_or_default() as i64,
-            fuel_block_height: i64::from(value.block_height),
-            fuel_block_hash: value.block_hash.to_vec(),
-        }
-    }
-}
-
-#[derive(sqlx::FromRow)]
-pub struct L1StateFragment {
-    pub id: i64,
-    pub submission_id: i64,
-    pub fragment_idx: i64,
-    pub data: Vec<u8>,
-    pub created_at: chrono::DateTime<chrono::Utc>,
-}
-
-impl TryFrom<L1StateFragment> for StateFragment {
-    type Error = crate::error::Error;
-
-    fn try_from(value: L1StateFragment) -> Result<Self, Self::Error> {
-        Ok(Self {
-            id: Some(value.id as u32),
-            submission_id: Some(value.submission_id as u32),
-            fragment_idx: value.fragment_idx as u32,
-            data: value.data,
-            created_at: value.created_at,
-        })
-    }
-}
-
-impl From<StateFragment> for L1StateFragment {
-    fn from(value: StateFragment) -> Self {
-        Self {
-            // if not present use placeholder as id is given by db
-            id: value.id.unwrap_or_default() as i64,
-            // if not present use placeholder as id is given by db
-            submission_id: value.submission_id.unwrap_or_default() as i64,
-            fragment_idx: value.fragment_idx as i64,
-            data: value.data,
-            created_at: value.created_at,
-        }
-    }
-}
-
-#[derive(sqlx::FromRow)]
-pub struct L1SubmissionTx {
-    pub id: i64,
-    pub hash: Vec<u8>,
-    pub state: i16,
-}
-
-impl TryFrom<L1SubmissionTx> for SubmissionTx {
-    type Error = crate::error::Error;
-
-    fn try_from(value: L1SubmissionTx) -> Result<Self, Self::Error> {
-        let hash = value.hash.as_slice();
-        let Ok(hash) = hash.try_into() else {
-            bail!("Expected 32 bytes for transaction hash, but got: {hash:?} from db",);
-        };
-
-        let Some(state) = TransactionState::from_i16(value.state) else {
-            bail!(
-                "state: {:?} is not a valid variant of `TransactionState`",
-                value.state
-            );
-        };
-
-        Ok(SubmissionTx {
-            id: Some(value.id as u32),
-            hash,
-            state,
-        })
-    }
-}
-
-impl From<SubmissionTx> for L1SubmissionTx {
-    fn from(value: SubmissionTx) -> Self {
-        Self {
-            // if not present use placeholder as id is given by db
-            id: value.id.unwrap_or_default() as i64,
-            hash: value.hash.to_vec(),
-            state: value.state.into_i16(),
-        }
-    }
-}
diff --git a/packages/storage/src/test_instance.rs b/packages/storage/src/test_instance.rs
index 6e2b84f3..abfd9d67 100644
--- a/packages/storage/src/test_instance.rs
+++ b/packages/storage/src/test_instance.rs
@@ -1,8 +1,17 @@
 use std::{
     borrow::Cow,
+    ops::RangeInclusive,
     sync::{Arc, Weak},
 };
 
+use delegate::delegate;
+use ports::{
+    storage::{BundleFragment, FuelBlock, SequentialFuelBlocks, Storage},
+    types::{
+        BlockSubmission, DateTime, Fragment, L1Tx, NonEmpty, NonNegative, TransactionState, Utc,
+    },
+};
+use sqlx::Executor;
 use testcontainers::{
     core::{ContainerPort, WaitFor},
     runners::AsyncRunner,
@@ -97,7 +106,17 @@ impl PostgresProcess {
         })
     }
 
-    pub async fn create_random_db(&self) -> ports::storage::Result<Postgres> {
+    pub async fn create_random_db(self: &Arc<Self>) -> ports::storage::Result<DbWithProcess> {
+        let db = self.create_noschema_random_db().await?;
+
+        db.db.migrate().await?;
+
+        Ok(db)
+    }
+
+    pub async fn create_noschema_random_db(
+        self: &Arc<Self>,
+    ) -> ports::storage::Result<DbWithProcess> {
         let port = self
             .container
             .get_host_port_ipv4(5432)
@@ -117,14 +136,69 @@ impl PostgresProcess {
         let db_name = format!("test_db_{}", rand::random::<u32>());
         let query = format!("CREATE DATABASE {db_name}");
-        db.execute(&query).await?;
+        db.pool()
+            .execute(sqlx::query(&query))
+            .await
+            .map_err(crate::error::Error::from)?;
 
         config.database = db_name;
         let db = Postgres::connect(&config).await?;
 
-        db.migrate().await?;
+        Ok(DbWithProcess {
+            db,
+            _process: self.clone(),
+        })
+    }
+}
+
+#[derive(Clone)]
+pub struct DbWithProcess {
+    pub db: Postgres,
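+    // Keeps the dockerized Postgres instance alive for as long as this handle exists;
+    // the container is torn down once the last Arc to it is dropped.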
+    _process: Arc<PostgresProcess>,
+}
 
-        Ok(db)
+impl DbWithProcess {
+    delegate! {
+        to self.db {
+            pub fn db_name(&self) -> String;
+            pub fn port(&self) -> u16;
+        }
+    }
+}
+
+impl Storage for DbWithProcess {
+    delegate! {
+        to self.db {
+            async fn insert(&self, submission: BlockSubmission) -> ports::storage::Result<()>;
+            async fn submission_w_latest_block(&self) -> ports::storage::Result<Option<BlockSubmission>>;
+            async fn set_submission_completed(&self, fuel_block_hash: [u8; 32]) -> ports::storage::Result<BlockSubmission>;
+            async fn insert_blocks(&self, blocks: NonEmpty<FuelBlock>) -> ports::storage::Result<()>;
+            async fn missing_blocks(&self, starting_height: u32, current_height: u32) -> ports::storage::Result<Vec<RangeInclusive<u32>>>;
+            async fn lowest_sequence_of_unbundled_blocks(
+                &self,
+                starting_height: u32,
+                limit: usize,
+            ) -> ports::storage::Result<Option<SequentialFuelBlocks>>;
+            async fn insert_bundle_and_fragments(
+                &self,
+                block_range: RangeInclusive<u32>,
+                fragments: NonEmpty<Fragment>,
+            ) -> ports::storage::Result<()>;
+            async fn record_pending_tx(
+                &self,
+                tx_hash: [u8; 32],
+                fragment_ids: NonEmpty<NonNegative<i32>>,
+            ) -> ports::storage::Result<()>;
+            async fn get_pending_txs(&self) -> ports::storage::Result<Vec<L1Tx>>;
+            async fn has_pending_txs(&self) -> ports::storage::Result<bool>;
+            async fn oldest_nonfinalized_fragments(
+                &self,
+                starting_height: u32,
+                limit: usize,
+            ) -> ports::storage::Result<Vec<BundleFragment>>;
+            async fn last_time_a_fragment_was_finalized(&self) -> ports::storage::Result<Option<DateTime<Utc>>>;
+            async fn update_tx_state(&self, hash: [u8; 32], state: TransactionState) -> ports::storage::Result<()>;
+        }
+    }
+}
diff --git a/packages/validator/Cargo.toml b/packages/validator/Cargo.toml
deleted file mode 100644
index cd567d1f..00000000
--- a/packages/validator/Cargo.toml
+++ /dev/null
@@ -1,29 +0,0 @@
-[package]
-name = "validator"
-authors = { workspace = true }
-edition = { workspace = true }
-homepage = { workspace = true }
-license = { workspace = true }
-repository = { workspace = true }
-version = { workspace = true }
-publish = { workspace = true }
-rust-version = { workspace = true }
-
-[dependencies]
-fuel-core-client = { workspace = true }
-fuel-crypto = { workspace = true, optional = true }
-mockall = { workspace = true, optional = true }
-rand = { workspace = true, optional = true }
-serde = { workspace = true, features = ["derive"] }
-thiserror = { workspace = true }
-hex = { workspace = true }
-
-[dev-dependencies]
-fuel-crypto = { workspace = true, features = ["random"] }
-rand = { workspace = true, features = ["std", "std_rng"] }
-tai64 = { workspace = true }
-validator = { workspace = true, features = ["validator", "test-helpers"] }
-
-[features]
-validator = ["dep:fuel-crypto"]
-test-helpers = ["validator", "dep:mockall", "dep:rand"]
diff --git a/packages/validator/src/block.rs b/packages/validator/src/block.rs
deleted file mode 100644
index 25a18d67..00000000
--- a/packages/validator/src/block.rs
+++ /dev/null
@@ -1,52 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
-pub struct ValidatedFuelBlock {
-    pub(crate) hash: [u8; 32],
-    pub(crate) height: u32,
-}
-
-impl ValidatedFuelBlock {
-    pub fn hash(&self) -> [u8; 32] {
-        self.hash
-    }
-
-    pub fn height(&self) -> u32 {
-        self.height
-    }
-
-    #[cfg(feature = "test-helpers")]
-    pub fn new(hash: [u8; 32], height: u32) -> Self {
-        Self { hash, height }
-    }
-}
-
-impl std::fmt::Debug for ValidatedFuelBlock {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let hash = self.hash.map(|byte| format!("{byte:02x?}")).join("");
-        f.debug_struct("ValidatedFuelBlock")
-            .field("hash", &hash)
-            .field("height", &self.height)
-            .finish()
-    }
-}
-
-#[cfg(feature = "test-helpers")]
-impl From<fuel_core_client::client::types::block::Block> for ValidatedFuelBlock {
-    fn from(block: fuel_core_client::client::types::block::Block) -> Self {
-        Self {
-            hash: *block.id,
-            height: block.header.height,
-        }
-    }
-}
-
-#[cfg(feature = "test-helpers")]
-impl rand::distributions::Distribution<ValidatedFuelBlock> for rand::distributions::Standard {
-    fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> ValidatedFuelBlock {
-        ValidatedFuelBlock {
-            hash: rng.gen(),
-            height: rng.gen(),
-        }
-    }
-}
diff --git a/packages/validator/src/lib.rs b/packages/validator/src/lib.rs
deleted file mode 100644
index 3ed5d36a..00000000
--- a/packages/validator/src/lib.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-pub mod block;
-#[cfg(feature = "validator")]
-mod validator;
-
-use fuel_core_client::client::types::block::Block as FuelBlock;
-#[cfg(feature = "validator")]
-pub use validator::*;
-
-use crate::block::ValidatedFuelBlock;
-
-#[derive(Debug, thiserror::Error)]
-pub enum Error {
-    #[error("{0}")]
-    BlockValidation(String),
-}
-
-pub type Result<T> = std::result::Result<T, Error>;
-
-#[cfg_attr(feature = "test-helpers", mockall::automock)]
-pub trait Validator: Send + Sync {
-    fn validate(&self, fuel_block: &FuelBlock) -> Result<ValidatedFuelBlock>;
-}
diff --git a/run_tests.sh b/run_tests.sh
index d6629546..6f236515 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -5,7 +5,8 @@
 script_location="$(readlink -f "$(dirname "$0")")"
 workspace_cargo_manifest="$script_location/Cargo.toml"
 
-# So that we may have a binary in `target/debug`
-cargo build --manifest-path "$workspace_cargo_manifest" --bin fuel-block-committer
+cargo test --manifest-path "$workspace_cargo_manifest" --workspace --exclude e2e
 
-PATH="$script_location/target/debug:$PATH" cargo test --manifest-path "$workspace_cargo_manifest" --workspace
+# So that we may have a binary in `target/release`
+cargo build --release --manifest-path "$workspace_cargo_manifest" --bin fuel-block-committer
+PATH="$script_location/target/release:$PATH" cargo test --manifest-path "$workspace_cargo_manifest" --package e2e --jobs 1
diff --git a/sql-compose.yaml b/sql-compose.yaml
new file mode 100644
index 00000000..4150d7aa
--- /dev/null
+++ b/sql-compose.yaml
@@ -0,0 +1,12 @@
+version: '3.8'
+
+services:
+  postgres:
+    image: postgres:latest
+    container_name: my_postgres
+    environment:
+      POSTGRES_USER: username
+      POSTGRES_PASSWORD: password
+      POSTGRES_DB: test
+    ports:
+      - "5432:5432"
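+# e.g. start locally with: docker compose -f sql-compose.yaml up -d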