From 3fdd7c668aa9e82b98a07d8bae00875d35a5d998 Mon Sep 17 00:00:00 2001 From: sergerad Date: Thu, 17 Oct 2024 09:31:38 +1300 Subject: [PATCH 01/27] Refactor BlockInterval::new() to facilitate hash range inputs --- scripts/prove_rpc.sh | 22 +-- zero/src/bin/leader.rs | 32 ++-- zero/src/bin/leader/cli.rs | 14 +- zero/src/bin/leader/client.rs | 34 ++-- zero/src/bin/rpc.rs | 15 +- zero/src/block_interval.rs | 332 +++++++++++++++++----------------- zero/src/provider.rs | 7 +- zero/src/rpc/mod.rs | 3 +- 8 files changed, 229 insertions(+), 230 deletions(-) diff --git a/scripts/prove_rpc.sh b/scripts/prove_rpc.sh index 49848fdfe..bacf519b7 100755 --- a/scripts/prove_rpc.sh +++ b/scripts/prove_rpc.sh @@ -1,11 +1,11 @@ #!/bin/bash # Args: -# 1 --> Start block idx -# 2 --> End block index (inclusive) +# 1 --> Start block (number or hash) +# 2 --> End block (number or hash, inclusive) # 3 --> Rpc endpoint:port (eg. http://35.246.1.96:8545) # 4 --> Rpc type (eg. jerigon / native) -# 5 --> Ignore previous proofs (boolean) +# 5 --> Checkpoint block (number or hash, optional) # 6 --> Backoff in milliseconds (optional [default: 0]) # 7 --> Number of retries (optional [default: 0]) # 8 --> Test run only flag `test_only` (optional) @@ -44,7 +44,7 @@ START_BLOCK=$1 END_BLOCK=$2 NODE_RPC_URL=$3 NODE_RPC_TYPE=$4 -IGNORE_PREVIOUS_PROOFS=$5 +CHECKPOINT_BLOCK=$5 BACKOFF=${6:-0} RETRIES=${7:-0} @@ -58,25 +58,17 @@ RECOMMENDED_FILE_HANDLE_LIMIT=8192 mkdir -p $PROOF_OUTPUT_DIR -if $IGNORE_PREVIOUS_PROOFS ; then +if $CHECKPOINT_BLOCK ; then # Set checkpoint height to previous block number for the first block in range - prev_proof_num=$(($1-1)) - PREV_PROOF_EXTRA_ARG="--checkpoint-block-number ${prev_proof_num}" + PREV_PROOF_EXTRA_ARG="--checkpoint-block $CHECKPOINT_BLOCK" else + # TODO(serge in current PR): This is impossible if blocks are specified by hash if [[ $1 -gt 1 ]]; then prev_proof_num=$(($1-1)) PREV_PROOF_EXTRA_ARG="-f ${PROOF_OUTPUT_DIR}/b${prev_proof_num}.zkproof" fi fi -# Convert hex to decimal parameters -if [[ $START_BLOCK == 0x* ]]; then - START_BLOCK=$((16#${START_BLOCK#"0x"})) -fi -if [[ $END_BLOCK == 0x* ]]; then - END_BLOCK=$((16#${END_BLOCK#"0x"})) -fi - # Define block interval if [ $END_BLOCK == '-' ]; then # Follow from the start block to the end of the chain diff --git a/zero/src/bin/leader.rs b/zero/src/bin/leader.rs index 6daaf599f..38ca2a3da 100644 --- a/zero/src/bin/leader.rs +++ b/zero/src/bin/leader.rs @@ -5,12 +5,12 @@ use std::sync::Arc; use anyhow::Result; use clap::Parser; use cli::Command; -use client::RpcParams; use paladin::config::Config; use paladin::runtime::Runtime; use tracing::info; use zero::env::load_dotenvy_vars_if_present; use zero::prover::{ProofRuntime, ProverConfig}; +use zero::rpc::retry::build_http_retry_provider; use zero::{ block_interval::BlockInterval, prover_state::persistence::set_circuit_cache_dir_env_if_not_set, }; @@ -103,26 +103,36 @@ async fn main() -> Result<()> { Command::Rpc { rpc_url, rpc_type, - block_interval, - checkpoint_block_number, + checkpoint_block, previous_proof, block_time, + end_block, + start_block, backoff, max_retries, } => { + // Construct the provider. let previous_proof = get_previous_proof(previous_proof)?; - let block_interval = BlockInterval::new(&block_interval)?; + let retry_provider = build_http_retry_provider(rpc_url.clone(), backoff, max_retries)?; + let cached_provider = Arc::new(zero::provider::CachedProvider::new( + retry_provider, + rpc_type, + )); + // Construct the block interval. 
+ let block_interval = + BlockInterval::new(cached_provider.clone(), start_block, end_block).await?; + + // Convert the checkpoint block to a block number. + let checkpoint_block_number = + BlockInterval::block_to_num(cached_provider.clone(), checkpoint_block).await?; + + // Prove the block interval. info!("Proving interval {block_interval}"); client_main( proof_runtime, - RpcParams { - rpc_url, - rpc_type, - backoff, - max_retries, - block_time, - }, + cached_provider, + block_time, block_interval, LeaderConfig { checkpoint_block_number, diff --git a/zero/src/bin/leader/cli.rs b/zero/src/bin/leader/cli.rs index ad9270ee8..b7d4f0a4d 100644 --- a/zero/src/bin/leader/cli.rs +++ b/zero/src/bin/leader/cli.rs @@ -1,5 +1,6 @@ use std::path::PathBuf; +use alloy::eips::BlockId; use alloy::transports::http::reqwest::Url; use clap::{Parser, Subcommand, ValueEnum, ValueHint}; use zero::prover::cli::CliProverConfig; @@ -63,12 +64,15 @@ pub(crate) enum Command { // The node RPC type (jerigon / native). #[arg(long, short = 't', default_value = "jerigon")] rpc_type: RpcType, - /// The block interval for which to generate a proof. - #[arg(long, short = 'i')] - block_interval: String, + /// The start of the block range to prove (inclusive). + #[arg(long, short = 's')] + start_block: BlockId, + /// The end of the block range to prove (inclusive). + #[arg(long, short = 'e')] + end_block: Option, /// The checkpoint block number. - #[arg(short, long, default_value_t = 0)] - checkpoint_block_number: u64, + #[arg(short, long, default_value_t = BlockId::from(0))] + checkpoint_block: BlockId, /// The previous proof output. #[arg(long, short = 'f', value_hint = ValueHint::FilePath)] previous_proof: Option, diff --git a/zero/src/bin/leader/client.rs b/zero/src/bin/leader/client.rs index 343a2cdcb..191becdc0 100644 --- a/zero/src/bin/leader/client.rs +++ b/zero/src/bin/leader/client.rs @@ -1,7 +1,8 @@ use std::sync::Arc; +use alloy::providers::Provider; use alloy::rpc::types::{BlockId, BlockNumberOrTag}; -use alloy::transports::http::reqwest::Url; +use alloy::transports::Transport; use anyhow::{anyhow, Result}; use tokio::sync::mpsc; use tracing::info; @@ -14,15 +15,6 @@ use zero::rpc::{retry::build_http_retry_provider, RpcType}; use crate::ProofRuntime; -#[derive(Debug)] -pub struct RpcParams { - pub rpc_url: Url, - pub rpc_type: RpcType, - pub backoff: u64, - pub max_retries: u32, - pub block_time: u64, -} - #[derive(Debug)] pub struct LeaderConfig { pub checkpoint_block_number: u64, @@ -31,24 +23,21 @@ pub struct LeaderConfig { } /// The main function for the client. -pub(crate) async fn client_main( +pub(crate) async fn client_main( proof_runtime: Arc, - rpc_params: RpcParams, + cached_provider: Arc>, + block_time: u64, block_interval: BlockInterval, mut leader_config: LeaderConfig, -) -> Result<()> { +) -> Result<()> +where + ProviderT: Provider + 'static, + TransportT: Transport + Clone, +{ use futures::StreamExt; let test_only = leader_config.prover_config.test_only; - let cached_provider = Arc::new(zero::provider::CachedProvider::new( - build_http_retry_provider( - rpc_params.rpc_url.clone(), - rpc_params.backoff, - rpc_params.max_retries, - )?, - )); - if !test_only { // For actual proof runs, perform a sanity check on the provided inputs. check_previous_proof_and_checkpoint( @@ -76,7 +65,7 @@ pub(crate) async fn client_main( let mut block_interval_stream: BlockIntervalStream = match block_interval { block_interval @ BlockInterval::FollowFrom { .. 
} => { block_interval - .into_unbounded_stream(cached_provider.clone(), rpc_params.block_time) + .into_unbounded_stream(cached_provider.clone(), block_time) .await? } _ => block_interval.into_bounded_stream()?, @@ -92,7 +81,6 @@ pub(crate) async fn client_main( cached_provider.clone(), block_id, leader_config.checkpoint_block_number, - rpc_params.rpc_type, ) .await?; block_tx diff --git a/zero/src/bin/rpc.rs b/zero/src/bin/rpc.rs index 164751df2..bf2f929bc 100644 --- a/zero/src/bin/rpc.rs +++ b/zero/src/bin/rpc.rs @@ -98,13 +98,9 @@ where let (block_num, _is_last_block) = block_interval_elem?; let block_id = BlockId::Number(BlockNumberOrTag::Number(block_num)); // Get the prover input for particular block. - let result = rpc::block_prover_input( - cached_provider.clone(), - block_id, - checkpoint_block_number, - params.rpc_type, - ) - .await?; + let result = + rpc::block_prover_input(cached_provider.clone(), block_id, checkpoint_block_number) + .await?; block_prover_inputs.push(result); } @@ -114,11 +110,12 @@ where impl Cli { /// Execute the cli command. pub async fn execute(self) -> anyhow::Result<()> { - let cached_provider = Arc::new(CachedProvider::new(build_http_retry_provider( + let retry_provider = build_http_retry_provider( self.config.rpc_url.clone(), self.config.backoff, self.config.max_retries, - )?)); + )?; + let cached_provider = Arc::new(CachedProvider::new(retry_provider, self.config.rpc_type)); match self.command { Command::Fetch { diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index e424076e0..110c2a9a2 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -1,15 +1,15 @@ +use std::ops::Range; use std::pin::Pin; use std::sync::Arc; -use alloy::primitives::B256; use alloy::rpc::types::eth::BlockId; +use alloy::rpc::types::BlockTransactionsKind; use alloy::{hex, providers::Provider, transports::Transport}; use anyhow::{anyhow, Result}; use async_stream::try_stream; use futures::Stream; use tracing::info; -use crate::parsing; use crate::provider::CachedProvider; /// The async stream of block numbers. @@ -22,7 +22,7 @@ pub enum BlockInterval { // A single block id (could be number or hash) SingleBlockId(BlockId), // A range of blocks. - Range(std::ops::Range), + Range(Range), // Dynamic interval from the start block to the latest network block FollowFrom { // Interval starting block number @@ -31,64 +31,42 @@ pub enum BlockInterval { } impl BlockInterval { - /// Create a new block interval + /// Create a new block interval. /// - /// A valid block range is of the form: - /// * `block_number` for a single block number - /// * `lhs..rhs`, `lhs..=rhs` as an exclusive/inclusive range - /// * `lhs..` for a range starting from `lhs` to the chain tip. `lhs..=` - /// is also valid format. 
- /// - /// # Example - /// - /// ```rust - /// # use alloy::rpc::types::eth::BlockId; - /// # use zero::block_interval::BlockInterval; - /// assert_eq!(BlockInterval::new("0..10").unwrap(), BlockInterval::Range(0..10)); - /// assert_eq!(BlockInterval::new("0..=10").unwrap(), BlockInterval::Range(0..11)); - /// assert_eq!(BlockInterval::new("32141").unwrap(), BlockInterval::SingleBlockId(BlockId::Number(32141.into()))); - /// assert_eq!(BlockInterval::new("100..").unwrap(), BlockInterval::FollowFrom{start_block: 100}); - /// ``` - pub fn new(s: &str) -> anyhow::Result { - if (s.starts_with("0x") && s.len() == 66) || s.len() == 64 { - // Try to parse hash - let hash = s - .parse::() - .map_err(|_| anyhow!("invalid block hash '{s}'"))?; - return Ok(BlockInterval::SingleBlockId(BlockId::Hash(hash.into()))); - } - - // First we parse for inclusive range and then for exclusive range, - // because both separators start with `..` - if let Ok(range) = parsing::parse_range_inclusive(s) { - Ok(BlockInterval::Range(range)) - } else if let Ok(range) = parsing::parse_range_exclusive(s) { - Ok(BlockInterval::Range(range)) - } - // Now we look for the follow from range - else if s.contains("..") { - let mut split = s.trim().split("..").filter(|s| *s != "=" && !s.is_empty()); + /// If end_block is None, the interval is unbounded and will follow from + /// start_block. If start_block == end_block, the interval is a single + /// block. Otherwise the interval is a range from start_block to end_block. + pub async fn new( + cached_provider: Arc>, + start_block: BlockId, + end_block: Option, + ) -> Result + where + ProviderT: Provider + 'static, + TransportT: Transport + Clone, + { + // Ensure the start block is a valid block number. + let start_block_num = Self::block_to_num(cached_provider.clone(), start_block).await?; - // Any other character after `..` or `..=` is invalid - if split.clone().count() > 1 { - return Err(anyhow!("invalid block interval range '{s}'")); + // Create the block interval. + match end_block { + // Start and end are the same. + Some(end_block) if end_block == start_block => Ok(BlockInterval::SingleBlockId( + BlockId::Number(start_block_num.into()), + )), + // Bounded range provided. + Some(end_block) => { + let end_block_num = Self::block_to_num(cached_provider.clone(), end_block).await?; + Ok(BlockInterval::Range(start_block_num..end_block_num + 1)) + } + // Unbounded range provided. + None => { + let start_block_num = + Self::block_to_num(cached_provider.clone(), start_block).await?; + Ok(BlockInterval::SingleBlockId(BlockId::Number( + start_block_num.into(), + ))) } - let num = split - .next() - .map(|num| { - num.parse::() - .map_err(|_| anyhow!("invalid block number '{num}'")) - }) - .ok_or(anyhow!("invalid block interval range '{s}'"))??; - return Ok(BlockInterval::FollowFrom { start_block: num }); - } - // Only single block number is left to try to parse - else { - let num: u64 = s - .trim() - .parse() - .map_err(|_| anyhow!("invalid block interval range '{s}'"))?; - return Ok(BlockInterval::SingleBlockId(BlockId::Number(num.into()))); } } @@ -166,6 +144,36 @@ impl BlockInterval { )), } } + + pub async fn block_to_num( + cached_provider: Arc>, + block: BlockId, + ) -> Result + where + ProviderT: Provider + 'static, + TransportT: Transport + Clone, + { + let block_num = match block { + BlockId::Number(num) => num + .as_number() + .ok_or_else(|| anyhow!("invalid block number '{num}'"))?, + BlockId::Hash(hash) => { + let block = cached_provider + .get_provider() + .await? 
+ .get_block(BlockId::Hash(hash.into()), BlockTransactionsKind::Hashes) + .await + .map_err(|e| { + anyhow!("could not retrieve block number by hash from the provider: {e}") + })?; + block + .ok_or(anyhow!("block not found {hash}"))? + .header + .number + } + }; + Ok(block_num) + } } impl std::fmt::Display for BlockInterval { @@ -185,109 +193,105 @@ impl std::fmt::Display for BlockInterval { } } -impl std::str::FromStr for BlockInterval { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - BlockInterval::new(s) - } -} - -#[cfg(test)] -mod test { - use alloy::primitives::B256; - - use super::*; - - #[test] - fn can_create_block_interval_from_exclusive_range() { - assert_eq!( - BlockInterval::new("0..10").unwrap(), - BlockInterval::Range(0..10) - ); - } - - #[test] - fn can_create_block_interval_from_inclusive_range() { - assert_eq!( - BlockInterval::new("0..=10").unwrap(), - BlockInterval::Range(0..11) - ); - } - - #[test] - fn can_create_follow_from_block_interval() { - assert_eq!( - BlockInterval::new("100..").unwrap(), - BlockInterval::FollowFrom { start_block: 100 } - ); - } - - #[test] - fn can_create_single_block_interval() { - assert_eq!( - BlockInterval::new("123415131").unwrap(), - BlockInterval::SingleBlockId(BlockId::Number(123415131.into())) - ); - } - - #[test] - fn new_interval_proper_single_block_error() { - assert_eq!( - BlockInterval::new("113A").err().unwrap().to_string(), - "invalid block interval range '113A'" - ); - } - - #[test] - fn new_interval_proper_range_error() { - assert_eq!( - BlockInterval::new("111...156").err().unwrap().to_string(), - "invalid block interval range '111...156'" - ); - } - - #[test] - fn new_interval_parse_block_hash() { - assert_eq!( - BlockInterval::new( - "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" - ) - .unwrap(), - BlockInterval::SingleBlockId(BlockId::Hash( - "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" - .parse::() - .unwrap() - .into() - )) - ) - } - - #[tokio::test] - async fn can_into_bounded_stream() { - use futures::StreamExt; - let mut result = Vec::new(); - let mut stream = BlockInterval::new("1..10") - .unwrap() - .into_bounded_stream() - .unwrap(); - while let Some(val) = stream.next().await { - result.push(val.unwrap()); - } - let mut expected = Vec::from_iter(1u64..10u64) - .into_iter() - .map(|it| (it, false)) - .collect::>(); - expected.last_mut().unwrap().1 = true; - assert_eq!(result, expected); - } - - #[test] - fn can_create_from_string() { - use std::str::FromStr; - assert_eq!( - &format!("{}", BlockInterval::from_str("0..10").unwrap()), - "0..10" - ); - } -} +// TODO(serge current PR): Add tests using mocks for CachedProvider +//#[cfg(test)] +//mod test { +// use alloy::primitives::B256; +// +// use super::*; +// +// #[test] +// fn can_create_block_interval_from_exclusive_range() { +// assert_eq!( +// BlockInterval::new(BlockId::from(0), BlockId::from(10)).unwrap(), +// //BlockInterval::new("0..10").unwrap(), +// BlockInterval::Range(0..10) +// ); +// } +// +// #[test] +// fn can_create_block_interval_from_inclusive_range() { +// assert_eq!( +// BlockInterval::new("0..=10").unwrap(), +// BlockInterval::Range(0..11) +// ); +// } +// +// #[test] +// fn can_create_follow_from_block_interval() { +// assert_eq!( +// BlockInterval::new("100..").unwrap(), +// BlockInterval::FollowFrom { start_block: 100 } +// ); +// } +// +// #[test] +// fn can_create_single_block_interval() { +// assert_eq!( +// BlockInterval::new("123415131").unwrap(), +// 
BlockInterval::SingleBlockId(BlockId::Number(123415131.into())) +// ); +// } +// +// #[test] +// fn new_interval_proper_single_block_error() { +// assert_eq!( +// BlockInterval::new("113A").err().unwrap().to_string(), +// "invalid block interval range '113A'" +// ); +// } +// +// #[test] +// fn new_interval_proper_range_error() { +// assert_eq!( +// BlockInterval::new("111...156").err().unwrap().to_string(), +// "invalid block interval range '111...156'" +// ); +// } +// +// #[test] +// fn new_interval_parse_block_hash() { +// assert_eq!( +// BlockInterval::new( +// +// "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" +// ) +// .unwrap(), +// BlockInterval::SingleBlockId(BlockId::Hash( +// +// "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" +// .parse::() +// .unwrap() +// .into() +// )) +// ) +// } +// +// #[tokio::test] +// async fn can_into_bounded_stream() { +// use futures::StreamExt; +// let mut result = Vec::new(); +// let mut stream = BlockInterval::new("1..10") +// .unwrap() +// .into_bounded_stream() +// .unwrap(); +// while let Some(val) = stream.next().await { +// result.push(val.unwrap()); +// } +// let mut expected = Vec::from_iter(1u64..10u64) +// .into_iter() +// .map(|it| (it, false)) +// .collect::>(); +// expected.last_mut().unwrap().1 = true; +// assert_eq!(result, expected); +// } +// +// #[test] +// fn can_create_from_string() { +// use std::str::FromStr; +// assert_eq!( +// &format!("{}", BlockInterval::from_str("0..10").unwrap()), +// "0..10" +// ); +// } +//} diff --git a/zero/src/provider.rs b/zero/src/provider.rs index 876cb270c..d82de1ff1 100644 --- a/zero/src/provider.rs +++ b/zero/src/provider.rs @@ -7,6 +7,8 @@ use alloy::{providers::Provider, transports::Transport}; use anyhow::Context; use tokio::sync::{Mutex, Semaphore, SemaphorePermit}; +use crate::rpc::RpcType; + const CACHE_SIZE: usize = 1024; const MAX_NUMBER_OF_PARALLEL_REQUESTS: usize = 128; @@ -22,6 +24,8 @@ pub struct CachedProvider { blocks_by_number: Arc>>, blocks_by_hash: Arc>>, _phantom: std::marker::PhantomData, + + pub rpc_type: RpcType, } pub struct ProviderGuard<'a, ProviderT> { @@ -48,7 +52,7 @@ where ProviderT: Provider, TransportT: Transport + Clone, { - pub fn new(provider: ProviderT) -> Self { + pub fn new(provider: ProviderT, rpc_type: RpcType) -> Self { Self { provider: provider.into(), semaphore: Arc::new(Semaphore::new(MAX_NUMBER_OF_PARALLEL_REQUESTS)), @@ -58,6 +62,7 @@ where blocks_by_hash: Arc::new(Mutex::new(lru::LruCache::new( std::num::NonZero::new(CACHE_SIZE).unwrap(), ))), + rpc_type, _phantom: std::marker::PhantomData, } } diff --git a/zero/src/rpc/mod.rs b/zero/src/rpc/mod.rs index 007a4fdb2..40e9cd9a7 100644 --- a/zero/src/rpc/mod.rs +++ b/zero/src/rpc/mod.rs @@ -45,13 +45,12 @@ pub async fn block_prover_input( cached_provider: Arc>, block_id: BlockId, checkpoint_block_number: u64, - rpc_type: RpcType, ) -> Result where ProviderT: Provider, TransportT: Transport + Clone, { - match rpc_type { + match cached_provider.rpc_type { RpcType::Jerigon => { jerigon::block_prover_input(cached_provider, block_id, checkpoint_block_number).await } From 69770b6c7d7a604b1c4ff31d95b4b0657074272b Mon Sep 17 00:00:00 2001 From: sergerad Date: Thu, 17 Oct 2024 10:16:48 +1300 Subject: [PATCH 02/27] Update scripts --- .github/workflows/jerigon-native.yml | 4 +-- .github/workflows/jerigon-zero.yml | 33 ++++++++++------------ scripts/prove_rpc.sh | 42 +++++++++++++++------------- 3 files changed, 39 insertions(+), 40 deletions(-) diff --git 
a/.github/workflows/jerigon-native.yml b/.github/workflows/jerigon-native.yml index 29a380c3a..e641c5033 100644 --- a/.github/workflows/jerigon-native.yml +++ b/.github/workflows/jerigon-native.yml @@ -74,14 +74,14 @@ jobs: run: | ETH_RPC_URL="$(kurtosis port print cancun-testnet el-2-erigon-lighthouse ws-rpc)" ulimit -n 8192 - OUTPUT_TO_TERMINAL=true ./scripts/prove_rpc.sh 0x1 0xf $ETH_RPC_URL native true 3000 100 test_only + OUTPUT_TO_TERMINAL=true ./scripts/prove_rpc.sh 1 15 $ETH_RPC_URL native true 3000 100 test_only echo "Proving blocks in test_only mode finished" - name: Run prove blocks with native tracer in real mode run: | ETH_RPC_URL="$(kurtosis port print cancun-testnet el-2-erigon-lighthouse ws-rpc)" rm -rf proofs/* circuits/* ./proofs.json test.out verify.out leader.out - OUTPUT_TO_TERMINAL=true RUN_VERIFICATION=true ./scripts/prove_rpc.sh 0x4 0x7 $ETH_RPC_URL native true 3000 100 + OUTPUT_TO_TERMINAL=true RUN_VERIFICATION=true ./scripts/prove_rpc.sh 4 7 $ETH_RPC_URL native true 3000 100 echo "Proving blocks in real mode finished" - name: Shut down network diff --git a/.github/workflows/jerigon-zero.yml b/.github/workflows/jerigon-zero.yml index 216b32f8c..c2e994c80 100644 --- a/.github/workflows/jerigon-zero.yml +++ b/.github/workflows/jerigon-zero.yml @@ -10,7 +10,6 @@ on: branches: - "**" - env: CARGO_TERM_COLOR: always REGISTRY: ghcr.io @@ -26,16 +25,16 @@ jobs: uses: actions/checkout@v4 - name: Checkout test-jerigon-network sources - uses: actions/checkout@v4 + uses: actions/checkout@v4 with: repository: 0xPolygonZero/jerigon-test-network - ref: 'feat/kurtosis-network' + ref: "feat/kurtosis-network" path: jerigon-test-network - uses: actions-rust-lang/setup-rust-toolchain@v1 - + - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@v3 - name: Login to GitHub Container Registry uses: docker/login-action@v2 @@ -57,39 +56,35 @@ jobs: #It is much easier to use cast tool in scripts so install foundry - name: Install Foundry - uses: foundry-rs/foundry-toolchain@v1 + uses: foundry-rs/foundry-toolchain@v1 - name: Run cancun test network run: | docker pull ghcr.io/0xpolygonzero/erigon:feat-zero - kurtosis run --enclave cancun-testnet github.com/ethpandaops/ethereum-package@4.0.0 --args-file jerigon-test-network/network_params.yml + kurtosis run --enclave cancun-testnet github.com/ethpandaops/ethereum-package@4.0.0 --args-file jerigon-test-network/network_params.yml - name: Generate blocks with transactions run: | - ETH_RPC_URL="$(kurtosis port print cancun-testnet el-2-erigon-lighthouse ws-rpc)" - cast rpc eth_blockNumber --rpc-url $ETH_RPC_URL - cd jerigon-test-network && set -a && source .env && set +a - bash ./tests/generate_transactions.sh - + ETH_RPC_URL="$(kurtosis port print cancun-testnet el-2-erigon-lighthouse ws-rpc)" + cast rpc eth_blockNumber --rpc-url $ETH_RPC_URL + cd jerigon-test-network && set -a && source .env && set +a + bash ./tests/generate_transactions.sh + - name: Run prove blocks with zero tracer in test_only mode run: | ETH_RPC_URL="$(kurtosis port print cancun-testnet el-2-erigon-lighthouse ws-rpc)" ulimit -n 8192 - OUTPUT_TO_TERMINAL=true ./scripts/prove_rpc.sh 0x1 0xf $ETH_RPC_URL jerigon true 3000 100 test_only + OUTPUT_TO_TERMINAL=true ./scripts/prove_rpc.sh 1 15 $ETH_RPC_URL jerigon true 3000 100 test_only echo "Proving blocks in test_only mode finished" - - name: Run prove blocks with zero tracer in real mode run: | ETH_RPC_URL="$(kurtosis port print cancun-testnet el-2-erigon-lighthouse ws-rpc)" rm -rf proofs/* 
circuits/* ./proofs.json test.out verify.out leader.out - OUTPUT_TO_TERMINAL=true RUN_VERIFICATION=true ./scripts/prove_rpc.sh 0x2 0x5 $ETH_RPC_URL jerigon true 3000 100 + OUTPUT_TO_TERMINAL=true RUN_VERIFICATION=true ./scripts/prove_rpc.sh 2 5 $ETH_RPC_URL jerigon true 3000 100 echo "Proving blocks in real mode finished" - + - name: Shut down network run: | kurtosis enclave rm -f cancun-testnet kurtosis engine stop - - - diff --git a/scripts/prove_rpc.sh b/scripts/prove_rpc.sh index bacf519b7..a229b04eb 100755 --- a/scripts/prove_rpc.sh +++ b/scripts/prove_rpc.sh @@ -38,7 +38,6 @@ REPO_ROOT=$(git rev-parse --show-toplevel) PROOF_OUTPUT_DIR="${REPO_ROOT}/proofs" OUT_LOG_PATH="${PROOF_OUTPUT_DIR}/b$1_$2.log" ALWAYS_WRITE_LOGS=0 # Change this to `1` if you always want logs to be written. -TOT_BLOCKS=$(($2-$1+1)) START_BLOCK=$1 END_BLOCK=$2 @@ -58,29 +57,20 @@ RECOMMENDED_FILE_HANDLE_LIMIT=8192 mkdir -p $PROOF_OUTPUT_DIR -if $CHECKPOINT_BLOCK ; then +if [ -n "$CHECKPOINT_BLOCK" ] ; then # Set checkpoint height to previous block number for the first block in range PREV_PROOF_EXTRA_ARG="--checkpoint-block $CHECKPOINT_BLOCK" else - # TODO(serge in current PR): This is impossible if blocks are specified by hash + if [[ $START_BLOCK == 0x* ]]; then + echo "Checkpoint block is required when specifying blocks by hash" + exit 1 + fi if [[ $1 -gt 1 ]]; then prev_proof_num=$(($1-1)) PREV_PROOF_EXTRA_ARG="-f ${PROOF_OUTPUT_DIR}/b${prev_proof_num}.zkproof" fi fi -# Define block interval -if [ $END_BLOCK == '-' ]; then - # Follow from the start block to the end of the chain - BLOCK_INTERVAL=$START_BLOCK.. -elif [ $START_BLOCK == $END_BLOCK ]; then - # Single block - BLOCK_INTERVAL=$START_BLOCK -else - # Block range - BLOCK_INTERVAL=$START_BLOCK..=$END_BLOCK -fi - # Print out a warning if the we're using `native` and our file descriptor limit is too low. Don't bother if we can't find `ulimit`. if [ $(command -v ulimit) ] && [ $NODE_RPC_TYPE == "native" ] then @@ -100,10 +90,24 @@ fi # other non-proving code. if [[ $8 == "test_only" ]]; then # test only run - echo "Proving blocks ${BLOCK_INTERVAL} in a test_only mode now... (Total: ${TOT_BLOCKS})" - command='cargo r --release --package zero --bin leader -- --test-only --runtime in-memory --load-strategy on-demand --proof-output-dir $PROOF_OUTPUT_DIR --block-batch-size $BLOCK_BATCH_SIZE rpc --rpc-type "$NODE_RPC_TYPE" --rpc-url "$NODE_RPC_URL" --block-interval $BLOCK_INTERVAL $PREV_PROOF_EXTRA_ARG --backoff "$BACKOFF" --max-retries "$RETRIES" ' + echo "Proving blocks from ($START_BLOCK) to ($END_BLOCK)" + command="cargo r --release --package zero --bin leader -- \ +--test-only \ +--runtime in-memory \ +--load-strategy on-demand \ +--proof-output-dir $PROOF_OUTPUT_DIR \ +--block-batch-size $BLOCK_BATCH_SIZE \ +rpc \ +--rpc-type $NODE_RPC_TYPE \ +--rpc-url $NODE_RPC_URL \ +--start-block $START_BLOCK \ +--end-block $END_BLOCK \ +$PREV_PROOF_EXTRA_ARG \ +--backoff $BACKOFF \ +--max-retries $RETRIES" + if [ "$OUTPUT_TO_TERMINAL" = true ]; then - eval $command + eval "$command" retVal=$? echo -e "Proof witness generation finished with result: $retVal" exit $retVal @@ -142,7 +146,7 @@ else rm $OUT_LOG_PATH fi fi - echo "Successfully generated ${TOT_BLOCKS} proofs!" + echo "Successfully generated proofs!" 
fi fi From 02dc69c067210ced409dde1ae266381d45d7649b Mon Sep 17 00:00:00 2001 From: sergerad Date: Thu, 17 Oct 2024 10:27:26 +1300 Subject: [PATCH 03/27] Remove blockid from interval --- zero/src/block_interval.rs | 33 +++++++++++---------------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index 110c2a9a2..ffb76f637 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -20,7 +20,7 @@ pub type BlockIntervalStream = Pin), // Dynamic interval from the start block to the latest network block @@ -51,9 +51,9 @@ impl BlockInterval { // Create the block interval. match end_block { // Start and end are the same. - Some(end_block) if end_block == start_block => Ok(BlockInterval::SingleBlockId( - BlockId::Number(start_block_num.into()), - )), + Some(end_block) if end_block == start_block => { + Ok(BlockInterval::SingleBlockId(start_block_num)) + } // Bounded range provided. Some(end_block) => { let end_block_num = Self::block_to_num(cached_provider.clone(), end_block).await?; @@ -63,9 +63,9 @@ impl BlockInterval { None => { let start_block_num = Self::block_to_num(cached_provider.clone(), start_block).await?; - Ok(BlockInterval::SingleBlockId(BlockId::Number( - start_block_num.into(), - ))) + Ok(BlockInterval::FollowFrom { + start_block: start_block_num, + }) } } } @@ -74,10 +74,7 @@ impl BlockInterval { /// second bool flag indicates if the element is last in the interval. pub fn into_bounded_stream(self) -> Result { match self { - BlockInterval::SingleBlockId(BlockId::Number(num)) => { - let num = num - .as_number() - .ok_or(anyhow!("invalid block number '{num}'"))?; + BlockInterval::SingleBlockId(num) => { let range = (num..num + 1).map(|it| Ok((it, true))).collect::>(); Ok(Box::pin(futures::stream::iter(range))) @@ -88,7 +85,7 @@ impl BlockInterval { range.last_mut().map(|it| it.as_mut().map(|it| it.1 = true)); Ok(Box::pin(futures::stream::iter(range))) } - _ => Err(anyhow!( + BlockInterval::FollowFrom { .. } => Err(anyhow!( "could not create bounded stream from unbounded follow-from interval", )), } @@ -96,12 +93,7 @@ impl BlockInterval { pub fn get_start_block(&self) -> Result { match self { - BlockInterval::SingleBlockId(BlockId::Number(num)) => { - let num_value = num - .as_number() - .ok_or_else(|| anyhow!("invalid block number '{num}'"))?; - Ok(num_value) // Return the valid block number - } + BlockInterval::SingleBlockId(num) => Ok(*num), BlockInterval::Range(range) => Ok(range.start), BlockInterval::FollowFrom { start_block, .. 
} => Ok(*start_block), _ => Err(anyhow!("Unknown BlockInterval variant")), // Handle unknown variants @@ -179,10 +171,7 @@ impl BlockInterval { impl std::fmt::Display for BlockInterval { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { - BlockInterval::SingleBlockId(block_id) => match block_id { - BlockId::Number(it) => f.write_fmt(format_args!("{}", it)), - BlockId::Hash(it) => f.write_fmt(format_args!("0x{}", &hex::encode(it.block_hash))), - }, + BlockInterval::SingleBlockId(num) => f.write_fmt(format_args!("{}", num)), BlockInterval::Range(range) => { write!(f, "{}..{}", range.start, range.end) } From 7a75048e6b131242abbe92d83b101c57d5ff6fd7 Mon Sep 17 00:00:00 2001 From: sergerad Date: Thu, 17 Oct 2024 13:41:47 +1300 Subject: [PATCH 04/27] Script and into() cleanup --- scripts/prove_rpc.sh | 49 +++++++++++++++++++++++--------------- zero/src/block_interval.rs | 2 +- 2 files changed, 31 insertions(+), 20 deletions(-) diff --git a/scripts/prove_rpc.sh b/scripts/prove_rpc.sh index a229b04eb..994fc5346 100755 --- a/scripts/prove_rpc.sh +++ b/scripts/prove_rpc.sh @@ -5,7 +5,7 @@ # 2 --> End block (number or hash, inclusive) # 3 --> Rpc endpoint:port (eg. http://35.246.1.96:8545) # 4 --> Rpc type (eg. jerigon / native) -# 5 --> Checkpoint block (number or hash, optional) +# 5 --> Checkpoint block (number or hash, optional when specifying start block by number) # 6 --> Backoff in milliseconds (optional [default: 0]) # 7 --> Number of retries (optional [default: 0]) # 8 --> Test run only flag `test_only` (optional) @@ -55,7 +55,7 @@ RUN_VERIFICATION="${RUN_VERIFICATION:-false}" # Recommended soft file handle limit. Will warn if it is set lower. RECOMMENDED_FILE_HANDLE_LIMIT=8192 -mkdir -p $PROOF_OUTPUT_DIR +mkdir -p "$PROOF_OUTPUT_DIR" if [ -n "$CHECKPOINT_BLOCK" ] ; then # Set checkpoint height to previous block number for the first block in range @@ -72,7 +72,7 @@ else fi # Print out a warning if the we're using `native` and our file descriptor limit is too low. Don't bother if we can't find `ulimit`. -if [ $(command -v ulimit) ] && [ $NODE_RPC_TYPE == "native" ] +if [ "$(command -v ulimit)" ] && [ "$NODE_RPC_TYPE" == "native" ] then file_desc_limit=$(ulimit -n) @@ -112,38 +112,49 @@ $PREV_PROOF_EXTRA_ARG \ echo -e "Proof witness generation finished with result: $retVal" exit $retVal else - eval $command > $OUT_LOG_PATH 2>&1 - if grep -q 'All proof witnesses have been generated successfully.' $OUT_LOG_PATH; then + eval "$command" > "$OUT_LOG_PATH" 2>&1 + if grep -q 'All proof witnesses have been generated successfully.' "$OUT_LOG_PATH"; then echo -e "Success - Note this was just a test, not a proof" # Remove the log on success if we don't want to keep it. if [ $ALWAYS_WRITE_LOGS -ne 1 ]; then - rm $OUT_LOG_PATH + rm "$OUT_LOG_PATH" fi exit else - echo "Failed to create proof witnesses. See ${OUT_LOG_PATH} for more details." + echo "Failed to create proof witnesses. See $OUT_LOG_PATH for more details." exit 1 fi fi else # normal run - echo "Proving blocks ${BLOCK_INTERVAL} now... 
(Total: ${TOT_BLOCKS})" - command='cargo r --release --package zero --bin leader -- --runtime in-memory --load-strategy on-demand --proof-output-dir $PROOF_OUTPUT_DIR --block-batch-size $BLOCK_BATCH_SIZE rpc --rpc-type "$NODE_RPC_TYPE" --rpc-url "$3" --block-interval $BLOCK_INTERVAL $PREV_PROOF_EXTRA_ARG --backoff "$BACKOFF" --max-retries "$RETRIES" ' + echo "Proving blocks from ($START_BLOCK) to ($END_BLOCK)" + command="cargo r --release --package zero --bin leader -- \ +--runtime in-memory \ +--load-strategy on-demand \ +--proof-output-dir $PROOF_OUTPUT_DIR \ +--block-batch-size $BLOCK_BATCH_SIZE \ +rpc \ +--rpc-type $NODE_RPC_TYPE \ +--rpc-url $3 \ +--block-interval $BLOCK_INTERVAL \ +$PREV_PROOF_EXTRA_ARG \ +--backoff $BACKOFF \ +--max-retries $RETRIES" if [ "$OUTPUT_TO_TERMINAL" = true ]; then - eval $command + eval "$command" echo -e "Proof generation finished with result: $?" else - eval $command > $OUT_LOG_PATH 2>&1 + eval "$command" > "$OUT_LOG_PATH" 2>&1 retVal=$? if [ $retVal -ne 0 ]; then # Some error occurred, display the logs and exit. - cat $OUT_LOG_PATH - echo "Block ${i} errored. See ${OUT_LOG_PATH} for more details." + cat "$OUT_LOG_PATH" + echo "Error occurred. See $OUT_LOG_PATH for more details." exit $retVal else # Remove the log on success if we don't want to keep it. if [ $ALWAYS_WRITE_LOGS -ne 1 ]; then - rm $OUT_LOG_PATH + rm "$OUT_LOG_PATH" fi fi echo "Successfully generated proofs!" @@ -156,15 +167,15 @@ if [ "$RUN_VERIFICATION" = true ]; then echo "Running the verification for the last proof..." proof_file_name=$PROOF_OUTPUT_DIR/b$END_BLOCK.zkproof - echo "Verifying the proof of the latest block in the interval:" $proof_file_name - cargo r --release --package zero --bin verifier -- -f $proof_file_name > $PROOF_OUTPUT_DIR/verify.out 2>&1 + echo "Verifying the proof of the latest block in the interval:" "$proof_file_name" + cargo r --release --package zero --bin verifier -- -f "$proof_file_name" > "$PROOF_OUTPUT_DIR/verify.out" 2>&1 - if grep -q 'All proofs verified successfully!' $PROOF_OUTPUT_DIR/verify.out; then + if grep -q 'All proofs verified successfully!' "$PROOF_OUTPUT_DIR/verify.out"; then echo "$proof_file_name verified successfully!"; - rm $PROOF_OUTPUT_DIR/verify.out + rm "$PROOF_OUTPUT_DIR/verify.out" else # Some error occurred with verification, display the logs and exit. - cat $PROOF_OUTPUT_DIR/verify.out + cat "$PROOF_OUTPUT_DIR/verify.out" echo "There was an issue with proof verification. See $PROOF_OUTPUT_DIR/verify.out for more details."; exit 1 fi diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index ffb76f637..16527bceb 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -153,7 +153,7 @@ impl BlockInterval { let block = cached_provider .get_provider() .await? 
- .get_block(BlockId::Hash(hash.into()), BlockTransactionsKind::Hashes) + .get_block(BlockId::Hash(hash), BlockTransactionsKind::Hashes) .await .map_err(|e| { anyhow!("could not retrieve block number by hash from the provider: {e}") From 7536dfc65d8fc710bf3271c1c5b570b9863a42e2 Mon Sep 17 00:00:00 2001 From: sergerad Date: Thu, 17 Oct 2024 14:04:38 +1300 Subject: [PATCH 05/27] comments and cleanup --- zero/src/bin/leader/client.rs | 1 - zero/src/block_interval.rs | 11 +++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/zero/src/bin/leader/client.rs b/zero/src/bin/leader/client.rs index 191becdc0..a51d17a0d 100644 --- a/zero/src/bin/leader/client.rs +++ b/zero/src/bin/leader/client.rs @@ -11,7 +11,6 @@ use zero::pre_checks::check_previous_proof_and_checkpoint; use zero::proof_types::GeneratedBlockProof; use zero::prover::{self, BlockProverInput, ProverConfig}; use zero::rpc; -use zero::rpc::{retry::build_http_retry_provider, RpcType}; use crate::ProofRuntime; diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index 16527bceb..dcc0c67a0 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use alloy::rpc::types::eth::BlockId; use alloy::rpc::types::BlockTransactionsKind; -use alloy::{hex, providers::Provider, transports::Transport}; +use alloy::{providers::Provider, transports::Transport}; use anyhow::{anyhow, Result}; use async_stream::try_stream; use futures::Stream; @@ -36,6 +36,9 @@ impl BlockInterval { /// If end_block is None, the interval is unbounded and will follow from /// start_block. If start_block == end_block, the interval is a single /// block. Otherwise the interval is a range from start_block to end_block. + /// + /// end_block is treated as inclusive because it may have been specified + /// as a block hash. pub async fn new( cached_provider: Arc>, start_block: BlockId, @@ -91,12 +94,12 @@ impl BlockInterval { } } + /// Returns the start block number of the interval. pub fn get_start_block(&self) -> Result { match self { BlockInterval::SingleBlockId(num) => Ok(*num), BlockInterval::Range(range) => Ok(range.start), BlockInterval::FollowFrom { start_block, .. } => Ok(*start_block), - _ => Err(anyhow!("Unknown BlockInterval variant")), // Handle unknown variants } } @@ -137,6 +140,7 @@ impl BlockInterval { } } + /// Converts a [`BlockId`] into a block number by querying the provider. pub async fn block_to_num( cached_provider: Arc>, block: BlockId, @@ -146,9 +150,12 @@ impl BlockInterval { TransportT: Transport + Clone, { let block_num = match block { + // Number already provided BlockId::Number(num) => num .as_number() .ok_or_else(|| anyhow!("invalid block number '{num}'"))?, + + // Hash provided, query the provider for the block number. BlockId::Hash(hash) => { let block = cached_provider .get_provider() From 5ad944d5f07187d5daa718b70e0e380804c21e08 Mon Sep 17 00:00:00 2001 From: sergerad Date: Thu, 17 Oct 2024 18:49:50 +1300 Subject: [PATCH 06/27] check end vs start in range --- zero/src/block_interval.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index dcc0c67a0..2b55ece61 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -60,16 +60,17 @@ impl BlockInterval { // Bounded range provided. 
Some(end_block) => { let end_block_num = Self::block_to_num(cached_provider.clone(), end_block).await?; + if end_block_num <= start_block_num { + return Err(anyhow!( + "invalid block interval range ({start_block_num}..{end_block_num})" + )); + } Ok(BlockInterval::Range(start_block_num..end_block_num + 1)) } // Unbounded range provided. - None => { - let start_block_num = - Self::block_to_num(cached_provider.clone(), start_block).await?; - Ok(BlockInterval::FollowFrom { - start_block: start_block_num, - }) - } + None => Ok(BlockInterval::FollowFrom { + start_block: start_block_num, + }), } } From 48c02828efc9f176cfd404513c99fea2d61ee34f Mon Sep 17 00:00:00 2001 From: sergerad Date: Thu, 17 Oct 2024 18:50:46 +1300 Subject: [PATCH 07/27] fix script call in workflow --- .github/workflows/jerigon-native.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/jerigon-native.yml b/.github/workflows/jerigon-native.yml index e641c5033..1ef443d9d 100644 --- a/.github/workflows/jerigon-native.yml +++ b/.github/workflows/jerigon-native.yml @@ -74,14 +74,14 @@ jobs: run: | ETH_RPC_URL="$(kurtosis port print cancun-testnet el-2-erigon-lighthouse ws-rpc)" ulimit -n 8192 - OUTPUT_TO_TERMINAL=true ./scripts/prove_rpc.sh 1 15 $ETH_RPC_URL native true 3000 100 test_only + OUTPUT_TO_TERMINAL=true ./scripts/prove_rpc.sh 1 15 $ETH_RPC_URL native 0 3000 100 test_only echo "Proving blocks in test_only mode finished" - name: Run prove blocks with native tracer in real mode run: | ETH_RPC_URL="$(kurtosis port print cancun-testnet el-2-erigon-lighthouse ws-rpc)" rm -rf proofs/* circuits/* ./proofs.json test.out verify.out leader.out - OUTPUT_TO_TERMINAL=true RUN_VERIFICATION=true ./scripts/prove_rpc.sh 4 7 $ETH_RPC_URL native true 3000 100 + OUTPUT_TO_TERMINAL=true RUN_VERIFICATION=true ./scripts/prove_rpc.sh 4 7 $ETH_RPC_URL native 3 3000 100 echo "Proving blocks in real mode finished" - name: Shut down network From 8fdace77fe9f1edd55e1bfa40ed12e568466ee9e Mon Sep 17 00:00:00 2001 From: sergerad Date: Fri, 18 Oct 2024 09:58:37 +1300 Subject: [PATCH 08/27] fix checkpoint arg --- scripts/prove_rpc.sh | 19 ++++++++++++------- zero/src/bin/leader/cli.rs | 2 +- zero/src/block_interval.rs | 2 +- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/scripts/prove_rpc.sh b/scripts/prove_rpc.sh index 994fc5346..17f529bd0 100755 --- a/scripts/prove_rpc.sh +++ b/scripts/prove_rpc.sh @@ -5,7 +5,7 @@ # 2 --> End block (number or hash, inclusive) # 3 --> Rpc endpoint:port (eg. http://35.246.1.96:8545) # 4 --> Rpc type (eg. jerigon / native) -# 5 --> Checkpoint block (number or hash, optional when specifying start block by number) +# 5 --> Checkpoint block (number or hash to ignore previous proofs. 
empty [""] when specifying start block by number to rely on previous proofs) # 6 --> Backoff in milliseconds (optional [default: 0]) # 7 --> Number of retries (optional [default: 0]) # 8 --> Test run only flag `test_only` (optional) @@ -58,13 +58,16 @@ RECOMMENDED_FILE_HANDLE_LIMIT=8192 mkdir -p "$PROOF_OUTPUT_DIR" if [ -n "$CHECKPOINT_BLOCK" ] ; then - # Set checkpoint height to previous block number for the first block in range + # Checkpoint block provided, pass it to the prover as a flag PREV_PROOF_EXTRA_ARG="--checkpoint-block $CHECKPOINT_BLOCK" else + # Checkpoint block not provided, but is required hash-based start block if [[ $START_BLOCK == 0x* ]]; then echo "Checkpoint block is required when specifying blocks by hash" exit 1 fi + + # Checkpoint block not provided, deduce proof starting point from the start block if [[ $1 -gt 1 ]]; then prev_proof_num=$(($1-1)) PREV_PROOF_EXTRA_ARG="-f ${PROOF_OUTPUT_DIR}/b${prev_proof_num}.zkproof" @@ -102,9 +105,9 @@ rpc \ --rpc-url $NODE_RPC_URL \ --start-block $START_BLOCK \ --end-block $END_BLOCK \ -$PREV_PROOF_EXTRA_ARG \ --backoff $BACKOFF \ ---max-retries $RETRIES" +--max-retries $RETRIES \ +$PREV_PROOF_EXTRA_ARG" if [ "$OUTPUT_TO_TERMINAL" = true ]; then eval "$command" @@ -136,10 +139,12 @@ else rpc \ --rpc-type $NODE_RPC_TYPE \ --rpc-url $3 \ ---block-interval $BLOCK_INTERVAL \ -$PREV_PROOF_EXTRA_ARG \ +--start-block $START_BLOCK \ +--end-block $END_BLOCK \ --backoff $BACKOFF \ ---max-retries $RETRIES" +--max-retries $RETRIES \ +$PREV_PROOF_EXTRA_ARG " + if [ "$OUTPUT_TO_TERMINAL" = true ]; then eval "$command" echo -e "Proof generation finished with result: $?" diff --git a/zero/src/bin/leader/cli.rs b/zero/src/bin/leader/cli.rs index b7d4f0a4d..5f1ea0705 100644 --- a/zero/src/bin/leader/cli.rs +++ b/zero/src/bin/leader/cli.rs @@ -71,7 +71,7 @@ pub(crate) enum Command { #[arg(long, short = 'e')] end_block: Option, /// The checkpoint block number. - #[arg(short, long, default_value_t = BlockId::from(0))] + #[arg(short, long, default_value = "0")] checkpoint_block: BlockId, /// The previous proof output. #[arg(long, short = 'f', value_hint = ValueHint::FilePath)] diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index 2b55ece61..dbc5a69de 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -31,7 +31,7 @@ pub enum BlockInterval { } impl BlockInterval { - /// Create a new block interval. + /// Creates a new block interval. /// /// If end_block is None, the interval is unbounded and will follow from /// start_block. 
If start_block == end_block, the interval is a single From 2ee75f029789830737514ae927149b6c86c13f7f Mon Sep 17 00:00:00 2001 From: sergerad Date: Sun, 20 Oct 2024 14:01:22 +1300 Subject: [PATCH 09/27] add trait and mocks --- Cargo.lock | 71 +++++++++ zero/Cargo.toml | 2 +- zero/src/block_interval.rs | 301 ++++++++++++++++++++++--------------- zero/src/provider.rs | 23 +++ 4 files changed, 275 insertions(+), 122 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea4fb8060..cb2e30838 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1834,6 +1834,12 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "dunce" version = "1.0.5" @@ -2189,6 +2195,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fragile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" + [[package]] name = "funty" version = "2.0.0" @@ -3057,6 +3069,32 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "mockall" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +dependencies = [ + "cfg-if", + "downcast", + "fragile", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "mpt_trie" version = "0.4.1" @@ -3789,6 +3827,32 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "predicates" +version = "3.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" +dependencies = [ + "anstyle", + "predicates-core", +] + +[[package]] +name = "predicates-core" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" + +[[package]] +name = "predicates-tree" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "pretty_env_logger" version = "0.5.0" @@ -4814,6 +4878,12 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "termtree" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" + [[package]] name = "text-size" version = "1.1.1" @@ -5823,6 +5893,7 @@ dependencies = [ "jemallocator", "keccak-hash 0.10.0", "lru", + "mockall", "mpt_trie", "num-traits", "once_cell", diff --git a/zero/Cargo.toml b/zero/Cargo.toml index 7cbf2f351..1a6abfa90 100644 --- a/zero/Cargo.toml +++ b/zero/Cargo.toml @@ -47,11 +47,11 @@ tracing.workspace = true tracing-subscriber.workspace = true url.workspace = true zk_evm_common.workspace = true +mockall = "0.13.0" [target.'cfg(not(target_env 
= "msvc"))'.dependencies] jemallocator = "0.5.4" - [build-dependencies] anyhow.workspace = true vergen-git2 = { version = "1.0.0", features = ["build"] } diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index dbc5a69de..0bcc63d59 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -3,14 +3,14 @@ use std::pin::Pin; use std::sync::Arc; use alloy::rpc::types::eth::BlockId; -use alloy::rpc::types::BlockTransactionsKind; +use alloy::rpc::types::{Block, BlockTransactionsKind}; use alloy::{providers::Provider, transports::Transport}; use anyhow::{anyhow, Result}; use async_stream::try_stream; use futures::Stream; use tracing::info; -use crate::provider::CachedProvider; +use crate::provider::{BlockProvider, CachedProvider}; /// The async stream of block numbers. /// The second bool flag indicates if the element is last in the interval. @@ -39,15 +39,11 @@ impl BlockInterval { /// /// end_block is treated as inclusive because it may have been specified /// as a block hash. - pub async fn new( - cached_provider: Arc>, + pub async fn new( + cached_provider: Arc, start_block: BlockId, end_block: Option, - ) -> Result - where - ProviderT: Provider + 'static, - TransportT: Transport + Clone, - { + ) -> Result { // Ensure the start block is a valid block number. let start_block_num = Self::block_to_num(cached_provider.clone(), start_block).await?; @@ -142,14 +138,10 @@ impl BlockInterval { } /// Converts a [`BlockId`] into a block number by querying the provider. - pub async fn block_to_num( - cached_provider: Arc>, + pub async fn block_to_num( + cached_provider: Arc, block: BlockId, - ) -> Result - where - ProviderT: Provider + 'static, - TransportT: Transport + Clone, - { + ) -> Result { let block_num = match block { // Number already provided BlockId::Number(num) => num @@ -159,9 +151,7 @@ impl BlockInterval { // Hash provided, query the provider for the block number. BlockId::Hash(hash) => { let block = cached_provider - .get_provider() - .await? 
- .get_block(BlockId::Hash(hash), BlockTransactionsKind::Hashes) + .get_block_by_id(BlockId::Hash(hash)) .await .map_err(|e| { anyhow!("could not retrieve block number by hash from the provider: {e}") @@ -190,105 +180,174 @@ impl std::fmt::Display for BlockInterval { } } -// TODO(serge current PR): Add tests using mocks for CachedProvider -//#[cfg(test)] -//mod test { -// use alloy::primitives::B256; -// -// use super::*; -// -// #[test] -// fn can_create_block_interval_from_exclusive_range() { -// assert_eq!( -// BlockInterval::new(BlockId::from(0), BlockId::from(10)).unwrap(), -// //BlockInterval::new("0..10").unwrap(), -// BlockInterval::Range(0..10) -// ); -// } -// -// #[test] -// fn can_create_block_interval_from_inclusive_range() { -// assert_eq!( -// BlockInterval::new("0..=10").unwrap(), -// BlockInterval::Range(0..11) -// ); -// } -// -// #[test] -// fn can_create_follow_from_block_interval() { -// assert_eq!( -// BlockInterval::new("100..").unwrap(), -// BlockInterval::FollowFrom { start_block: 100 } -// ); -// } -// -// #[test] -// fn can_create_single_block_interval() { -// assert_eq!( -// BlockInterval::new("123415131").unwrap(), -// BlockInterval::SingleBlockId(BlockId::Number(123415131.into())) -// ); -// } -// -// #[test] -// fn new_interval_proper_single_block_error() { -// assert_eq!( -// BlockInterval::new("113A").err().unwrap().to_string(), -// "invalid block interval range '113A'" -// ); -// } -// -// #[test] -// fn new_interval_proper_range_error() { -// assert_eq!( -// BlockInterval::new("111...156").err().unwrap().to_string(), -// "invalid block interval range '111...156'" -// ); -// } -// -// #[test] -// fn new_interval_parse_block_hash() { -// assert_eq!( -// BlockInterval::new( -// -// "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" -// ) -// .unwrap(), -// BlockInterval::SingleBlockId(BlockId::Hash( -// -// "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" -// .parse::() -// .unwrap() -// .into() -// )) -// ) -// } -// -// #[tokio::test] -// async fn can_into_bounded_stream() { -// use futures::StreamExt; -// let mut result = Vec::new(); -// let mut stream = BlockInterval::new("1..10") -// .unwrap() -// .into_bounded_stream() -// .unwrap(); -// while let Some(val) = stream.next().await { -// result.push(val.unwrap()); -// } -// let mut expected = Vec::from_iter(1u64..10u64) -// .into_iter() -// .map(|it| (it, false)) -// .collect::>(); -// expected.last_mut().unwrap().1 = true; -// assert_eq!(result, expected); -// } -// -// #[test] -// fn can_create_from_string() { -// use std::str::FromStr; -// assert_eq!( -// &format!("{}", BlockInterval::from_str("0..10").unwrap()), -// "0..10" -// ); -// } -//} +#[cfg(test)] +mod test { + use alloy::primitives::B256; + use alloy::rpc::types::{Header, Transaction}; + use mockall::predicate::*; + + use super::*; + use crate::provider::MockBlockProvider; + + #[tokio::test] + async fn can_create_block_interval_from_inclusive_range() { + let mock = Arc::new(MockBlockProvider::new()); + assert_eq!( + BlockInterval::new(mock, BlockId::from(0), Some(BlockId::from(10))) + .await + .unwrap(), + BlockInterval::Range(0..11) + ); + } + + #[tokio::test] + async fn can_create_follow_from_block_interval() { + let mock = Arc::new(MockBlockProvider::new()); + assert_eq!( + BlockInterval::new(mock, BlockId::from(100), None) + .await + .unwrap(), + BlockInterval::FollowFrom { start_block: 100 } + ); + } + + #[tokio::test] + async fn can_create_single_block_interval() { + let mock = 
Arc::new(MockBlockProvider::new()); + assert_eq!( + BlockInterval::new( + mock, + BlockId::from(123415131), + Some(BlockId::from(123415131)) + ) + .await + .unwrap(), + BlockInterval::SingleBlockId(123415131) + ); + } + + #[tokio::test] + async fn cannot_create_invalid_range() { + let mock = Arc::new(MockBlockProvider::new()); + assert_eq!( + BlockInterval::new(mock, BlockId::from(123415131), Some(BlockId::from(0))) + .await + .unwrap_err() + .to_string(), + anyhow!("invalid block interval range (123415131..0)").to_string() + ); + } + + #[tokio::test] + async fn can_create_single_block_interval_from_hash() { + let mut mock = MockBlockProvider::new(); + let block_id = BlockId::Hash( + "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" + .parse::() + .unwrap() + .into(), + ); + let mut block: Block = Block::default(); + block.header.number = 12345; + mock.expect_get_block_by_id() + .with(eq(block_id)) + .returning(move |_| { + let block = block.clone(); + Box::pin(async move { Ok(Some(block)) }) + }); + let mock = Arc::new(mock); + assert_eq!( + BlockInterval::new(mock, block_id, Some(block_id)) + .await + .unwrap(), + BlockInterval::SingleBlockId(12345) + ); + } + + #[tokio::test] + async fn can_create_block_interval_from_inclusive_hash_range() { + let mut mock = MockBlockProvider::new(); + let start_block_id = BlockId::Hash( + "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" + .parse::() + .unwrap() + .into(), + ); + let end_block_id = BlockId::Hash( + "0x351ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" + .parse::() + .unwrap() + .into(), + ); + let mut start_block: Block = Block::default(); + start_block.header.number = 12345; + mock.expect_get_block_by_id() + .with(eq(start_block_id)) + .returning(move |_| { + let block = start_block.clone(); + Box::pin(async move { Ok(Some(block)) }) + }); + let mut end_block: Block = Block::default(); + end_block.header.number = 12355; + mock.expect_get_block_by_id() + .with(eq(end_block_id)) + .returning(move |_| { + let block = end_block.clone(); + Box::pin(async move { Ok(Some(block)) }) + }); + let mock = Arc::new(mock); + assert_eq!( + BlockInterval::new(mock, start_block_id, Some(end_block_id)) + .await + .unwrap(), + BlockInterval::Range(12345..12356) + ); + } + + #[tokio::test] + async fn can_create_follow_from_block_interval_hash() { + let mut mock = MockBlockProvider::new(); + let start_block_id = BlockId::Hash( + "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" + .parse::() + .unwrap() + .into(), + ); + let mut start_block: Block = Block::default(); + start_block.header.number = 12345; + mock.expect_get_block_by_id() + .with(eq(start_block_id)) + .returning(move |_| { + let block = start_block.clone(); + Box::pin(async move { Ok(Some(block)) }) + }); + let mock = Arc::new(mock); + assert_eq!( + BlockInterval::new(mock, start_block_id, None) + .await + .unwrap(), + BlockInterval::FollowFrom { start_block: 12345 } + ); + } + + #[tokio::test] + async fn can_into_bounded_stream() { + use futures::StreamExt; + let mut result = Vec::new(); + let mock = Arc::new(MockBlockProvider::new()); + let mut stream = BlockInterval::new(mock, BlockId::from(1), Some(BlockId::from(9))) + .await + .unwrap() + .into_bounded_stream() + .unwrap(); + while let Some(val) = stream.next().await { + result.push(val.unwrap()); + } + let mut expected = Vec::from_iter(1u64..10u64) + .into_iter() + .map(|it| (it, false)) + .collect::>(); + expected.last_mut().unwrap().1 = true; + assert_eq!(result, 
expected); + } +} diff --git a/zero/src/provider.rs b/zero/src/provider.rs index d82de1ff1..225ad2a33 100644 --- a/zero/src/provider.rs +++ b/zero/src/provider.rs @@ -1,3 +1,4 @@ +use std::future::Future; use std::ops::{Deref, DerefMut}; use std::sync::Arc; @@ -5,6 +6,7 @@ use alloy::primitives::BlockHash; use alloy::rpc::types::{Block, BlockId, BlockTransactionsKind}; use alloy::{providers::Provider, transports::Transport}; use anyhow::Context; +use mockall::automock; use tokio::sync::{Mutex, Semaphore, SemaphorePermit}; use crate::rpc::RpcType; @@ -12,6 +14,14 @@ use crate::rpc::RpcType; const CACHE_SIZE: usize = 1024; const MAX_NUMBER_OF_PARALLEL_REQUESTS: usize = 128; +#[automock] +pub trait BlockProvider { + fn get_block_by_id( + &self, + block_id: BlockId, + ) -> impl Future>> + Send; +} + /// Wrapper around alloy provider to cache blocks and other /// frequently used data. pub struct CachedProvider { @@ -123,3 +133,16 @@ where } } } + +impl BlockProvider for CachedProvider +where + ProviderT: Provider, + TransportT: Transport + Clone, +{ + async fn get_block_by_id(&self, block_id: BlockId) -> anyhow::Result> { + Ok(Some( + self.get_block(block_id, BlockTransactionsKind::Hashes) + .await?, + )) + } +} From baaaaf586d6f1219a8b08d504fa148a05319d9a6 Mon Sep 17 00:00:00 2001 From: sergerad Date: Sun, 20 Oct 2024 14:07:27 +1300 Subject: [PATCH 10/27] replace all cached provider --- zero/src/block_interval.rs | 22 +++++++++------------- zero/src/provider.rs | 6 ++++++ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index 0bcc63d59..e7f8f5061 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -40,12 +40,12 @@ impl BlockInterval { /// end_block is treated as inclusive because it may have been specified /// as a block hash. pub async fn new( - cached_provider: Arc, + provider: Arc, start_block: BlockId, end_block: Option, ) -> Result { // Ensure the start block is a valid block number. - let start_block_num = Self::block_to_num(cached_provider.clone(), start_block).await?; + let start_block_num = Self::block_to_num(provider.clone(), start_block).await?; // Create the block interval. match end_block { @@ -55,7 +55,7 @@ impl BlockInterval { } // Bounded range provided. Some(end_block) => { - let end_block_num = Self::block_to_num(cached_provider.clone(), end_block).await?; + let end_block_num = Self::block_to_num(provider.clone(), end_block).await?; if end_block_num <= start_block_num { return Err(anyhow!( "invalid block interval range ({start_block_num}..{end_block_num})" @@ -102,20 +102,16 @@ impl BlockInterval { /// Convert the block interval into an unbounded async stream of block /// numbers. Query the blockchain node for the latest block number. - pub async fn into_unbounded_stream( + pub async fn into_unbounded_stream( self, - cached_provider: Arc>, + provider: Arc, block_time: u64, - ) -> Result - where - ProviderT: Provider + 'static, - TransportT: Transport + Clone, - { + ) -> Result { match self { BlockInterval::FollowFrom { start_block } => Ok(Box::pin(try_stream! 
{ let mut current = start_block; loop { - let last_block_number = cached_provider.get_provider().await?.get_block_number().await.map_err(|e: alloy::transports::RpcError<_>| { + let last_block_number = provider.latest_block_number().await.map_err(|e| { anyhow!("could not retrieve latest block number from the provider: {e}") })?; @@ -139,7 +135,7 @@ impl BlockInterval { /// Converts a [`BlockId`] into a block number by querying the provider. pub async fn block_to_num( - cached_provider: Arc, + provider: Arc, block: BlockId, ) -> Result { let block_num = match block { @@ -150,7 +146,7 @@ impl BlockInterval { // Hash provided, query the provider for the block number. BlockId::Hash(hash) => { - let block = cached_provider + let block = provider .get_block_by_id(BlockId::Hash(hash)) .await .map_err(|e| { diff --git a/zero/src/provider.rs b/zero/src/provider.rs index 225ad2a33..3bded7d39 100644 --- a/zero/src/provider.rs +++ b/zero/src/provider.rs @@ -20,6 +20,8 @@ pub trait BlockProvider { &self, block_id: BlockId, ) -> impl Future>> + Send; + + fn latest_block_number(&self) -> impl Future> + Send; } /// Wrapper around alloy provider to cache blocks and other @@ -145,4 +147,8 @@ where .await?, )) } + + async fn latest_block_number(&self) -> anyhow::Result { + Ok(self.provider.get_block_number().await?) + } } From d96a58199584ca7e7b1aa69e923ea017e53a0b5b Mon Sep 17 00:00:00 2001 From: sergerad Date: Sun, 20 Oct 2024 16:57:51 +1300 Subject: [PATCH 11/27] cleanup tests --- zero/src/block_interval.rs | 105 +++++++++++++++++++------------------ 1 file changed, 53 insertions(+), 52 deletions(-) diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index e7f8f5061..6d895eb6c 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -187,20 +187,22 @@ mod test { #[tokio::test] async fn can_create_block_interval_from_inclusive_range() { - let mock = Arc::new(MockBlockProvider::new()); assert_eq!( - BlockInterval::new(mock, BlockId::from(0), Some(BlockId::from(10))) - .await - .unwrap(), + BlockInterval::new( + Arc::new(MockBlockProvider::new()), + BlockId::from(0), + Some(BlockId::from(10)) + ) + .await + .unwrap(), BlockInterval::Range(0..11) ); } #[tokio::test] async fn can_create_follow_from_block_interval() { - let mock = Arc::new(MockBlockProvider::new()); assert_eq!( - BlockInterval::new(mock, BlockId::from(100), None) + BlockInterval::new(Arc::new(MockBlockProvider::new()), BlockId::from(100), None) .await .unwrap(), BlockInterval::FollowFrom { start_block: 100 } @@ -209,10 +211,9 @@ mod test { #[tokio::test] async fn can_create_single_block_interval() { - let mock = Arc::new(MockBlockProvider::new()); assert_eq!( BlockInterval::new( - mock, + Arc::new(MockBlockProvider::new()), BlockId::from(123415131), Some(BlockId::from(123415131)) ) @@ -224,18 +225,22 @@ mod test { #[tokio::test] async fn cannot_create_invalid_range() { - let mock = Arc::new(MockBlockProvider::new()); assert_eq!( - BlockInterval::new(mock, BlockId::from(123415131), Some(BlockId::from(0))) - .await - .unwrap_err() - .to_string(), + BlockInterval::new( + Arc::new(MockBlockProvider::new()), + BlockId::from(123415131), + Some(BlockId::from(0)) + ) + .await + .unwrap_err() + .to_string(), anyhow!("invalid block interval range (123415131..0)").to_string() ); } #[tokio::test] async fn can_create_single_block_interval_from_hash() { + // Mock the block for single block interval. 
let mut mock = MockBlockProvider::new(); let block_id = BlockId::Hash( "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" @@ -243,14 +248,9 @@ mod test { .unwrap() .into(), ); - let mut block: Block = Block::default(); - block.header.number = 12345; - mock.expect_get_block_by_id() - .with(eq(block_id)) - .returning(move |_| { - let block = block.clone(); - Box::pin(async move { Ok(Some(block)) }) - }); + mock_block(&mut mock, block_id, 12345); + + // Create the interval. let mock = Arc::new(mock); assert_eq!( BlockInterval::new(mock, block_id, Some(block_id)) @@ -262,6 +262,7 @@ mod test { #[tokio::test] async fn can_create_block_interval_from_inclusive_hash_range() { + // Mock the blocks for the range. let mut mock = MockBlockProvider::new(); let start_block_id = BlockId::Hash( "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" @@ -269,28 +270,16 @@ mod test { .unwrap() .into(), ); + mock_block(&mut mock, start_block_id, 12345); let end_block_id = BlockId::Hash( "0x351ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" .parse::() .unwrap() .into(), ); - let mut start_block: Block = Block::default(); - start_block.header.number = 12345; - mock.expect_get_block_by_id() - .with(eq(start_block_id)) - .returning(move |_| { - let block = start_block.clone(); - Box::pin(async move { Ok(Some(block)) }) - }); - let mut end_block: Block = Block::default(); - end_block.header.number = 12355; - mock.expect_get_block_by_id() - .with(eq(end_block_id)) - .returning(move |_| { - let block = end_block.clone(); - Box::pin(async move { Ok(Some(block)) }) - }); + mock_block(&mut mock, end_block_id, 12355); + + // Create the interval. let mock = Arc::new(mock); assert_eq!( BlockInterval::new(mock, start_block_id, Some(end_block_id)) @@ -302,21 +291,17 @@ mod test { #[tokio::test] async fn can_create_follow_from_block_interval_hash() { - let mut mock = MockBlockProvider::new(); + // Mock a block for range to start from. let start_block_id = BlockId::Hash( "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" .parse::() .unwrap() .into(), ); - let mut start_block: Block = Block::default(); - start_block.header.number = 12345; - mock.expect_get_block_by_id() - .with(eq(start_block_id)) - .returning(move |_| { - let block = start_block.clone(); - Box::pin(async move { Ok(Some(block)) }) - }); + let mut mock = MockBlockProvider::new(); + mock_block(&mut mock, start_block_id, 12345); + + // Create the interval. let mock = Arc::new(mock); assert_eq!( BlockInterval::new(mock, start_block_id, None) @@ -326,16 +311,32 @@ mod test { ); } + /// Configures the mock to expect a query for a block by id and return the + /// expected block number. 
+ fn mock_block(mock: &mut MockBlockProvider, query_id: BlockId, resulting_block_num: u64) { + let mut block: Block = Block::default(); + block.header.number = resulting_block_num; + mock.expect_get_block_by_id() + .with(eq(query_id)) + .returning(move |_| { + let block = block.clone(); + Box::pin(async move { Ok(Some(block)) }) + }); + } + #[tokio::test] async fn can_into_bounded_stream() { use futures::StreamExt; let mut result = Vec::new(); - let mock = Arc::new(MockBlockProvider::new()); - let mut stream = BlockInterval::new(mock, BlockId::from(1), Some(BlockId::from(9))) - .await - .unwrap() - .into_bounded_stream() - .unwrap(); + let mut stream = BlockInterval::new( + Arc::new(MockBlockProvider::new()), + BlockId::from(1), + Some(BlockId::from(9)), + ) + .await + .unwrap() + .into_bounded_stream() + .unwrap(); while let Some(val) = stream.next().await { result.push(val.unwrap()); } From 94e7b299131e9d6b6606cc734cbc57c20f197ea3 Mon Sep 17 00:00:00 2001 From: sergerad Date: Mon, 21 Oct 2024 08:19:54 +1300 Subject: [PATCH 12/27] lint / cleanup --- zero/src/bin/leader/cli.rs | 3 ++- zero/src/bin/rpc.rs | 3 --- zero/src/block_interval.rs | 6 ++---- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/zero/src/bin/leader/cli.rs b/zero/src/bin/leader/cli.rs index 5f1ea0705..6827d5814 100644 --- a/zero/src/bin/leader/cli.rs +++ b/zero/src/bin/leader/cli.rs @@ -46,6 +46,7 @@ pub enum WorkerRunMode { Default, } +#[allow(clippy::large_enum_variant)] #[derive(Subcommand)] pub(crate) enum Command { /// Deletes all the previously cached circuits. @@ -70,7 +71,7 @@ pub(crate) enum Command { /// The end of the block range to prove (inclusive). #[arg(long, short = 'e')] end_block: Option, - /// The checkpoint block number. + /// The checkpoint block. #[arg(short, long, default_value = "0")] checkpoint_block: BlockId, /// The previous proof output. diff --git a/zero/src/bin/rpc.rs b/zero/src/bin/rpc.rs index bf2f929bc..a8a42a6d4 100644 --- a/zero/src/bin/rpc.rs +++ b/zero/src/bin/rpc.rs @@ -25,7 +25,6 @@ struct FetchParams { pub start_block: u64, pub end_block: u64, pub checkpoint_block_number: Option, - pub rpc_type: RpcType, } #[derive(Args, Clone, Debug)] @@ -127,7 +126,6 @@ impl Cli { start_block, end_block, checkpoint_block_number, - rpc_type: self.config.rpc_type, }; let block_prover_inputs = @@ -153,7 +151,6 @@ impl Cli { start_block: block_number, end_block: block_number, checkpoint_block_number: None, - rpc_type: self.config.rpc_type, }; let block_prover_inputs = diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index 6d895eb6c..7ca4f2366 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -3,14 +3,12 @@ use std::pin::Pin; use std::sync::Arc; use alloy::rpc::types::eth::BlockId; -use alloy::rpc::types::{Block, BlockTransactionsKind}; -use alloy::{providers::Provider, transports::Transport}; use anyhow::{anyhow, Result}; use async_stream::try_stream; use futures::Stream; use tracing::info; -use crate::provider::{BlockProvider, CachedProvider}; +use crate::provider::BlockProvider; /// The async stream of block numbers. /// The second bool flag indicates if the element is last in the interval. 
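
An illustrative aside, not part of the patch series: the tests above depend on mockall being able to mock a trait method whose return type is `impl Future`. The self-contained sketch below shows that pattern in isolation, assuming `mockall` 0.13 and tokio's test macro; the `NumberLookup` trait and its method are hypothetical, and only the `expect_*`/`Box::pin` shape mirrors the `mock_block` helper introduced above.

```rust
use std::future::Future;

use mockall::{automock, predicate::eq};

#[automock]
trait NumberLookup {
    fn number_by_tag(&self, tag: u64) -> impl Future<Output = anyhow::Result<u64>> + Send;
}

#[tokio::test]
async fn mocked_lookup_returns_configured_number() {
    let mut mock = MockNumberLookup::new();
    // Match one specific argument and hand back a pinned, boxed future,
    // the same shape `mock_block` uses for `get_block_by_id`.
    mock.expect_number_by_tag()
        .with(eq(7u64))
        .returning(|_| Box::pin(async { Ok(42) }));
    assert_eq!(mock.number_by_tag(7).await.unwrap(), 42);
}
```

Boxing the future gives the mocked method a single concrete return type even though every `async` block has its own anonymous type, which is also why `mock_block` clones the block into the closure before pinning it.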
@@ -179,7 +177,7 @@ impl std::fmt::Display for BlockInterval { #[cfg(test)] mod test { use alloy::primitives::B256; - use alloy::rpc::types::{Header, Transaction}; + use alloy::rpc::types::{Block, Header, Transaction}; use mockall::predicate::*; use super::*; From d32aff558fb531c9a2448cab380b3ff781e52f34 Mon Sep 17 00:00:00 2001 From: sergerad Date: Wed, 23 Oct 2024 10:36:51 +1300 Subject: [PATCH 13/27] pr fixes --- scripts/prove_rpc.sh | 26 ++++++++++++++++++++--- zero/src/block_interval.rs | 43 +++++++++++++++++++++----------------- zero/src/parsing.rs | 18 ---------------- zero/src/provider.rs | 5 ++--- 4 files changed, 49 insertions(+), 43 deletions(-) diff --git a/scripts/prove_rpc.sh b/scripts/prove_rpc.sh index 17f529bd0..23760a527 100755 --- a/scripts/prove_rpc.sh +++ b/scripts/prove_rpc.sh @@ -1,11 +1,11 @@ #!/bin/bash # Args: -# 1 --> Start block (number or hash) -# 2 --> End block (number or hash, inclusive) +# 1 --> Start block (number in decimal or block hash with prefix 0x). E.g. `1234` or `0x1d5e7a08dd1f4ce7fa52afe7f4960d78e82e508c874838dee594d5300b8df625`. +# 2 --> End block (number or hash, inclusive). Same format as start block. # 3 --> Rpc endpoint:port (eg. http://35.246.1.96:8545) # 4 --> Rpc type (eg. jerigon / native) -# 5 --> Checkpoint block (number or hash to ignore previous proofs. empty [""] when specifying start block by number to rely on previous proofs) +# 5 --> Checkpoint block (number or hash). If argument is missing, start block predecessor will be used. # 6 --> Backoff in milliseconds (optional [default: 0]) # 7 --> Number of retries (optional [default: 0]) # 8 --> Test run only flag `test_only` (optional) @@ -57,6 +57,26 @@ RECOMMENDED_FILE_HANDLE_LIMIT=8192 mkdir -p "$PROOF_OUTPUT_DIR" +# Validate start block args +for block_id in "$START_BLOCK" "$END_BLOCK"; do + if [[ $block_id == 0x* ]]; then + if ! [[ ${#block_id} -eq 66 ]]; then + echo "Invalid block hash length: $block_id" + exit 1 + fi + if ! [[ $block_id =~ ^0x[0-9a-fA-F]+$ ]]; then + echo "Invalid block hash format: $block_id" + exit 1 + fi + else + if ! [[ $block_id =~ ^[0-9]+$ ]]; then + echo "Invalid block number format: $block_id" + exit 1 + fi + fi +done + +# Handle checkpoint block arg if [ -n "$CHECKPOINT_BLOCK" ] ; then # Checkpoint block provided, pass it to the prover as a flag PREV_PROOF_EXTRA_ARG="--checkpoint-block $CHECKPOINT_BLOCK" diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index 7ca4f2366..984515e22 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -8,7 +8,7 @@ use async_stream::try_stream; use futures::Stream; use tracing::info; -use crate::provider::BlockProvider; +use crate::provider::ZeroBlockProvider; /// The async stream of block numbers. /// The second bool flag indicates if the element is last in the interval. @@ -33,12 +33,13 @@ impl BlockInterval { /// /// If end_block is None, the interval is unbounded and will follow from /// start_block. If start_block == end_block, the interval is a single - /// block. Otherwise the interval is a range from start_block to end_block. + /// block. Otherwise, the interval is an inclusive range from start_block to + /// end_block. /// - /// end_block is treated as inclusive because it may have been specified - /// as a block hash. + /// end_block is always treated as inclusive because it may have been + /// specified as a block hash. 
pub async fn new( - provider: Arc, + provider: Arc, start_block: BlockId, end_block: Option, ) -> Result { @@ -102,7 +103,7 @@ impl BlockInterval { /// numbers. Query the blockchain node for the latest block number. pub async fn into_unbounded_stream( self, - provider: Arc, + provider: Arc, block_time: u64, ) -> Result { match self { @@ -133,7 +134,7 @@ impl BlockInterval { /// Converts a [`BlockId`] into a block number by querying the provider. pub async fn block_to_num( - provider: Arc, + provider: Arc, block: BlockId, ) -> Result { let block_num = match block { @@ -181,13 +182,13 @@ mod test { use mockall::predicate::*; use super::*; - use crate::provider::MockBlockProvider; + use crate::provider::MockZeroBlockProvider; #[tokio::test] async fn can_create_block_interval_from_inclusive_range() { assert_eq!( BlockInterval::new( - Arc::new(MockBlockProvider::new()), + Arc::new(MockZeroBlockProvider::new()), BlockId::from(0), Some(BlockId::from(10)) ) @@ -200,9 +201,13 @@ mod test { #[tokio::test] async fn can_create_follow_from_block_interval() { assert_eq!( - BlockInterval::new(Arc::new(MockBlockProvider::new()), BlockId::from(100), None) - .await - .unwrap(), + BlockInterval::new( + Arc::new(MockZeroBlockProvider::new()), + BlockId::from(100), + None + ) + .await + .unwrap(), BlockInterval::FollowFrom { start_block: 100 } ); } @@ -211,7 +216,7 @@ mod test { async fn can_create_single_block_interval() { assert_eq!( BlockInterval::new( - Arc::new(MockBlockProvider::new()), + Arc::new(MockZeroBlockProvider::new()), BlockId::from(123415131), Some(BlockId::from(123415131)) ) @@ -225,7 +230,7 @@ mod test { async fn cannot_create_invalid_range() { assert_eq!( BlockInterval::new( - Arc::new(MockBlockProvider::new()), + Arc::new(MockZeroBlockProvider::new()), BlockId::from(123415131), Some(BlockId::from(0)) ) @@ -239,7 +244,7 @@ mod test { #[tokio::test] async fn can_create_single_block_interval_from_hash() { // Mock the block for single block interval. - let mut mock = MockBlockProvider::new(); + let mut mock = MockZeroBlockProvider::new(); let block_id = BlockId::Hash( "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" .parse::() @@ -261,7 +266,7 @@ mod test { #[tokio::test] async fn can_create_block_interval_from_inclusive_hash_range() { // Mock the blocks for the range. - let mut mock = MockBlockProvider::new(); + let mut mock = MockZeroBlockProvider::new(); let start_block_id = BlockId::Hash( "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" .parse::() @@ -296,7 +301,7 @@ mod test { .unwrap() .into(), ); - let mut mock = MockBlockProvider::new(); + let mut mock = MockZeroBlockProvider::new(); mock_block(&mut mock, start_block_id, 12345); // Create the interval. @@ -311,7 +316,7 @@ mod test { /// Configures the mock to expect a query for a block by id and return the /// expected block number. 
- fn mock_block(mock: &mut MockBlockProvider, query_id: BlockId, resulting_block_num: u64) { + fn mock_block(mock: &mut MockZeroBlockProvider, query_id: BlockId, resulting_block_num: u64) { let mut block: Block = Block::default(); block.header.number = resulting_block_num; mock.expect_get_block_by_id() @@ -327,7 +332,7 @@ mod test { use futures::StreamExt; let mut result = Vec::new(); let mut stream = BlockInterval::new( - Arc::new(MockBlockProvider::new()), + Arc::new(MockZeroBlockProvider::new()), BlockId::from(1), Some(BlockId::from(9)), ) diff --git a/zero/src/parsing.rs b/zero/src/parsing.rs index d1452a464..5643f82f5 100644 --- a/zero/src/parsing.rs +++ b/zero/src/parsing.rs @@ -34,19 +34,6 @@ where parse_range_gen(s, "..", false) } -/// Parse an inclusive range from a string. -/// -/// A valid range is of the form `lhs..=rhs`, where `lhs` and `rhs` are numbers. -pub(crate) fn parse_range_inclusive( - s: &str, -) -> Result, RangeParseError> -where - NumberT: Display + FromStr + From + Add, - NumberT::Err: Display, -{ - parse_range_gen(s, "..=", true) -} - pub(crate) fn parse_range_gen( s: &str, separator: SeparatorT, @@ -88,11 +75,6 @@ mod test { assert_eq!(parse_range_exclusive::("0..10"), Ok(0..10)); } - #[test] - fn it_parses_inclusive_ranges() { - assert_eq!(parse_range_inclusive::("0..=10"), Ok(0..11)); - } - #[test] fn it_handles_missing_lhs() { assert_eq!( diff --git a/zero/src/provider.rs b/zero/src/provider.rs index 3bded7d39..2dc1b5464 100644 --- a/zero/src/provider.rs +++ b/zero/src/provider.rs @@ -15,7 +15,7 @@ const CACHE_SIZE: usize = 1024; const MAX_NUMBER_OF_PARALLEL_REQUESTS: usize = 128; #[automock] -pub trait BlockProvider { +pub trait ZeroBlockProvider { fn get_block_by_id( &self, block_id: BlockId, @@ -36,7 +36,6 @@ pub struct CachedProvider { blocks_by_number: Arc>>, blocks_by_hash: Arc>>, _phantom: std::marker::PhantomData, - pub rpc_type: RpcType, } @@ -136,7 +135,7 @@ where } } -impl BlockProvider for CachedProvider +impl ZeroBlockProvider for CachedProvider where ProviderT: Provider, TransportT: Transport + Clone, From b00ebfa84629626669ed69f5523595d24b3f841c Mon Sep 17 00:00:00 2001 From: sergerad Date: Wed, 23 Oct 2024 11:39:18 +1300 Subject: [PATCH 14/27] fix mock macro only tests and log proof count --- scripts/prove_rpc.sh | 3 ++- zero/src/bin/leader.rs | 2 +- zero/src/bin/leader/cli.rs | 2 ++ zero/src/bin/leader/client.rs | 3 ++- zero/src/provider.rs | 6 ++++-- 5 files changed, 11 insertions(+), 5 deletions(-) diff --git a/scripts/prove_rpc.sh b/scripts/prove_rpc.sh index 23760a527..12158425d 100755 --- a/scripts/prove_rpc.sh +++ b/scripts/prove_rpc.sh @@ -182,7 +182,8 @@ $PREV_PROOF_EXTRA_ARG " rm "$OUT_LOG_PATH" fi fi - echo "Successfully generated proofs!" + proof_count=$(grep -c 'INFO zero::prover: Proving block \d' < "$OUT_LOG_PATH") + echo "Successfully generated $proof_count proofs!" fi fi diff --git a/zero/src/bin/leader.rs b/zero/src/bin/leader.rs index 38ca2a3da..5d11845c6 100644 --- a/zero/src/bin/leader.rs +++ b/zero/src/bin/leader.rs @@ -106,8 +106,8 @@ async fn main() -> Result<()> { checkpoint_block, previous_proof, block_time, - end_block, start_block, + end_block, backoff, max_retries, } => { diff --git a/zero/src/bin/leader/cli.rs b/zero/src/bin/leader/cli.rs index 6827d5814..c085ae83f 100644 --- a/zero/src/bin/leader/cli.rs +++ b/zero/src/bin/leader/cli.rs @@ -69,6 +69,8 @@ pub(crate) enum Command { #[arg(long, short = 's')] start_block: BlockId, /// The end of the block range to prove (inclusive). 
+ /// If not provided, leader will work in dynamic mode from `start_block` + /// following head of the blockchain. #[arg(long, short = 'e')] end_block: Option, /// The checkpoint block. diff --git a/zero/src/bin/leader/client.rs b/zero/src/bin/leader/client.rs index a51d17a0d..6f2015833 100644 --- a/zero/src/bin/leader/client.rs +++ b/zero/src/bin/leader/client.rs @@ -10,6 +10,7 @@ use zero::block_interval::{BlockInterval, BlockIntervalStream}; use zero::pre_checks::check_previous_proof_and_checkpoint; use zero::proof_types::GeneratedBlockProof; use zero::prover::{self, BlockProverInput, ProverConfig}; +use zero::provider::CachedProvider; use zero::rpc; use crate::ProofRuntime; @@ -24,7 +25,7 @@ pub struct LeaderConfig { /// The main function for the client. pub(crate) async fn client_main( proof_runtime: Arc, - cached_provider: Arc>, + cached_provider: Arc>, block_time: u64, block_interval: BlockInterval, mut leader_config: LeaderConfig, diff --git a/zero/src/provider.rs b/zero/src/provider.rs index 2dc1b5464..033685ba8 100644 --- a/zero/src/provider.rs +++ b/zero/src/provider.rs @@ -6,7 +6,6 @@ use alloy::primitives::BlockHash; use alloy::rpc::types::{Block, BlockId, BlockTransactionsKind}; use alloy::{providers::Provider, transports::Transport}; use anyhow::Context; -use mockall::automock; use tokio::sync::{Mutex, Semaphore, SemaphorePermit}; use crate::rpc::RpcType; @@ -14,7 +13,10 @@ use crate::rpc::RpcType; const CACHE_SIZE: usize = 1024; const MAX_NUMBER_OF_PARALLEL_REQUESTS: usize = 128; -#[automock] +#[cfg(test)] +use mockall::automock; + +#[cfg_attr(test, automock)] pub trait ZeroBlockProvider { fn get_block_by_id( &self, From 359d67de549476e3f19e646a98c79b89f5c6b3b6 Mon Sep 17 00:00:00 2001 From: sergerad Date: Thu, 24 Oct 2024 08:32:46 +1300 Subject: [PATCH 15/27] rm block check and fix sh/readme changes --- .github/workflows/jerigon-zero.yml | 4 ++-- scripts/prove_rpc.sh | 21 +-------------------- zero/README.md | 10 +++++----- 3 files changed, 8 insertions(+), 27 deletions(-) diff --git a/.github/workflows/jerigon-zero.yml b/.github/workflows/jerigon-zero.yml index c2e994c80..e0dc4c40f 100644 --- a/.github/workflows/jerigon-zero.yml +++ b/.github/workflows/jerigon-zero.yml @@ -74,14 +74,14 @@ jobs: run: | ETH_RPC_URL="$(kurtosis port print cancun-testnet el-2-erigon-lighthouse ws-rpc)" ulimit -n 8192 - OUTPUT_TO_TERMINAL=true ./scripts/prove_rpc.sh 1 15 $ETH_RPC_URL jerigon true 3000 100 test_only + OUTPUT_TO_TERMINAL=true ./scripts/prove_rpc.sh 1 15 $ETH_RPC_URL jerigon 0 3000 100 test_only echo "Proving blocks in test_only mode finished" - name: Run prove blocks with zero tracer in real mode run: | ETH_RPC_URL="$(kurtosis port print cancun-testnet el-2-erigon-lighthouse ws-rpc)" rm -rf proofs/* circuits/* ./proofs.json test.out verify.out leader.out - OUTPUT_TO_TERMINAL=true RUN_VERIFICATION=true ./scripts/prove_rpc.sh 2 5 $ETH_RPC_URL jerigon true 3000 100 + OUTPUT_TO_TERMINAL=true RUN_VERIFICATION=true ./scripts/prove_rpc.sh 2 5 $ETH_RPC_URL jerigon 1 3000 100 echo "Proving blocks in real mode finished" - name: Shut down network diff --git a/scripts/prove_rpc.sh b/scripts/prove_rpc.sh index 12158425d..d56d202fb 100755 --- a/scripts/prove_rpc.sh +++ b/scripts/prove_rpc.sh @@ -57,31 +57,12 @@ RECOMMENDED_FILE_HANDLE_LIMIT=8192 mkdir -p "$PROOF_OUTPUT_DIR" -# Validate start block args -for block_id in "$START_BLOCK" "$END_BLOCK"; do - if [[ $block_id == 0x* ]]; then - if ! [[ ${#block_id} -eq 66 ]]; then - echo "Invalid block hash length: $block_id" - exit 1 - fi - if ! 
[[ $block_id =~ ^0x[0-9a-fA-F]+$ ]]; then
-            echo "Invalid block hash format: $block_id"
-            exit 1
-        fi
-    else
-        if ! [[ $block_id =~ ^[0-9]+$ ]]; then
-            echo "Invalid block number format: $block_id"
-            exit 1
-        fi
-    fi
-done
-
 # Handle checkpoint block arg
 if [ -n "$CHECKPOINT_BLOCK" ] ; then
     # Checkpoint block provided, pass it to the prover as a flag
     PREV_PROOF_EXTRA_ARG="--checkpoint-block $CHECKPOINT_BLOCK"
 else
-    # Checkpoint block not provided, but is required hash-based start block
+    # Checkpoint block not provided, but is required for hash-based start block
     if [[ $START_BLOCK == 0x* ]]; then
         echo "Checkpoint block is required when specifying blocks by hash"
         exit 1
diff --git a/zero/README.md b/zero/README.md
index 936a73c91..e320b8d62 100644
--- a/zero/README.md
+++ b/zero/README.md
@@ -425,13 +425,13 @@ For testing proof generation for blocks, the `testing` branch should be used.
 If you want to generate a full block proof, you can use `tools/prove_rpc.sh`:
 
 ```sh
-./prove_rpc.sh <start_block> <end_block> <rpc_url> <rpc_type> <ignore_previous_proofs>
+./prove_rpc.sh <start_block> <end_block> <rpc_url> <rpc_type> <checkpoint_block>
 ```
 
 Which may look like this:
 
 ```sh
-./prove_rpc.sh 17 18 http://127.0.0.1:8545 jerigon false
+./prove_rpc.sh 17 18 http://127.0.0.1:8545 jerigon
 ```
 
 Which will attempt to generate proofs for blocks `17` & `18` consecutively and incorporate the previous block proof during generation.
@@ -439,7 +439,7 @@ Which will attempt to generate proofs for blocks `17` & `18` consecutively and i
 A few other notes:
 
 - Proving blocks is very resource intensive in terms of both CPU and memory. You can also only generate the witness for a block instead (see [Generating Witnesses Only](#generating-witnesses-only)) to significantly reduce the CPU and memory requirements.
-- Because incorporating the previous block proof requires a chain of proofs back to the last checkpoint height, you can also disable this requirement by passing `true` for `<ignore_previous_proofs>` (which internally just sets the current checkpoint height to the previous block height).
+- Because incorporating the previous block proof requires a chain of proofs back to the last checkpoint height, you must specify a `<checkpoint_block>`. The above example omits this argument which causes the command to treat block `16` as the checkpoint.
 - When proving multiple blocks concurrently, one may need to increase the system resource usage limit because of the number of RPC connections opened simultaneously, in particular when running a native tracer. For Linux systems, it is recommended to set `ulimit` to 8192.
 
 ### Generating Witnesses Only
 
@@ -447,13 +447,13 @@ A few other notes:
 
 If you want to test a block without the high CPU & memory requirements that come with creating a full proof, you can instead generate only the witness using `tools/prove_rpc.sh` in the `test_only` mode:
 
 ```sh
-./prove_rpc.sh <start_block> <end_block> <rpc_url> <rpc_type> <ignore_previous_proofs> <backoff> <retries> test_only
+./prove_rpc.sh <start_block> <end_block> <rpc_url> <rpc_type> <checkpoint_block> <backoff> <retries> test_only
 ```
 
 Filled in:
 
 ```sh
-./prove_rpc.sh 18299898 18299899 http://34.89.57.138:8545 jerigon true 0 0 test_only
+./prove_rpc.sh 18299898 18299899 http://34.89.57.138:8545 jerigon 18299897 0 0 test_only
 ```
 
 Finally, note that both of these testing scripts force proof generation to be sequential by allowing only one worker. Because of this, this is not a realistic representation of performance but makes the debugging logs much easier to follow. 
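
An illustrative aside, not part of the patch series: after this change the script and README accept either a decimal block number or a 0x-prefixed block hash for the start, end, and checkpoint arguments. On the Rust side that distinction is carried by alloy's `BlockId`, which clap parses directly from the CLI string; a rough, hypothetical equivalent of that number-or-hash convention looks like this (the helper name and error handling are assumptions, not code from the PR):

```rust
use alloy::primitives::B256;
use alloy::rpc::types::eth::BlockId;
use anyhow::Result;

/// Hypothetical helper mirroring the number-or-hash argument convention;
/// the real CLI simply lets clap parse `BlockId` itself.
fn parse_block_arg(arg: &str) -> Result<BlockId> {
    if arg.starts_with("0x") {
        // 0x-prefixed 32-byte hex strings are treated as block hashes.
        Ok(BlockId::Hash(arg.parse::<B256>()?.into()))
    } else {
        // Anything else is treated as a decimal block number.
        Ok(BlockId::from(arg.parse::<u64>()?))
    }
}

fn main() -> Result<()> {
    let by_number = parse_block_arg("18299898")?;
    let by_hash =
        parse_block_arg("0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb")?;
    println!("{by_number:?} {by_hash:?}");
    Ok(())
}
```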
From 906f6f8692aa2cddb6dae0f5f3de3a7b7317306d Mon Sep 17 00:00:00 2001 From: sergerad Date: Thu, 24 Oct 2024 08:40:48 +1300 Subject: [PATCH 16/27] fix taplo --- zero/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zero/Cargo.toml b/zero/Cargo.toml index 1a6abfa90..4a0a7ff2c 100644 --- a/zero/Cargo.toml +++ b/zero/Cargo.toml @@ -27,6 +27,7 @@ hex.workspace = true itertools.workspace = true keccak-hash.workspace = true lru.workspace = true +mockall = "0.13.0" mpt_trie.workspace = true num-traits.workspace = true once_cell.workspace = true @@ -47,7 +48,6 @@ tracing.workspace = true tracing-subscriber.workspace = true url.workspace = true zk_evm_common.workspace = true -mockall = "0.13.0" [target.'cfg(not(target_env = "msvc"))'.dependencies] jemallocator = "0.5.4" From fa605b75c85317ad86c20b7bf2e29a30991c5946 Mon Sep 17 00:00:00 2001 From: sergerad Date: Thu, 24 Oct 2024 08:42:52 +1300 Subject: [PATCH 17/27] devdep --- zero/Cargo.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/zero/Cargo.toml b/zero/Cargo.toml index 4a0a7ff2c..88c8c580b 100644 --- a/zero/Cargo.toml +++ b/zero/Cargo.toml @@ -27,7 +27,6 @@ hex.workspace = true itertools.workspace = true keccak-hash.workspace = true lru.workspace = true -mockall = "0.13.0" mpt_trie.workspace = true num-traits.workspace = true once_cell.workspace = true @@ -56,6 +55,9 @@ jemallocator = "0.5.4" anyhow.workspace = true vergen-git2 = { version = "1.0.0", features = ["build"] } +[dev-dependencies] +mockall = "0.13.0" + [features] default = ["eth_mainnet"] From 651004760afa3eca96b16a9e225e5106d42480c4 Mon Sep 17 00:00:00 2001 From: sergerad Date: Fri, 25 Oct 2024 08:26:11 +1300 Subject: [PATCH 18/27] move provider --- zero/src/block_interval.rs | 70 +++++++++++++++++++++++++++++--------- zero/src/provider.rs | 31 ----------------- 2 files changed, 54 insertions(+), 47 deletions(-) diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index 984515e22..c9fb2e06f 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -1,14 +1,48 @@ -use std::ops::Range; use std::pin::Pin; use std::sync::Arc; +use std::{future::Future, ops::Range}; -use alloy::rpc::types::eth::BlockId; +use alloy::providers::Provider; +use alloy::rpc::types::BlockTransactionsKind; +use alloy::rpc::types::{eth::BlockId, Block}; +use alloy::transports::Transport; use anyhow::{anyhow, Result}; use async_stream::try_stream; use futures::Stream; +#[cfg(test)] +use mockall::automock; use tracing::info; -use crate::provider::ZeroBlockProvider; +use crate::provider::CachedProvider; + +#[cfg_attr(test, automock)] +pub trait BlockIntervalProvider { + fn get_block_by_id( + &self, + block_id: BlockId, + ) -> impl Future>> + Send; + + fn latest_block_number(&self) -> impl Future> + Send; +} + +impl BlockIntervalProvider for CachedProvider +where + ProviderT: Provider, + TransportT: Transport + Clone, +{ + /// Retrieves block without transaction contents from the provider. + async fn get_block_by_id(&self, block_id: BlockId) -> anyhow::Result> { + Ok(Some( + self.get_block(block_id, BlockTransactionsKind::Hashes) + .await?, + )) + } + + /// Retrieves the latest block number from the provider. + async fn latest_block_number(&self) -> anyhow::Result { + Ok(self.get_provider().await?.get_block_number().await?) + } +} /// The async stream of block numbers. /// The second bool flag indicates if the element is last in the interval. 
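
An illustrative aside, not part of the patch series: once both bounds have been resolved to block numbers through the `BlockIntervalProvider` trait added above, the mapping that `BlockInterval::new` applies can be summarised as below. This is a reconstruction from the unit tests in this series, not the function itself, and the free-standing helper name is an assumption.

```rust
use zero::block_interval::BlockInterval;

/// Sketch of the number-level mapping behind `BlockInterval::new`,
/// reconstructed from the tests in this patch series.
fn to_interval(start: u64, end: Option<u64>) -> anyhow::Result<BlockInterval> {
    match end {
        // No end bound: follow the chain head from `start`.
        None => Ok(BlockInterval::FollowFrom { start_block: start }),
        // Equal bounds collapse to a single block.
        Some(end) if end == start => Ok(BlockInterval::SingleBlockId(start)),
        // The inclusive end becomes an exclusive `Range` bound.
        Some(end) if end > start => Ok(BlockInterval::Range(start..end + 1)),
        // A descending range is rejected, e.g. "invalid block interval range (123415131..0)".
        Some(end) => anyhow::bail!("invalid block interval range ({start}..{end})"),
    }
}
```

Treating the end bound as inclusive is deliberate: when the bound is given as a block hash there is no way to express "one past this block", so the resolved number is bumped by one when building the exclusive `Range`.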
@@ -39,7 +73,7 @@ impl BlockInterval { /// end_block is always treated as inclusive because it may have been /// specified as a block hash. pub async fn new( - provider: Arc, + provider: Arc, start_block: BlockId, end_block: Option, ) -> Result { @@ -103,7 +137,7 @@ impl BlockInterval { /// numbers. Query the blockchain node for the latest block number. pub async fn into_unbounded_stream( self, - provider: Arc, + provider: Arc, block_time: u64, ) -> Result { match self { @@ -134,7 +168,7 @@ impl BlockInterval { /// Converts a [`BlockId`] into a block number by querying the provider. pub async fn block_to_num( - provider: Arc, + provider: Arc, block: BlockId, ) -> Result { let block_num = match block { @@ -180,15 +214,15 @@ mod test { use alloy::primitives::B256; use alloy::rpc::types::{Block, Header, Transaction}; use mockall::predicate::*; + use MockBlockIntervalProvider; use super::*; - use crate::provider::MockZeroBlockProvider; #[tokio::test] async fn can_create_block_interval_from_inclusive_range() { assert_eq!( BlockInterval::new( - Arc::new(MockZeroBlockProvider::new()), + Arc::new(MockBlockIntervalProvider::new()), BlockId::from(0), Some(BlockId::from(10)) ) @@ -202,7 +236,7 @@ mod test { async fn can_create_follow_from_block_interval() { assert_eq!( BlockInterval::new( - Arc::new(MockZeroBlockProvider::new()), + Arc::new(MockBlockIntervalProvider::new()), BlockId::from(100), None ) @@ -216,7 +250,7 @@ mod test { async fn can_create_single_block_interval() { assert_eq!( BlockInterval::new( - Arc::new(MockZeroBlockProvider::new()), + Arc::new(MockBlockIntervalProvider::new()), BlockId::from(123415131), Some(BlockId::from(123415131)) ) @@ -230,7 +264,7 @@ mod test { async fn cannot_create_invalid_range() { assert_eq!( BlockInterval::new( - Arc::new(MockZeroBlockProvider::new()), + Arc::new(MockBlockIntervalProvider::new()), BlockId::from(123415131), Some(BlockId::from(0)) ) @@ -244,7 +278,7 @@ mod test { #[tokio::test] async fn can_create_single_block_interval_from_hash() { // Mock the block for single block interval. - let mut mock = MockZeroBlockProvider::new(); + let mut mock = MockBlockIntervalProvider::new(); let block_id = BlockId::Hash( "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" .parse::() @@ -266,7 +300,7 @@ mod test { #[tokio::test] async fn can_create_block_interval_from_inclusive_hash_range() { // Mock the blocks for the range. - let mut mock = MockZeroBlockProvider::new(); + let mut mock = MockBlockIntervalProvider::new(); let start_block_id = BlockId::Hash( "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" .parse::() @@ -301,7 +335,7 @@ mod test { .unwrap() .into(), ); - let mut mock = MockZeroBlockProvider::new(); + let mut mock = MockBlockIntervalProvider::new(); mock_block(&mut mock, start_block_id, 12345); // Create the interval. @@ -316,7 +350,11 @@ mod test { /// Configures the mock to expect a query for a block by id and return the /// expected block number. 
- fn mock_block(mock: &mut MockZeroBlockProvider, query_id: BlockId, resulting_block_num: u64) { + fn mock_block( + mock: &mut MockBlockIntervalProvider, + query_id: BlockId, + resulting_block_num: u64, + ) { let mut block: Block = Block::default(); block.header.number = resulting_block_num; mock.expect_get_block_by_id() @@ -332,7 +370,7 @@ mod test { use futures::StreamExt; let mut result = Vec::new(); let mut stream = BlockInterval::new( - Arc::new(MockZeroBlockProvider::new()), + Arc::new(MockBlockIntervalProvider::new()), BlockId::from(1), Some(BlockId::from(9)), ) diff --git a/zero/src/provider.rs b/zero/src/provider.rs index 033685ba8..85b6665ec 100644 --- a/zero/src/provider.rs +++ b/zero/src/provider.rs @@ -1,4 +1,3 @@ -use std::future::Future; use std::ops::{Deref, DerefMut}; use std::sync::Arc; @@ -13,19 +12,6 @@ use crate::rpc::RpcType; const CACHE_SIZE: usize = 1024; const MAX_NUMBER_OF_PARALLEL_REQUESTS: usize = 128; -#[cfg(test)] -use mockall::automock; - -#[cfg_attr(test, automock)] -pub trait ZeroBlockProvider { - fn get_block_by_id( - &self, - block_id: BlockId, - ) -> impl Future>> + Send; - - fn latest_block_number(&self) -> impl Future> + Send; -} - /// Wrapper around alloy provider to cache blocks and other /// frequently used data. pub struct CachedProvider { @@ -136,20 +122,3 @@ where } } } - -impl ZeroBlockProvider for CachedProvider -where - ProviderT: Provider, - TransportT: Transport + Clone, -{ - async fn get_block_by_id(&self, block_id: BlockId) -> anyhow::Result> { - Ok(Some( - self.get_block(block_id, BlockTransactionsKind::Hashes) - .await?, - )) - } - - async fn latest_block_number(&self) -> anyhow::Result { - Ok(self.provider.get_block_number().await?) - } -} From 2e45be2095d25b127dd9b438e280f73df83b46c4 Mon Sep 17 00:00:00 2001 From: sergerad Date: Fri, 25 Oct 2024 08:47:22 +1300 Subject: [PATCH 19/27] generalize BlockIntervalProvider impl --- zero/src/block_interval.rs | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index c9fb2e06f..68f039862 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -5,7 +5,6 @@ use std::{future::Future, ops::Range}; use alloy::providers::Provider; use alloy::rpc::types::BlockTransactionsKind; use alloy::rpc::types::{eth::BlockId, Block}; -use alloy::transports::Transport; use anyhow::{anyhow, Result}; use async_stream::try_stream; use futures::Stream; @@ -13,8 +12,6 @@ use futures::Stream; use mockall::automock; use tracing::info; -use crate::provider::CachedProvider; - #[cfg_attr(test, automock)] pub trait BlockIntervalProvider { fn get_block_by_id( @@ -25,22 +22,17 @@ pub trait BlockIntervalProvider { fn latest_block_number(&self) -> impl Future> + Send; } -impl BlockIntervalProvider for CachedProvider -where - ProviderT: Provider, - TransportT: Transport + Clone, -{ +impl BlockIntervalProvider for T { /// Retrieves block without transaction contents from the provider. async fn get_block_by_id(&self, block_id: BlockId) -> anyhow::Result> { - Ok(Some( - self.get_block(block_id, BlockTransactionsKind::Hashes) - .await?, - )) + Ok(self + .get_block(block_id, BlockTransactionsKind::Hashes) + .await?) } /// Retrieves the latest block number from the provider. async fn latest_block_number(&self) -> anyhow::Result { - Ok(self.get_provider().await?.get_block_number().await?) + Ok(self.get_block_number().await?) 
} } From 33fb9a124e1d10e4a386be2f77f758842f40e4cb Mon Sep 17 00:00:00 2001 From: sergerad Date: Fri, 25 Oct 2024 14:34:09 +1300 Subject: [PATCH 20/27] partial fix --- zero/src/block_interval.rs | 384 +++++++++++++++++++------------------ zero/src/provider.rs | 9 + 2 files changed, 204 insertions(+), 189 deletions(-) diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index 68f039862..998c754db 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -5,6 +5,7 @@ use std::{future::Future, ops::Range}; use alloy::providers::Provider; use alloy::rpc::types::BlockTransactionsKind; use alloy::rpc::types::{eth::BlockId, Block}; +use alloy::transports::{BoxTransport, Transport}; use anyhow::{anyhow, Result}; use async_stream::try_stream; use futures::Stream; @@ -12,8 +13,8 @@ use futures::Stream; use mockall::automock; use tracing::info; -#[cfg_attr(test, automock)] -pub trait BlockIntervalProvider { +//#[cfg_attr(test, automock)] +pub trait BlockIntervalProvider { fn get_block_by_id( &self, block_id: BlockId, @@ -22,7 +23,7 @@ pub trait BlockIntervalProvider { fn latest_block_number(&self) -> impl Future> + Send; } -impl BlockIntervalProvider for T { +impl> BlockIntervalProvider for P { /// Retrieves block without transaction contents from the provider. async fn get_block_by_id(&self, block_id: BlockId) -> anyhow::Result> { Ok(self @@ -64,8 +65,8 @@ impl BlockInterval { /// /// end_block is always treated as inclusive because it may have been /// specified as a block hash. - pub async fn new( - provider: Arc, + pub async fn new( + provider: Arc>, start_block: BlockId, end_block: Option, ) -> Result { @@ -127,9 +128,9 @@ impl BlockInterval { /// Convert the block interval into an unbounded async stream of block /// numbers. Query the blockchain node for the latest block number. - pub async fn into_unbounded_stream( + pub async fn into_unbounded_stream( self, - provider: Arc, + provider: Arc + 'static>, block_time: u64, ) -> Result { match self { @@ -159,8 +160,8 @@ impl BlockInterval { } /// Converts a [`BlockId`] into a block number by querying the provider. 
- pub async fn block_to_num( - provider: Arc, + pub async fn block_to_num( + provider: Arc>, block: BlockId, ) -> Result { let block_num = match block { @@ -201,183 +202,188 @@ impl std::fmt::Display for BlockInterval { } } -#[cfg(test)] -mod test { - use alloy::primitives::B256; - use alloy::rpc::types::{Block, Header, Transaction}; - use mockall::predicate::*; - use MockBlockIntervalProvider; - - use super::*; - - #[tokio::test] - async fn can_create_block_interval_from_inclusive_range() { - assert_eq!( - BlockInterval::new( - Arc::new(MockBlockIntervalProvider::new()), - BlockId::from(0), - Some(BlockId::from(10)) - ) - .await - .unwrap(), - BlockInterval::Range(0..11) - ); - } - - #[tokio::test] - async fn can_create_follow_from_block_interval() { - assert_eq!( - BlockInterval::new( - Arc::new(MockBlockIntervalProvider::new()), - BlockId::from(100), - None - ) - .await - .unwrap(), - BlockInterval::FollowFrom { start_block: 100 } - ); - } - - #[tokio::test] - async fn can_create_single_block_interval() { - assert_eq!( - BlockInterval::new( - Arc::new(MockBlockIntervalProvider::new()), - BlockId::from(123415131), - Some(BlockId::from(123415131)) - ) - .await - .unwrap(), - BlockInterval::SingleBlockId(123415131) - ); - } - - #[tokio::test] - async fn cannot_create_invalid_range() { - assert_eq!( - BlockInterval::new( - Arc::new(MockBlockIntervalProvider::new()), - BlockId::from(123415131), - Some(BlockId::from(0)) - ) - .await - .unwrap_err() - .to_string(), - anyhow!("invalid block interval range (123415131..0)").to_string() - ); - } - - #[tokio::test] - async fn can_create_single_block_interval_from_hash() { - // Mock the block for single block interval. - let mut mock = MockBlockIntervalProvider::new(); - let block_id = BlockId::Hash( - "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" - .parse::() - .unwrap() - .into(), - ); - mock_block(&mut mock, block_id, 12345); - - // Create the interval. - let mock = Arc::new(mock); - assert_eq!( - BlockInterval::new(mock, block_id, Some(block_id)) - .await - .unwrap(), - BlockInterval::SingleBlockId(12345) - ); - } - - #[tokio::test] - async fn can_create_block_interval_from_inclusive_hash_range() { - // Mock the blocks for the range. - let mut mock = MockBlockIntervalProvider::new(); - let start_block_id = BlockId::Hash( - "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" - .parse::() - .unwrap() - .into(), - ); - mock_block(&mut mock, start_block_id, 12345); - let end_block_id = BlockId::Hash( - "0x351ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" - .parse::() - .unwrap() - .into(), - ); - mock_block(&mut mock, end_block_id, 12355); - - // Create the interval. - let mock = Arc::new(mock); - assert_eq!( - BlockInterval::new(mock, start_block_id, Some(end_block_id)) - .await - .unwrap(), - BlockInterval::Range(12345..12356) - ); - } - - #[tokio::test] - async fn can_create_follow_from_block_interval_hash() { - // Mock a block for range to start from. - let start_block_id = BlockId::Hash( - "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" - .parse::() - .unwrap() - .into(), - ); - let mut mock = MockBlockIntervalProvider::new(); - mock_block(&mut mock, start_block_id, 12345); - - // Create the interval. 
- let mock = Arc::new(mock); - assert_eq!( - BlockInterval::new(mock, start_block_id, None) - .await - .unwrap(), - BlockInterval::FollowFrom { start_block: 12345 } - ); - } - - /// Configures the mock to expect a query for a block by id and return the - /// expected block number. - fn mock_block( - mock: &mut MockBlockIntervalProvider, - query_id: BlockId, - resulting_block_num: u64, - ) { - let mut block: Block = Block::default(); - block.header.number = resulting_block_num; - mock.expect_get_block_by_id() - .with(eq(query_id)) - .returning(move |_| { - let block = block.clone(); - Box::pin(async move { Ok(Some(block)) }) - }); - } - - #[tokio::test] - async fn can_into_bounded_stream() { - use futures::StreamExt; - let mut result = Vec::new(); - let mut stream = BlockInterval::new( - Arc::new(MockBlockIntervalProvider::new()), - BlockId::from(1), - Some(BlockId::from(9)), - ) - .await - .unwrap() - .into_bounded_stream() - .unwrap(); - while let Some(val) = stream.next().await { - result.push(val.unwrap()); - } - let mut expected = Vec::from_iter(1u64..10u64) - .into_iter() - .map(|it| (it, false)) - .collect::>(); - expected.last_mut().unwrap().1 = true; - assert_eq!(result, expected); - } -} +//#[cfg(test)] +//mod test { +// use alloy::primitives::B256; +// use alloy::rpc::types::{Block, Header, Transaction}; +// use mockall::predicate::*; +// use MockBlockIntervalProvider; +// +// use super::*; +// +// #[tokio::test] +// async fn can_create_block_interval_from_inclusive_range() { +// assert_eq!( +// BlockInterval::new( +// Arc::new(MockBlockIntervalProvider::new()), +// BlockId::from(0), +// Some(BlockId::from(10)) +// ) +// .await +// .unwrap(), +// BlockInterval::Range(0..11) +// ); +// } +// +// #[tokio::test] +// async fn can_create_follow_from_block_interval() { +// assert_eq!( +// BlockInterval::new( +// Arc::new(MockBlockIntervalProvider::new()), +// BlockId::from(100), +// None +// ) +// .await +// .unwrap(), +// BlockInterval::FollowFrom { start_block: 100 } +// ); +// } +// +// #[tokio::test] +// async fn can_create_single_block_interval() { +// assert_eq!( +// BlockInterval::new( +// Arc::new(MockBlockIntervalProvider::new()), +// BlockId::from(123415131), +// Some(BlockId::from(123415131)) +// ) +// .await +// .unwrap(), +// BlockInterval::SingleBlockId(123415131) +// ); +// } +// +// #[tokio::test] +// async fn cannot_create_invalid_range() { +// assert_eq!( +// BlockInterval::new( +// Arc::new(MockBlockIntervalProvider::new()), +// BlockId::from(123415131), +// Some(BlockId::from(0)) +// ) +// .await +// .unwrap_err() +// .to_string(), +// anyhow!("invalid block interval range (123415131..0)").to_string() +// ); +// } +// +// #[tokio::test] +// async fn can_create_single_block_interval_from_hash() { +// // Mock the block for single block interval. +// let mut mock = MockBlockIntervalProvider::new(); +// let block_id = BlockId::Hash( +// +// "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" +// .parse::() +// .unwrap() +// .into(), +// ); +// mock_block(&mut mock, block_id, 12345); +// +// // Create the interval. +// let mock = Arc::new(mock); +// assert_eq!( +// BlockInterval::new(mock, block_id, Some(block_id)) +// .await +// .unwrap(), +// BlockInterval::SingleBlockId(12345) +// ); +// } +// +// #[tokio::test] +// async fn can_create_block_interval_from_inclusive_hash_range() { +// // Mock the blocks for the range. 
+// let mut mock = MockBlockIntervalProvider::new(); +// let start_block_id = BlockId::Hash( +// +// "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" +// .parse::() +// .unwrap() +// .into(), +// ); +// mock_block(&mut mock, start_block_id, 12345); +// let end_block_id = BlockId::Hash( +// +// "0x351ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" +// .parse::() +// .unwrap() +// .into(), +// ); +// mock_block(&mut mock, end_block_id, 12355); +// +// // Create the interval. +// let mock = Arc::new(mock); +// assert_eq!( +// BlockInterval::new(mock, start_block_id, Some(end_block_id)) +// .await +// .unwrap(), +// BlockInterval::Range(12345..12356) +// ); +// } +// +// #[tokio::test] +// async fn can_create_follow_from_block_interval_hash() { +// // Mock a block for range to start from. +// let start_block_id = BlockId::Hash( +// +// "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" +// .parse::() +// .unwrap() +// .into(), +// ); +// let mut mock = MockBlockIntervalProvider::new(); +// mock_block(&mut mock, start_block_id, 12345); +// +// // Create the interval. +// let mock = Arc::new(mock); +// assert_eq!( +// BlockInterval::new(mock, start_block_id, None) +// .await +// .unwrap(), +// BlockInterval::FollowFrom { start_block: 12345 } +// ); +// } +// +// /// Configures the mock to expect a query for a block by id and return the +// /// expected block number. +// fn mock_block( +// mock: &mut MockBlockIntervalProvider, +// query_id: BlockId, +// resulting_block_num: u64, +// ) { +// let mut block: Block = Block::default(); +// block.header.number = resulting_block_num; +// mock.expect_get_block_by_id() +// .with(eq(query_id)) +// .returning(move |_| { +// let block = block.clone(); +// Box::pin(async move { Ok(Some(block)) }) +// }); +// } +// +// #[tokio::test] +// async fn can_into_bounded_stream() { +// use futures::StreamExt; +// let mut result = Vec::new(); +// let mut stream = BlockInterval::new( +// Arc::new(MockBlockIntervalProvider::new()), +// BlockId::from(1), +// Some(BlockId::from(9)), +// ) +// .await +// .unwrap() +// .into_bounded_stream() +// .unwrap(); +// while let Some(val) = stream.next().await { +// result.push(val.unwrap()); +// } +// let mut expected = Vec::from_iter(1u64..10u64) +// .into_iter() +// .map(|it| (it, false)) +// .collect::>(); +// expected.last_mut().unwrap().1 = true; +// assert_eq!(result, expected); +// } +//} +// diff --git a/zero/src/provider.rs b/zero/src/provider.rs index 85b6665ec..f13be870e 100644 --- a/zero/src/provider.rs +++ b/zero/src/provider.rs @@ -2,16 +2,25 @@ use std::ops::{Deref, DerefMut}; use std::sync::Arc; use alloy::primitives::BlockHash; +use alloy::providers::RootProvider; use alloy::rpc::types::{Block, BlockId, BlockTransactionsKind}; +use alloy::transports::BoxTransport; use alloy::{providers::Provider, transports::Transport}; use anyhow::Context; use tokio::sync::{Mutex, Semaphore, SemaphorePermit}; +use crate::rpc::retry::RetryService; use crate::rpc::RpcType; const CACHE_SIZE: usize = 1024; const MAX_NUMBER_OF_PARALLEL_REQUESTS: usize = 128; +impl Provider for CachedProvider, T> { + fn root(&self) -> &RootProvider { + self.provider.root() + } +} + /// Wrapper around alloy provider to cache blocks and other /// frequently used data. 
pub struct CachedProvider { From 9ad05eb0518b44fb98dd0c829f359b6c461acaf9 Mon Sep 17 00:00:00 2001 From: sergerad Date: Fri, 25 Oct 2024 14:56:01 +1300 Subject: [PATCH 21/27] compiling --- zero/src/provider.rs | 2 +- zero/src/rpc/mod.rs | 12 +++++++----- zero/src/rpc/native/mod.rs | 3 ++- zero/src/rpc/native/state.rs | 1 + 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/zero/src/provider.rs b/zero/src/provider.rs index f13be870e..51f8cb7c9 100644 --- a/zero/src/provider.rs +++ b/zero/src/provider.rs @@ -15,7 +15,7 @@ use crate::rpc::RpcType; const CACHE_SIZE: usize = 1024; const MAX_NUMBER_OF_PARALLEL_REQUESTS: usize = 128; -impl Provider for CachedProvider, T> { +impl> Provider for CachedProvider { fn root(&self) -> &RootProvider { self.provider.root() } diff --git a/zero/src/rpc/mod.rs b/zero/src/rpc/mod.rs index 40e9cd9a7..ac53fb36f 100644 --- a/zero/src/rpc/mod.rs +++ b/zero/src/rpc/mod.rs @@ -105,7 +105,8 @@ where let block = cached_provider .get_block((block_num as u64).into(), BlockTransactionsKind::Hashes) .await - .context("couldn't get block")?; + .expect("could not retrieve block from provider") + .ok_or(anyhow!("block not found"))?; anyhow::Ok([ (block.header.hash, Some(block_num)), (block.header.parent_hash, previous_block_number), @@ -211,8 +212,8 @@ where { let target_block = cached_provider .get_block(target_block_id, BlockTransactionsKind::Hashes) - .await?; - let target_block_number = target_block.header.number; + .await? + .ok_or(anyhow!("target block not found"))?; let chain_id = cached_provider.get_provider().await?.get_chain_id().await?; // Grab interval checkpoint block state trie @@ -222,11 +223,12 @@ where BlockTransactionsKind::Hashes, ) .await? + .ok_or(anyhow!("checkpoint block not found"))? .header .state_root; let prev_hashes = - fetch_previous_block_hashes(cached_provider.clone(), target_block_number).await?; + fetch_previous_block_hashes(cached_provider.clone(), target_block.header.number).await?; let checkpoint_prev_hashes = fetch_previous_block_hashes(cached_provider, checkpoint_block_number + 1) // include the checkpoint block .await? @@ -237,7 +239,7 @@ where b_meta: BlockMetadata { block_beneficiary: target_block.header.miner.compat(), block_timestamp: target_block.header.timestamp.into(), - block_number: target_block_number.into(), + block_number: target_block.header.number.into(), block_difficulty: target_block.header.difficulty.into(), block_random: target_block .header diff --git a/zero/src/rpc/native/mod.rs b/zero/src/rpc/native/mod.rs index 5b4ed5dd9..3fd3ca3e8 100644 --- a/zero/src/rpc/native/mod.rs +++ b/zero/src/rpc/native/mod.rs @@ -50,7 +50,8 @@ where { let block = cached_provider .get_block(block_number, BlockTransactionsKind::Full) - .await?; + .await? + .ok_or(anyhow::anyhow!("block not found"))?; let (code_db, txn_info) = txn::process_transactions(&block, cached_provider.get_provider().await?.deref()).await?; diff --git a/zero/src/rpc/native/state.rs b/zero/src/rpc/native/state.rs index 3c37e8cbc..326c93081 100644 --- a/zero/src/rpc/native/state.rs +++ b/zero/src/rpc/native/state.rs @@ -35,6 +35,7 @@ where let prev_state_root = cached_provider .get_block((block_number - 1).into(), BlockTransactionsKind::Hashes) .await? + .ok_or(anyhow::anyhow!("block not found"))? 
         .header
         .state_root;

From c47e2a604af72a317f75692b1406b136b0c3f678 Mon Sep 17 00:00:00 2001
From: sergerad
Date: Fri, 25 Oct 2024 15:21:59 +1300
Subject: [PATCH 22/27] fix provider generics

---
 zero/src/block_interval.rs | 15 +++++++--------
 zero/src/provider.rs       |  2 --
 2 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs
index 998c754db..495cad838 100644
--- a/zero/src/block_interval.rs
+++ b/zero/src/block_interval.rs
@@ -5,12 +5,12 @@ use std::{future::Future, ops::Range};
 use alloy::providers::Provider;
 use alloy::rpc::types::BlockTransactionsKind;
 use alloy::rpc::types::{eth::BlockId, Block};
-use alloy::transports::{BoxTransport, Transport};
+use alloy::transports::Transport;
 use anyhow::{anyhow, Result};
 use async_stream::try_stream;
 use futures::Stream;
-#[cfg(test)]
-use mockall::automock;
+//#[cfg(test)]
+//use mockall::mock;
 use tracing::info;
 
 //#[cfg_attr(test, automock)]
@@ -264,8 +264,8 @@ impl std::fmt::Display for BlockInterval {
 //             .await
 //             .unwrap_err()
 //             .to_string(),
-//             anyhow!("invalid block interval range (123415131..0)").to_string()
-//         );
+//             anyhow!("invalid block interval range
+// (123415131..0)").to_string() );
 //     }
 //
 //     #[tokio::test]
@@ -345,8 +345,8 @@ impl std::fmt::Display for BlockInterval {
 //         );
 //     }
 //
-//     /// Configures the mock to expect a query for a block by id and return the
-//     /// expected block number.
+//     /// Configures the mock to expect a query for a block by id and return
+// the     /// expected block number.
 //     fn mock_block(
 //         mock: &mut MockBlockIntervalProvider,
 //         query_id: BlockId,
@@ -386,4 +386,3 @@ impl std::fmt::Display for BlockInterval {
 //         assert_eq!(result, expected);
 //     }
 //}
-//
diff --git a/zero/src/provider.rs b/zero/src/provider.rs
index 51f8cb7c9..fbd34e7f8 100644
--- a/zero/src/provider.rs
+++ b/zero/src/provider.rs
@@ -4,12 +4,10 @@ use std::sync::Arc;
 use alloy::primitives::BlockHash;
 use alloy::providers::RootProvider;
 use alloy::rpc::types::{Block, BlockId, BlockTransactionsKind};
-use alloy::transports::BoxTransport;
 use alloy::{providers::Provider, transports::Transport};
 use anyhow::Context;
 use tokio::sync::{Mutex, Semaphore, SemaphorePermit};
 
-use crate::rpc::retry::RetryService;
 use crate::rpc::RpcType;
 
 const CACHE_SIZE: usize = 1024;

From 22d01bbd14999149adf147c373808f1485bb765e Mon Sep 17 00:00:00 2001
From: sergerad
Date: Fri, 25 Oct 2024 19:12:05 +1300
Subject: [PATCH 23/27] reinstate unit tests

---
 zero/src/block_interval.rs | 400 ++++++++++++++++++-------------------
 1 file changed, 200 insertions(+), 200 deletions(-)

diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs
index 495cad838..889f8664a 100644
--- a/zero/src/block_interval.rs
+++ b/zero/src/block_interval.rs
@@ -2,18 +2,15 @@ use std::pin::Pin;
 use std::sync::Arc;
 use std::{future::Future, ops::Range};
 
-use alloy::providers::Provider;
-use alloy::rpc::types::BlockTransactionsKind;
 use alloy::rpc::types::{eth::BlockId, Block};
-use alloy::transports::Transport;
 use anyhow::{anyhow, Result};
 use async_stream::try_stream;
 use futures::Stream;
-//#[cfg(test)]
-//use mockall::mock;
+#[cfg(test)]
+use mockall::automock;
 use tracing::info;
 
-//#[cfg_attr(test, automock)]
+#[cfg_attr(test, automock)]
 pub trait BlockIntervalProvider<T> {
     fn get_block_by_id(
         &self,
@@ -23,17 +20,25 @@ pub trait BlockIntervalProvider<T> {
     fn latest_block_number(&self) -> impl Future<Output = Result<u64>> + Send;
 }
 
-impl<T: Transport + Clone, P: Provider<T>> BlockIntervalProvider<T> for P {
-    /// Retrieves block without transaction contents from the provider.
-    async fn get_block_by_id(&self, block_id: BlockId) -> anyhow::Result<Option<Block>> {
-        Ok(self
-            .get_block(block_id, BlockTransactionsKind::Hashes)
-            .await?)
-    }
+#[cfg(not(test))]
+mod block_interval_provider_impl {
+    use alloy::providers::Provider;
+    use alloy::rpc::types::BlockTransactionsKind;
+    use alloy::transports::Transport;
+
+    use super::{Block, BlockId, BlockIntervalProvider};
+    impl<T: Transport + Clone, P: Provider<T>> BlockIntervalProvider<T> for P {
+        /// Retrieves block without transaction contents from the provider.
+        async fn get_block_by_id(&self, block_id: BlockId) -> anyhow::Result<Option<Block>> {
+            Ok(self
+                .get_block(block_id, BlockTransactionsKind::Hashes)
+                .await?)
+        }
 
-    /// Retrieves the latest block number from the provider.
-    async fn latest_block_number(&self) -> anyhow::Result<u64> {
-        Ok(self.get_block_number().await?)
+        /// Retrieves the latest block number from the provider.
+        async fn latest_block_number(&self) -> anyhow::Result<u64> {
+            Ok(self.get_block_number().await?)
+        }
     }
 }
 
@@ -202,187 +207,182 @@ impl std::fmt::Display for BlockInterval {
     }
 }
 
-//#[cfg(test)]
-//mod test {
-//     use alloy::primitives::B256;
-//     use alloy::rpc::types::{Block, Header, Transaction};
-//     use mockall::predicate::*;
-//     use MockBlockIntervalProvider;
-//
-//     use super::*;
-//
-//     #[tokio::test]
-//     async fn can_create_block_interval_from_inclusive_range() {
-//         assert_eq!(
-//             BlockInterval::new(
-//                 Arc::new(MockBlockIntervalProvider::new()),
-//                 BlockId::from(0),
-//                 Some(BlockId::from(10))
-//             )
-//             .await
-//             .unwrap(),
-//             BlockInterval::Range(0..11)
-//         );
-//     }
-//
-//     #[tokio::test]
-//     async fn can_create_follow_from_block_interval() {
-//         assert_eq!(
-//             BlockInterval::new(
-//                 Arc::new(MockBlockIntervalProvider::new()),
-//                 BlockId::from(100),
-//                 None
-//             )
-//             .await
-//             .unwrap(),
-//             BlockInterval::FollowFrom { start_block: 100 }
-//         );
-//     }
-//
-//     #[tokio::test]
-//     async fn can_create_single_block_interval() {
-//         assert_eq!(
-//             BlockInterval::new(
-//                 Arc::new(MockBlockIntervalProvider::new()),
-//                 BlockId::from(123415131),
-//                 Some(BlockId::from(123415131))
-//             )
-//             .await
-//             .unwrap(),
-//             BlockInterval::SingleBlockId(123415131)
-//         );
-//     }
-//
-//     #[tokio::test]
-//     async fn cannot_create_invalid_range() {
-//         assert_eq!(
-//             BlockInterval::new(
-//                 Arc::new(MockBlockIntervalProvider::new()),
-//                 BlockId::from(123415131),
-//                 Some(BlockId::from(0))
-//             )
-//             .await
-//             .unwrap_err()
-//             .to_string(),
-//             anyhow!("invalid block interval range
-// (123415131..0)").to_string() );
-//     }
-//
-//     #[tokio::test]
-//     async fn can_create_single_block_interval_from_hash() {
-//         // Mock the block for single block interval.
-//         let mut mock = MockBlockIntervalProvider::new();
-//         let block_id = BlockId::Hash(
-//
-// "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb"
-//             .parse::<B256>()
-//             .unwrap()
-//             .into(),
-//         );
-//         mock_block(&mut mock, block_id, 12345);
-//
-//         // Create the interval.
-//         let mock = Arc::new(mock);
-//         assert_eq!(
-//             BlockInterval::new(mock, block_id, Some(block_id))
-//                 .await
-//                 .unwrap(),
-//             BlockInterval::SingleBlockId(12345)
-//         );
-//     }
-//
-//     #[tokio::test]
-//     async fn can_create_block_interval_from_inclusive_hash_range() {
-//         // Mock the blocks for the range.
-// let mut mock = MockBlockIntervalProvider::new(); -// let start_block_id = BlockId::Hash( -// -// "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" -// .parse::() -// .unwrap() -// .into(), -// ); -// mock_block(&mut mock, start_block_id, 12345); -// let end_block_id = BlockId::Hash( -// -// "0x351ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" -// .parse::() -// .unwrap() -// .into(), -// ); -// mock_block(&mut mock, end_block_id, 12355); -// -// // Create the interval. -// let mock = Arc::new(mock); -// assert_eq!( -// BlockInterval::new(mock, start_block_id, Some(end_block_id)) -// .await -// .unwrap(), -// BlockInterval::Range(12345..12356) -// ); -// } -// -// #[tokio::test] -// async fn can_create_follow_from_block_interval_hash() { -// // Mock a block for range to start from. -// let start_block_id = BlockId::Hash( -// -// "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" -// .parse::() -// .unwrap() -// .into(), -// ); -// let mut mock = MockBlockIntervalProvider::new(); -// mock_block(&mut mock, start_block_id, 12345); -// -// // Create the interval. -// let mock = Arc::new(mock); -// assert_eq!( -// BlockInterval::new(mock, start_block_id, None) -// .await -// .unwrap(), -// BlockInterval::FollowFrom { start_block: 12345 } -// ); -// } -// -// /// Configures the mock to expect a query for a block by id and return -// the /// expected block number. -// fn mock_block( -// mock: &mut MockBlockIntervalProvider, -// query_id: BlockId, -// resulting_block_num: u64, -// ) { -// let mut block: Block = Block::default(); -// block.header.number = resulting_block_num; -// mock.expect_get_block_by_id() -// .with(eq(query_id)) -// .returning(move |_| { -// let block = block.clone(); -// Box::pin(async move { Ok(Some(block)) }) -// }); -// } -// -// #[tokio::test] -// async fn can_into_bounded_stream() { -// use futures::StreamExt; -// let mut result = Vec::new(); -// let mut stream = BlockInterval::new( -// Arc::new(MockBlockIntervalProvider::new()), -// BlockId::from(1), -// Some(BlockId::from(9)), -// ) -// .await -// .unwrap() -// .into_bounded_stream() -// .unwrap(); -// while let Some(val) = stream.next().await { -// result.push(val.unwrap()); -// } -// let mut expected = Vec::from_iter(1u64..10u64) -// .into_iter() -// .map(|it| (it, false)) -// .collect::>(); -// expected.last_mut().unwrap().1 = true; -// assert_eq!(result, expected); -// } -//} +#[cfg(test)] +mod test { + use alloy::primitives::B256; + use alloy::rpc::types::{Block, Header, Transaction}; + use alloy::transports::BoxTransport; + use mockall::predicate::*; + use MockBlockIntervalProvider; + + use super::*; + + type Mocker = MockBlockIntervalProvider; + + #[tokio::test] + async fn can_create_block_interval_from_inclusive_range() { + assert_eq!( + BlockInterval::new( + Arc::new(Mocker::new()), + BlockId::from(0), + Some(BlockId::from(10)) + ) + .await + .unwrap(), + BlockInterval::Range(0..11) + ); + } + + #[tokio::test] + async fn can_create_follow_from_block_interval() { + assert_eq!( + BlockInterval::new(Arc::new(Mocker::new()), BlockId::from(100), None) + .await + .unwrap(), + BlockInterval::FollowFrom { start_block: 100 } + ); + } + + #[tokio::test] + async fn can_create_single_block_interval() { + assert_eq!( + BlockInterval::new( + Arc::new(Mocker::new()), + BlockId::from(123415131), + Some(BlockId::from(123415131)) + ) + .await + .unwrap(), + BlockInterval::SingleBlockId(123415131) + ); + } + + #[tokio::test] + async fn cannot_create_invalid_range() { + 
assert_eq!( + BlockInterval::new( + Arc::new(Mocker::new()), + BlockId::from(123415131), + Some(BlockId::from(0)) + ) + .await + .unwrap_err() + .to_string(), + anyhow!("invalid block interval range (123415131..0)").to_string() + ); + } + + #[tokio::test] + async fn can_create_single_block_interval_from_hash() { + // Mock the block for single block interval. + let mut mock = Mocker::new(); + let block_id = BlockId::Hash( + "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" + .parse::() + .unwrap() + .into(), + ); + mock_block(&mut mock, block_id, 12345); + + // Create the interval. + let mock = Arc::new(mock); + assert_eq!( + BlockInterval::new(mock, block_id, Some(block_id)) + .await + .unwrap(), + BlockInterval::SingleBlockId(12345) + ); + } + + #[tokio::test] + async fn can_create_block_interval_from_inclusive_hash_range() { + // Mock the blocks for the range. + let mut mock = Mocker::new(); + let start_block_id = BlockId::Hash( + "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" + .parse::() + .unwrap() + .into(), + ); + mock_block(&mut mock, start_block_id, 12345); + let end_block_id = BlockId::Hash( + "0x351ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" + .parse::() + .unwrap() + .into(), + ); + mock_block(&mut mock, end_block_id, 12355); + + // Create the interval. + let mock = Arc::new(mock); + assert_eq!( + BlockInterval::new(mock, start_block_id, Some(end_block_id)) + .await + .unwrap(), + BlockInterval::Range(12345..12356) + ); + } + + #[tokio::test] + async fn can_create_follow_from_block_interval_hash() { + // Mock a block for range to start from. + let start_block_id = BlockId::Hash( + "0xb51ceca7ba912779ed6721d2b93849758af0d2354683170fb71dead6e439e6cb" + .parse::() + .unwrap() + .into(), + ); + let mut mock = Mocker::new(); + mock_block(&mut mock, start_block_id, 12345); + + // Create the interval. + let mock = Arc::new(mock); + assert_eq!( + BlockInterval::new(mock, start_block_id, None) + .await + .unwrap(), + BlockInterval::FollowFrom { start_block: 12345 } + ); + } + + /// Configures the mock to expect a query for a block by id and return + /// the expected block number. 
+ fn mock_block( + mock: &mut MockBlockIntervalProvider, + query_id: BlockId, + resulting_block_num: u64, + ) { + let mut block: Block = Block::default(); + block.header.number = resulting_block_num; + mock.expect_get_block_by_id() + .with(eq(query_id)) + .returning(move |_| { + let block = block.clone(); + Box::pin(async move { Ok(Some(block)) }) + }); + } + + #[tokio::test] + async fn can_into_bounded_stream() { + use futures::StreamExt; + let mut result = Vec::new(); + let mut stream = BlockInterval::new( + Arc::new(Mocker::new()), + BlockId::from(1), + Some(BlockId::from(9)), + ) + .await + .unwrap() + .into_bounded_stream() + .unwrap(); + while let Some(val) = stream.next().await { + result.push(val.unwrap()); + } + let mut expected = Vec::from_iter(1u64..10u64) + .into_iter() + .map(|it| (it, false)) + .collect::>(); + expected.last_mut().unwrap().1 = true; + assert_eq!(result, expected); + } +} From e7314f2ba61f111001c9daae8d2ec57d50923b1f Mon Sep 17 00:00:00 2001 From: sergerad Date: Fri, 25 Oct 2024 19:22:31 +1300 Subject: [PATCH 24/27] add comment --- zero/src/block_interval.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs index 889f8664a..e73c00d62 100644 --- a/zero/src/block_interval.rs +++ b/zero/src/block_interval.rs @@ -27,6 +27,8 @@ mod block_interval_provider_impl { use alloy::transports::Transport; use super::{Block, BlockId, BlockIntervalProvider}; + + /// Implements the [`BlockIntervalProvider`] trait for [`Provider`]. impl> BlockIntervalProvider for P { /// Retrieves block without transaction contents from the provider. async fn get_block_by_id(&self, block_id: BlockId) -> anyhow::Result> { From 4c000da11de5e1b92cddde22dfa98e9fa95568d1 Mon Sep 17 00:00:00 2001 From: sergerad Date: Fri, 25 Oct 2024 19:59:56 +1300 Subject: [PATCH 25/27] add block to errors --- zero/src/rpc/mod.rs | 9 ++++++--- zero/src/rpc/native/mod.rs | 2 +- zero/src/rpc/native/state.rs | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/zero/src/rpc/mod.rs b/zero/src/rpc/mod.rs index ac53fb36f..78d6f961f 100644 --- a/zero/src/rpc/mod.rs +++ b/zero/src/rpc/mod.rs @@ -106,7 +106,7 @@ where .get_block((block_num as u64).into(), BlockTransactionsKind::Hashes) .await .expect("could not retrieve block from provider") - .ok_or(anyhow!("block not found"))?; + .ok_or(anyhow!("block not found {block_num}"))?; anyhow::Ok([ (block.header.hash, Some(block_num)), (block.header.parent_hash, previous_block_number), @@ -213,7 +213,7 @@ where let target_block = cached_provider .get_block(target_block_id, BlockTransactionsKind::Hashes) .await? - .ok_or(anyhow!("target block not found"))?; + .ok_or(anyhow!("target block not found {}", target_block_id))?; let chain_id = cached_provider.get_provider().await?.get_chain_id().await?; // Grab interval checkpoint block state trie @@ -223,7 +223,10 @@ where BlockTransactionsKind::Hashes, ) .await? - .ok_or(anyhow!("checkpoint block not found"))? + .ok_or(anyhow!( + "checkpoint block not found {}", + checkpoint_block_number + ))? .header .state_root; diff --git a/zero/src/rpc/native/mod.rs b/zero/src/rpc/native/mod.rs index 3fd3ca3e8..a4dc7e0c6 100644 --- a/zero/src/rpc/native/mod.rs +++ b/zero/src/rpc/native/mod.rs @@ -51,7 +51,7 @@ where let block = cached_provider .get_block(block_number, BlockTransactionsKind::Full) .await? 
-        .ok_or(anyhow::anyhow!("block not found"))?;
+        .ok_or(anyhow::anyhow!("block not found {}", block_number))?;
 
     let (code_db, txn_info) =
         txn::process_transactions(&block, cached_provider.get_provider().await?.deref()).await?;

diff --git a/zero/src/rpc/native/state.rs b/zero/src/rpc/native/state.rs
index 326c93081..b5b82106a 100644
--- a/zero/src/rpc/native/state.rs
+++ b/zero/src/rpc/native/state.rs
@@ -35,7 +35,7 @@ where
     let prev_state_root = cached_provider
         .get_block((block_number - 1).into(), BlockTransactionsKind::Hashes)
         .await?
-        .ok_or(anyhow::anyhow!("block not found"))?
+        .ok_or(anyhow::anyhow!("block not found {}", block_number - 1))?
         .header
         .state_root;

From 904c6c59b08b8a85d5ff0143ed8962e4d16952da Mon Sep 17 00:00:00 2001
From: sergerad
Date: Sat, 26 Oct 2024 05:57:38 +1300
Subject: [PATCH 26/27] rm expect

---
 zero/src/rpc/mod.rs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/zero/src/rpc/mod.rs b/zero/src/rpc/mod.rs
index 78d6f961f..016c1d242 100644
--- a/zero/src/rpc/mod.rs
+++ b/zero/src/rpc/mod.rs
@@ -104,8 +104,7 @@ where
         async move {
             let block = cached_provider
                 .get_block((block_num as u64).into(), BlockTransactionsKind::Hashes)
-                .await
-                .expect("could not retrieve block from provider")
+                .await?
                 .ok_or(anyhow!("block not found {block_num}"))?;
             anyhow::Ok([
                 (block.header.hash, Some(block_num)),

From 99dbbc2ab853a36e90b1a046d22c3a56b72d66c4 Mon Sep 17 00:00:00 2001
From: sergerad
Date: Sat, 26 Oct 2024 06:21:41 +1300
Subject: [PATCH 27/27] add where clause

---
 zero/src/block_interval.rs | 6 +++++-
 zero/src/provider.rs       | 6 +++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/zero/src/block_interval.rs b/zero/src/block_interval.rs
index e73c00d62..27d03fe3c 100644
--- a/zero/src/block_interval.rs
+++ b/zero/src/block_interval.rs
@@ -29,7 +29,11 @@ mod block_interval_provider_impl {
     use super::{Block, BlockId, BlockIntervalProvider};
 
     /// Implements the [`BlockIntervalProvider`] trait for [`Provider`].
-    impl<T: Transport + Clone, P: Provider<T>> BlockIntervalProvider<T> for P {
+    impl<T, P> BlockIntervalProvider<T> for P
+    where
+        T: Transport + Clone,
+        P: Provider<T>,
+    {
         /// Retrieves block without transaction contents from the provider.
         async fn get_block_by_id(&self, block_id: BlockId) -> anyhow::Result<Option<Block>> {
             Ok(self

diff --git a/zero/src/provider.rs b/zero/src/provider.rs
index fbd34e7f8..a2168bbb7 100644
--- a/zero/src/provider.rs
+++ b/zero/src/provider.rs
@@ -13,7 +13,11 @@ use crate::rpc::RpcType;
 const CACHE_SIZE: usize = 1024;
 const MAX_NUMBER_OF_PARALLEL_REQUESTS: usize = 128;
 
-impl<T: Transport + Clone, P: Provider<T>> Provider<T> for CachedProvider<P, T> {
+impl<T, P> Provider<T> for CachedProvider<P, T>
+where
+    T: Transport + Clone,
+    P: Provider<T>,
+{
    fn root(&self) -> &RootProvider<T> {
        self.provider.root()
    }
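Editor's note: the patches above rely on one idiom that is easy to miss when reading them as raw diffs. The trait is annotated with #[cfg_attr(test, automock)] so mockall generates a generic mock type (used in the reinstated tests as MockBlockIntervalProvider<BoxTransport>), while the blanket implementation for real alloy providers lives in a #[cfg(not(test))] module and is therefore absent from test builds. The sketch below is not part of the patch series: it is a minimal, self-contained illustration of that cfg-gating pattern using hypothetical names (Fetcher, fetch, MockFetcher) and assuming a recent mockall release.

#[cfg(test)]
use mockall::automock;

// Generic trait mirroring the shape of BlockIntervalProvider<T>: the
// `automock` attribute only applies in test builds and generates MockFetcher<T>.
#[cfg_attr(test, automock)]
pub trait Fetcher<T> {
    fn fetch(&self, id: u64) -> Option<String>;
}

// A stand-in "real" backend, compiled out of test builds exactly like the
// Provider impl inside `block_interval_provider_impl`, so it never competes
// with the generated mock during testing.
#[cfg(not(test))]
impl<T> Fetcher<T> for std::collections::HashMap<u64, String> {
    fn fetch(&self, id: u64) -> Option<String> {
        self.get(&id).cloned()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn mock_returns_configured_value() {
        // A generic mock needs a concrete, 'static type argument, just as the
        // reinstated tests pick BoxTransport for their mock provider.
        let mut mock = MockFetcher::<()>::new();
        mock.expect_fetch().returning(|id| Some(format!("block-{id}")));
        assert_eq!(mock.fetch(1), Some("block-1".to_owned()));
    }
}

Read this only as a sketch of the technique: the expectation calls (expect_fetch, returning) are standard mockall usage, and none of these names exist in the zero crate.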