diff --git a/Cargo.lock b/Cargo.lock index edcac97fb..0cbc8d73b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -594,11 +594,14 @@ dependencies = [ "astria-config", "astria-core", "astria-eyre", + "astria-grpc-mock", "astria-sequencer-client", "astria-telemetry", "astria-test-utils", "async-trait", "axum", + "base64 0.21.7", + "bytes", "ethers", "futures", "hex", @@ -607,6 +610,7 @@ dependencies = [ "insta", "itertools 0.12.1", "once_cell", + "pbjson-types", "pin-project-lite", "prost", "rand 0.8.5", @@ -808,6 +812,7 @@ dependencies = [ "ibc-types", "insta", "matchit", + "pbjson-types", "penumbra-ibc", "penumbra-proto", "penumbra-tower-trace", diff --git a/charts/composer/templates/configmap.yaml b/charts/composer/templates/configmap.yaml index 3d9b13a73..a2a47e792 100644 --- a/charts/composer/templates/configmap.yaml +++ b/charts/composer/templates/configmap.yaml @@ -9,9 +9,12 @@ data: ASTRIA_COMPOSER_GRPC_ADDR: "0.0.0.0:{{ .Values.ports.grpc }}" ASTRIA_COMPOSER_SEQUENCER_CHAIN_ID: "{{ tpl .Values.config.sequencerChainId . }}" ASTRIA_COMPOSER_SEQUENCER_URL: "{{ tpl .Values.config.sequencerRpc . }}" - ASTRIA_COMPOSER_ROLLUPS: "{{ include "composer.rollups" . 
}}" + ASTRIA_COMPOSER_ROLLUP: "{{ .Values.config.rollupName }}" + ASTRIA_COMPOSER_ROLLUP_WEBSOCKET_URL: "ws://{{ .Values.global.rollupName }}-evm-service.{{ default .Release.Namespace .Values.global.namespaceOverride }}.svc.cluster.local:8546" + ASTRIA_COMPOSER_EXECUTION_API_URL: "http://{{ .Values.global.rollupName }}-evm-service.{{ default .Release.Namespace .Values.global.namespaceOverride }}.svc.cluster.local:50051" ASTRIA_COMPOSER_PRIVATE_KEY_FILE: "/var/secrets/{{ .Values.config.privateKey.secret.filename }}" ASTRIA_COMPOSER_MAX_BYTES_PER_BUNDLE: "{{ .Values.config.maxBytesPerBundle }}" + ASTRIA_COMPOSER_MAX_BUNDLE_SIZE: "{{ .Values.config.maxBundleSize }}" ASTRIA_COMPOSER_BUNDLE_QUEUE_CAPACITY: "{{ .Values.config.bundleQueueCapacity }}" ASTRIA_COMPOSER_MAX_SUBMIT_INTERVAL_MS: "{{ .Values.config.maxSubmitInterval }}" ASTRIA_COMPOSER_SEQUENCER_ADDRESS_PREFIX: "{{ .Values.config.sequencerAddressPrefix}}" diff --git a/charts/composer/values.yaml b/charts/composer/values.yaml index bd239b779..dedf8f58e 100644 --- a/charts/composer/values.yaml +++ b/charts/composer/values.yaml @@ -14,6 +14,7 @@ images: config: logLevel: "debug" maxBytesPerBundle: 200000 + maxBundleSize: 200000 bundleQueueCapacity: 40000 maxSubmitInterval: 2000 sequencerAddressPrefix: astria @@ -25,9 +26,7 @@ config: secret: filename: "key.hex" resourceName: "projects/$PROJECT_ID/secrets/sequencerPrivateKey/versions/latest" - rollups: - - name: "astria" - wsRpc: "ws://" + rollupName: "astria" otel: enabled: false @@ -72,4 +71,4 @@ resources: ports: grpc: 50052 healthApi: 2450 - metrics: 6060 + metrics: 6060 \ No newline at end of file diff --git a/charts/evm-rollup/templates/_helpers.tpl b/charts/evm-rollup/templates/_helpers.tpl index be8aa4af2..0a34c9a72 100644 --- a/charts/evm-rollup/templates/_helpers.tpl +++ b/charts/evm-rollup/templates/_helpers.tpl @@ -52,8 +52,7 @@ The log level represented as a number Full image paths for Astria built images */}} {{- define "rollup.image" -}} -{{ 
.Values.images.geth.repo }}:{{ if .Values.images.geth.overrideTag }}{{ .Values.images.geth.overrideTag }}{{ else }}{{ if .Values.global.dev }}{{ .Values.images.geth.devTag }}{{ else }}{{ .Values.images.geth.tag }}{{ end }} -{{- end }} +{{ .Values.images.geth.repo }}:{{ if .Values.images.geth.overrideTag }}{{ .Values.images.geth.overrideTag }}{{ else if .Values.global.dev }}{{ .Values.images.geth.devTag }}{{ else }}{{ .Values.images.geth.tag }}{{ end }} {{- end }} {{- define "conductor.image" -}} {{ .Values.images.conductor.repo }}:{{ if .Values.global.dev }}{{ .Values.images.conductor.devTag }}{{ else }}{{ .Values.images.conductor.tag }}{{ end }} diff --git a/charts/evm-rollup/templates/service.yaml b/charts/evm-rollup/templates/service.yaml index a3c3ce26f..eee3a07c5 100644 --- a/charts/evm-rollup/templates/service.yaml +++ b/charts/evm-rollup/templates/service.yaml @@ -13,6 +13,9 @@ spec: - name: ws-rpc-svc port: {{ .Values.ports.wsRPC }} targetPort: ws-rpc + - name: exec-grpc-svc + port: {{ .Values.ports.executionGRPC }} + targetPort: execution-grpc --- {{- if .Values.metrics.enabled }} kind: Service diff --git a/charts/evm-rollup/values.yaml b/charts/evm-rollup/values.yaml index 89659c997..bf71b70fd 100644 --- a/charts/evm-rollup/values.yaml +++ b/charts/evm-rollup/values.yaml @@ -11,9 +11,8 @@ images: repo: ghcr.io/astriaorg/astria-geth tag: 0.14.0 devTag: latest - overrideTag: "" conductor: - repo: ghcr.io/astriaorg/conductor + repo: astria-conductor tag: "0.20.0" devTag: latest @@ -89,6 +88,10 @@ genesis: value: balance: "0" code: "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3" + - address: "0xC873fC6685abF295cc537811C234B2B3aD54Af42" + value: + balance: "1000000000000000000000" + # Example of simple genesis account funding # - address: "0xaC21B97d35Bf75A7dAb16f35b111a50e78A72F30" # value: @@ -268,6 +271,24 @@ ingress: # - secretName: chart-example-tls # hosts: # - 
chart-example.local +# exec-api: +# enabled: true +# hosts: +# - exec-executor.{{ include "rollup.name" . }}.{{ .Values.ingress.hostname }} +# path: / +# pathType: Prefix +# service: +# name: '{{ include "rollup.name" . }}-evm-service' +# port: +# name: exec-grpc-svc +# annotations: {} +# # kubernetes.io/ingress.class: nginx +# # kubernetes.io/tls-acme: "true" +# labels: {} +# tls: {} +# # - secretName: chart-example-tls +# # hosts: +# # - chart-example.local # Default persistent storage values # NOTE - `rollupName` will be used with `persistentVolumeName` to generate names for kubernetes resources. diff --git a/charts/evm-stack/values.yaml b/charts/evm-stack/values.yaml index 6aba3ae14..01b386495 100644 --- a/charts/evm-stack/values.yaml +++ b/charts/evm-stack/values.yaml @@ -39,9 +39,6 @@ evm-rollup: otlpHeaders: "{{ .Values.global.otel.otlpHeaders }}" traceHeaders: "{{ .Values.global.otel.traceHeaders }}" -celestia-node: - enabled: false - composer: enabled: false config: @@ -59,6 +56,10 @@ composer: otlpHeaders: "{{ .Values.global.otel.otlpHeaders }}" traceHeaders: "{{ .Values.global.otel.traceHeaders }}" + +celestia-node: + enabled: false + evm-faucet: enabled: false config: diff --git a/charts/sequencer/files/cometbft/config/genesis.json b/charts/sequencer/files/cometbft/config/genesis.json index 8caf12aa3..8046c4453 100644 --- a/charts/sequencer/files/cometbft/config/genesis.json +++ b/charts/sequencer/files/cometbft/config/genesis.json @@ -31,7 +31,7 @@ {{- if $index }},{{- end }} { "address": {{ include "sequencer.address" $value.address }}, - "balance": {{ include "sequencer.toUint128Proto" ( toString $value.balance | replace "\"" "" ) }} + "balance": {{ include "sequencer.toUint128Proto" ( toString $value.balance | replace "\"" "" ) }} } {{- end }} ], diff --git a/charts/sequencer/templates/configmaps.yaml b/charts/sequencer/templates/configmaps.yaml index 2f49d4416..779025436 100644 --- a/charts/sequencer/templates/configmaps.yaml +++ 
b/charts/sequencer/templates/configmaps.yaml @@ -64,6 +64,8 @@ data: ASTRIA_SEQUENCER_METRICS_HTTP_LISTENER_ADDR: "0.0.0.0:{{ .Values.ports.sequencerMetrics }}" ASTRIA_SEQUENCER_FORCE_STDOUT: "{{ .Values.global.useTTY }}" ASTRIA_SEQUENCER_PRETTY_PRINT: "{{ .Values.global.useTTY }}" + ASTRIA_SEQUENCER_COMPOSER_HOOK: "http://composer-service.{{ default .Release.Namespace .Values.global.namespaceOverride }}.svc.cluster.local:50052" + ASTRIA_SEQUENCER_COMPOSER_HOOK_ENABLED: "{{ .Values.sequencer.composerHook.enabled}}" NO_COLOR: "{{ .Values.global.useTTY }}" ASTRIA_SEQUENCER_NO_OTEL: "{{ not .Values.sequencer.otel.enabled }}" OTEL_EXPORTER_OTLP_ENDPOINT: "{{ .Values.sequencer.otel.endpoint }}" diff --git a/charts/sequencer/values.yaml b/charts/sequencer/values.yaml index 89dfb9acc..85f5b4696 100644 --- a/charts/sequencer/values.yaml +++ b/charts/sequencer/values.yaml @@ -107,8 +107,6 @@ cometbft: # List of seeds to connect to seeds: [] - # List of nodes to keep persistent connections to - persistentPeers: [] # List of node IDs, to which a connection will be (re)established ignoring any existing limits unconditionalPeers: [] # Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) diff --git a/crates/astria-composer/Cargo.toml b/crates/astria-composer/Cargo.toml index 9dad92be7..26536fb30 100644 --- a/crates/astria-composer/Cargo.toml +++ b/crates/astria-composer/Cargo.toml @@ -13,7 +13,11 @@ name = "astria-composer" [dependencies] astria-build-info = { path = "../astria-build-info", features = ["runtime"] } -astria-core = { path = "../astria-core", features = ["serde", "server"] } +astria-core = { path = "../astria-core", features = [ + "client", + "serde", + "server", +] } astria-eyre = { path = "../astria-eyre" } config = { package = "astria-config", path = "../astria-config" } telemetry = { package = "astria-telemetry", path = "../astria-telemetry", features = [ @@ -53,6 +57,9 @@ tracing = { workspace = true, features = ["attributes"] 
} tryhard = { workspace = true } tonic = { workspace = true } tokio-stream = { workspace = true, features = ["net"] } +pbjson-types = { workspace = true } +bytes = { workspace = true } +astria-grpc-mock = { path = "../astria-grpc-mock" } [dependencies.sequencer-client] package = "astria-sequencer-client" @@ -63,15 +70,16 @@ features = ["http"] config = { package = "astria-config", path = "../astria-config", features = [ "tests", ] } + test_utils = { package = "astria-test-utils", path = "../astria-test-utils", features = [ "geth", ] } insta = { workspace = true, features = ["json"] } tempfile = { workspace = true } tokio-test = { workspace = true } -astria-core = { path = "../astria-core", features = ["client"] } tendermint-rpc = { workspace = true } wiremock = { workspace = true } +base64 = { workspace = true } [build-dependencies] astria-build-info = { path = "../astria-build-info", features = ["build"] } diff --git a/crates/astria-composer/justfile b/crates/astria-composer/justfile index e5c9ef965..fbb5671b6 100644 --- a/crates/astria-composer/justfile +++ b/crates/astria-composer/justfile @@ -1,3 +1,4 @@ + default: @just --list diff --git a/crates/astria-composer/local.env.example b/crates/astria-composer/local.env.example index 8936089de..78ec68a81 100644 --- a/crates/astria-composer/local.env.example +++ b/crates/astria-composer/local.env.example @@ -33,7 +33,11 @@ ASTRIA_COMPOSER_SEQUENCER_CHAIN_ID="astria-dev-1" # A list of execution `::,::`. # Rollup names are not case sensitive. If a name is repeated, the last list item is used. # names are sha256 hashed and used as the `rollup_id` in `SequenceAction`s -ASTRIA_COMPOSER_ROLLUPS="astriachain::ws://127.0.0.1:8545" +ASTRIA_COMPOSER_ROLLUP="" + +# The websocket url of the Rollup from which we can stream transactions using the +# getPendingTransactions API. +ASTRIA_COMPOSER_ROLLUP_WEBSOCKET_URL="" # The path to the file storing the private key for the sequencer account used for signing # transactions. 
The file should contain a hex-encoded Ed25519 secret key. @@ -53,6 +57,10 @@ ASTRIA_COMPOSER_MAX_SUBMIT_INTERVAL_MS=2000 # key and nonce bytes ASTRIA_COMPOSER_MAX_BYTES_PER_BUNDLE=200000 +# Max size of a bundle of sequence actions in bytes which can fit into a block. +# This is the sum of the sizes of all the `SequenceAction`s. +ASTRIA_COMPOSER_MAX_BUNDLE_SIZE=200000 + # Max amount of finished bundles that can be in the submission queue. # ASTRIA_COMPOSER_BUNDLE_QUEUE_CAPACITY * ASTRIA_COMPOSER_MAX_BYTES_PER_BUNDLE (e.g. # 40000 * 200KB=8GB) is the limit on how much memory the finished bundle queue can consume. @@ -72,6 +80,9 @@ ASTRIA_COMPOSER_GRPC_ADDR="0.0.0.0:0" # The asset to use for paying for transactions submitted to sequencer. ASTRIA_COMPOSER_FEE_ASSET="nria" +# The url of the execution api server +ASTRIA_COMPOSER_EXECUTION_API_URL="0.0.0.0:5031" + # The OTEL specific config options follow the OpenTelemetry Protocol Exporter v1 # specification as defined here: # https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/protocol/exporter.md diff --git a/crates/astria-composer/src/composer.rs b/crates/astria-composer/src/composer.rs index a4b82f27b..babc3d447 100644 --- a/crates/astria-composer/src/composer.rs +++ b/crates/astria-composer/src/composer.rs @@ -16,7 +16,10 @@ use tokio::{ signal, SignalKind, }, - sync::watch, + sync::{ + mpsc, + watch, + }, task::{ JoinError, JoinHandle, @@ -47,6 +50,11 @@ use crate::{ grpc, grpc::GrpcServer, metrics::Metrics, + sequencer_hooks::{ + FinalizedHashInfo, + OptimisticBlockInfo, + SequencerHooks, + }, Config, }; @@ -123,6 +131,11 @@ impl Composer { let (composer_status_sender, _) = watch::channel(Status::default()); let shutdown_token = CancellationToken::new(); + let (filtered_sequencer_block_sender, filtered_sequencer_block_receiver) = + mpsc::channel::(1000); + let (finalized_hash_sender, finalized_hash_receiver) = + mpsc::channel::(1000); + let 
(executor, executor_handle) = executor::Builder { sequencer_url: cfg.sequencer_url.clone(), sequencer_chain_id: cfg.sequencer_chain_id.clone(), @@ -131,7 +144,14 @@ impl Composer { block_time_ms: cfg.block_time_ms, max_bytes_per_bundle: cfg.max_bytes_per_bundle, bundle_queue_capacity: cfg.bundle_queue_capacity, + execution_api_url: cfg.execution_api_url.clone(), + fee_asset: cfg.fee_asset.clone(), + chain_name: cfg.rollup.clone(), + max_bundle_size: cfg.max_bundle_size, shutdown_token: shutdown_token.clone(), + // TODO - rename these?? + filtered_block_receiver: filtered_sequencer_block_receiver, + finalized_block_hash_receiver: finalized_hash_receiver, metrics, } .build() @@ -143,6 +163,10 @@ impl Composer { shutdown_token: shutdown_token.clone(), metrics, fee_asset: cfg.fee_asset.clone(), + sequencer_hooks: SequencerHooks::new( + filtered_sequencer_block_sender, + finalized_hash_sender, + ), } .build() .await @@ -160,7 +184,11 @@ impl Composer { "API server listening" ); - let rollups = cfg.parse_rollups()?; + // TODO - we don't need a map here, we can just use a single collector. This is done to + // get things working for now. 
we need to clean up later + let mut rollups = HashMap::new(); + rollups.insert(cfg.rollup.clone(), cfg.rollup_websocket_url.clone()); + let geth_collectors = rollups .iter() .map(|(rollup_name, url)| { @@ -176,6 +204,7 @@ impl Composer { (rollup_name.clone(), collector) }) .collect::>(); + let geth_collector_statuses: HashMap> = geth_collectors .iter() diff --git a/crates/astria-composer/src/config.rs b/crates/astria-composer/src/config.rs index c8e125d96..586db264a 100644 --- a/crates/astria-composer/src/config.rs +++ b/crates/astria-composer/src/config.rs @@ -1,18 +1,10 @@ -use std::{ - collections::HashMap, - net::SocketAddr, -}; +use std::net::SocketAddr; use serde::{ Deserialize, Serialize, }; -use crate::rollup::{ - ParseError, - Rollup, -}; - // this is a config, may have many boolean values #[allow(clippy::struct_excessive_bools)] #[derive(Debug, Deserialize, Serialize)] @@ -30,8 +22,11 @@ pub struct Config { /// The chain ID of the sequencer chain pub sequencer_chain_id: String, - /// A list of `::` pairs - pub rollups: String, + /// The rollup name + pub rollup: String, + + /// The URL of the websocket server for the rollup chain + pub rollup_websocket_url: String, /// Path to private key for the sequencer account used for signing transactions pub private_key_file: String, @@ -70,21 +65,12 @@ pub struct Config { /// The IBC asset to pay for transactions submiited to the sequencer. pub fee_asset: astria_core::primitive::v1::asset::Denom, -} -impl Config { - /// Returns a map of rollup names to rollup URLs. - /// - /// # Errors - /// - /// Returns an error if parsing fails. 
- pub fn parse_rollups(&self) -> Result, ParseError> { - self.rollups - .split(',') - .filter(|s| !s.is_empty()) - .map(|s| Rollup::parse(s).map(Rollup::into_parts)) - .collect::, _>>() - } + /// The URL of the execution API server + pub execution_api_url: String, + + /// The maximum possible size of a bundle + pub max_bundle_size: usize, } impl config::Config for Config { diff --git a/crates/astria-composer/src/executor/builder.rs b/crates/astria-composer/src/executor/builder.rs index 871a4f1b3..65c874ab3 100644 --- a/crates/astria-composer/src/executor/builder.rs +++ b/crates/astria-composer/src/executor/builder.rs @@ -5,8 +5,13 @@ use std::{ }; use astria_core::{ + self, crypto::SigningKey, - primitive::v1::Address, + primitive::v1::{ + asset, + Address, + RollupId, + }, protocol::transaction::v1alpha1::action::SequenceAction, }; use astria_eyre::eyre::{ @@ -14,13 +19,24 @@ use astria_eyre::eyre::{ eyre, WrapErr as _, }; -use tokio::sync::watch; +use tokio::sync::{ + mpsc, + watch, +}; use tokio_util::sync::CancellationToken; +use tracing::info; use crate::{ executor, - executor::Status, + executor::{ + simulator::BundleSimulator, + Status, + }, metrics::Metrics, + sequencer_hooks::{ + FinalizedHashInfo, + OptimisticBlockInfo, + }, }; pub(crate) struct Builder { @@ -32,6 +48,12 @@ pub(crate) struct Builder { pub(crate) max_bytes_per_bundle: usize, pub(crate) bundle_queue_capacity: usize, pub(crate) shutdown_token: CancellationToken, + pub(crate) execution_api_url: String, + pub(crate) chain_name: String, + pub(crate) fee_asset: asset::Denom, + pub(crate) max_bundle_size: usize, + pub(crate) filtered_block_receiver: mpsc::Receiver, + pub(crate) finalized_block_hash_receiver: mpsc::Receiver, pub(crate) metrics: &'static Metrics, } @@ -46,6 +68,12 @@ impl Builder { max_bytes_per_bundle, bundle_queue_capacity, shutdown_token, + execution_api_url, + chain_name, + fee_asset, + max_bundle_size, + filtered_block_receiver, + finalized_block_hash_receiver, metrics, } = 
self; let sequencer_client = sequencer_client::HttpClient::new(sequencer_url.as_str()) @@ -65,6 +93,13 @@ impl Builder { let (serialized_rollup_transaction_tx, serialized_rollup_transaction_rx) = tokio::sync::mpsc::channel::(256); + let rollup_id = RollupId::from_unhashed_bytes(&chain_name); + info!( + rollup_name = %chain_name, + rollup_id = %rollup_id, + "created new geth collector for rollup", + ); + Ok(( super::Executor { status, @@ -76,7 +111,14 @@ impl Builder { block_time: Duration::from_millis(block_time_ms), max_bytes_per_bundle, bundle_queue_capacity, + bundle_simulator: BundleSimulator::new(execution_api_url.as_str()) + .wrap_err("failed constructing bundle simulator")?, shutdown_token, + rollup_id, + fee_asset, + max_bundle_size, + filtered_block_receiver, + finalized_block_hash_receiver, metrics, }, executor::Handle::new(serialized_rollup_transaction_tx), diff --git a/crates/astria-composer/src/executor/bundle_factory/mod.rs b/crates/astria-composer/src/executor/bundle_factory/mod.rs index 58a3e5a9b..e64d060b8 100644 --- a/crates/astria-composer/src/executor/bundle_factory/mod.rs +++ b/crates/astria-composer/src/executor/bundle_factory/mod.rs @@ -26,7 +26,7 @@ use tracing::trace; mod tests; #[derive(Debug, thiserror::Error)] -enum SizedBundleError { +pub(super) enum SizedBundleError { #[error("bundle does not have enough space left for the given sequence action")] NotEnoughSpace(SequenceAction), #[error("sequence action is larger than the max bundle size")] @@ -65,7 +65,7 @@ pub(super) struct SizedBundle { impl SizedBundle { /// Create a new empty bundle with the given max size. 
- fn new(max_size: usize) -> Self { + pub(super) fn new(max_size: usize) -> Self { Self { buffer: vec![], curr_size: 0, @@ -78,7 +78,7 @@ impl SizedBundle { /// # Errors /// - `seq_action` is beyond the max size allowed for the entire bundle /// - `seq_action` does not fit in the remaining space in the bundle - fn try_push(&mut self, seq_action: SequenceAction) -> Result<(), SizedBundleError> { + pub(super) fn try_push(&mut self, seq_action: SequenceAction) -> Result<(), SizedBundleError> { let seq_action_size = encoded_len(&seq_action); if seq_action_size > self.max_size { @@ -102,7 +102,7 @@ impl SizedBundle { } /// Replace self with a new empty bundle, returning the old bundle. - fn flush(&mut self) -> SizedBundle { + pub(super) fn flush(&mut self) -> SizedBundle { mem::replace(self, Self::new(self.max_size)) } @@ -234,6 +234,7 @@ impl BundleFactory { /// /// The bundle is only removed from the factory on calling [`NextFinishedBundle::pop`]. /// This method primarily exists to work around async cancellation. 
+ #[allow(dead_code)] pub(super) fn next_finished(&mut self) -> Option { if self.finished.is_empty() { None @@ -259,11 +260,13 @@ impl BundleFactory { } } +#[allow(dead_code)] pub(super) struct NextFinishedBundle<'a> { bundle_factory: &'a mut BundleFactory, } impl<'a> NextFinishedBundle<'a> { + #[allow(dead_code)] pub(super) fn pop(self) -> SizedBundle { self.bundle_factory .finished diff --git a/crates/astria-composer/src/executor/client.rs b/crates/astria-composer/src/executor/client.rs new file mode 100644 index 000000000..74a81c8e9 --- /dev/null +++ b/crates/astria-composer/src/executor/client.rs @@ -0,0 +1,306 @@ +use std::time::Duration; + +use astria_core::{ + execution::v1alpha2::{ + CommitmentState, + ExecuteBlockResponse, + }, + generated::{ + execution::{ + v1alpha2 as raw, + v1alpha2::execution_service_client::ExecutionServiceClient, + }, + sequencerblock::v1alpha1::RollupData, + }, + Protobuf as _, +}; +use astria_eyre::eyre::{ + self, + WrapErr as _, +}; +use bytes::Bytes; +use pbjson_types::Timestamp; +use tonic::transport::{ + Channel, + Endpoint, + Uri, +}; +use tracing::{ + instrument, + warn, + Instrument, + Span, +}; +use tryhard::{ + backoff_strategies::BackoffStrategy, + RetryPolicy, +}; + +/// A newtype wrapper around [`ExecutionServiceClient`] to work with +/// idiomatic types. 
+#[derive(Clone)] +pub(crate) struct Client { + uri: Uri, + inner: ExecutionServiceClient, +} + +impl Client { + pub(crate) fn connect_lazy(uri: &str) -> eyre::Result { + let uri: Uri = uri + .parse() + .wrap_err("failed to parse provided string as uri")?; + let endpoint = Endpoint::from(uri.clone()).connect_lazy(); + let inner = ExecutionServiceClient::new(endpoint); + Ok(Self { + uri, + inner, + }) + } + + pub(crate) fn uri(&self) -> String { + self.uri.to_string() + } + + /// Calls remote procedure `astria.execution.v1alpha2.ExecuteBlock` + /// + /// # Arguments + /// + /// * `prev_block_hash` - Block hash of the parent block + /// * `transactions` - List of transactions extracted from the sequencer block + /// * `timestamp` - Optional timestamp of the sequencer block + #[instrument(skip_all, fields(uri = %self.uri), err)] + pub(super) async fn execute_block_with_retry( + &self, + prev_block_hash: Bytes, + transactions: Vec>, + timestamp: Timestamp, + simulate_only: bool, + ) -> eyre::Result { + use prost::Message; + + let transactions = transactions + .into_iter() + .map(|tx| RollupData::decode(tx.as_slice())) + .collect::>() + .wrap_err("failed to decode tx bytes as RollupData")?; + + let request = raw::ExecuteBlockRequest { + prev_block_hash, + transactions, + timestamp: Some(timestamp), + simulate_only, + }; + let response = tryhard::retry_fn(|| { + let mut client = self.inner.clone(); + let request = request.clone(); + async move { client.execute_block(request).await } + }) + .with_config(retry_config()) + .in_current_span() + .await + .wrap_err( + "failed to execute astria.execution.v1alpha2.ExecuteBlock RPC because of gRPC status \ + code or because number of retries were exhausted", + )? 
+ .into_inner(); + let execute_block_response = ExecuteBlockResponse::try_from_raw(response) + .wrap_err("failed converting raw response to validated execute block response")?; + + Ok(execute_block_response) + } + + /// Calls remote procedure `astria.execution.v1alpha2.GetCommitmentState` + #[instrument(skip_all, fields(uri = %self.uri), err)] + pub(crate) async fn get_commitment_state_with_retry(&self) -> eyre::Result { + let response = tryhard::retry_fn(|| { + let mut client = self.inner.clone(); + async move { + let request = raw::GetCommitmentStateRequest {}; + client.get_commitment_state(request).await + } + }) + .with_config(retry_config()) + .in_current_span() + .await + .wrap_err( + "failed to execute astria.execution.v1alpha2.GetCommitmentState RPC because of gRPC \ + status code or because number of retries were exhausted", + )? + .into_inner(); + let commitment_state = CommitmentState::try_from_raw(response) + .wrap_err("failed converting raw response to validated commitment state")?; + Ok(commitment_state) + } +} + +struct OnRetry { + parent: Span, +} + +impl tryhard::OnRetry for OnRetry { + type Future = futures::future::Ready<()>; + + fn on_retry( + &mut self, + attempt: u32, + next_delay: Option, + previous_error: &tonic::Status, + ) -> Self::Future { + let wait_duration = next_delay + .map(humantime::format_duration) + .map(tracing::field::display); + warn!( + parent: self.parent.id(), + attempt, + wait_duration, + error = previous_error as &dyn std::error::Error, + "failed executing RPC; retrying after backoff" + ); + futures::future::ready(()) + } +} + +fn retry_config() -> tryhard::RetryFutureConfig { + tryhard::RetryFutureConfig::new(u32::MAX) + .custom_backoff(ExecutionApiRetryStrategy { + delay: Duration::from_millis(100), + }) + // XXX: This should probably be configurable.
+ .max_delay(Duration::from_secs(10)) + .on_retry(OnRetry { + parent: Span::current(), + }) +} + +/// An exponential retry strategy branching on [`tonic::Status::code`]. +/// +/// This retry strategy behaves exactly like +/// [`tryhard::backoff_strategies::ExponentialBackoff`] but is specialized to +/// work with [`tonic::Status`]. +/// +/// Execution will be retried under the following conditions: +/// +/// ```text +/// Code::Cancelled +/// Code::Unknown +/// Code::DeadlineExceeded +/// Code::NotFound +/// Code::ResourceExhausted +/// Code::Aborted +/// Code::Unavailable +/// ``` +struct ExecutionApiRetryStrategy { + delay: Duration, +} + +impl<'a> BackoffStrategy<'a, tonic::Status> for ExecutionApiRetryStrategy { + type Output = RetryPolicy; + + fn delay(&mut self, _attempt: u32, error: &'a tonic::Status) -> Self::Output { + if should_retry(error) { + let prev_delay = self.delay; + self.delay = self.delay.saturating_mul(2); + RetryPolicy::Delay(prev_delay) + } else { + RetryPolicy::Break + } + } +} + +fn should_retry(status: &tonic::Status) -> bool { + use tonic::Code; + // gRPC return codes and if they should be retried. Also refer to + // [1] https://github.com/grpc/grpc/blob/1309eb283c3e11c471191f286ceab01b75477ffc/doc/statuscodes.md + // + // Code::Ok => no, success + // Code::Cancelled => yes, but should be revisited if "we" would cancel + // Code::Unknown => yes, could this be returned if the endpoint is unavailable? 
+ // Code::InvalidArgument => no, no point retrying + // Code::DeadlineExceeded => yes, server might be slow + // Code::NotFound => yes, resource might not yet be available + // Code::AlreadyExists => no, no point retrying + // Code::PermissionDenied => no, execution API uses permission-denied restart-trigger + // Code::ResourceExhausted => yes, retry after a while + // Code::FailedPrecondition => no, failed precondition should not be retried unless the + // precondition is fixed, see [1] + // Code::Aborted => yes, although this applies to a read-modify-write sequence. We should + // implement this not per-request but per-request-sequence (for example, + // execute + update-commitment-state) + // Code::OutOfRange => no, we don't expect to send any out-of-range requests. + // Code::Unimplemented => no, no point retrying + // Code::Internal => no, this is a serious error on the backend; don't retry + // Code::Unavailable => yes, retry after backoff is desired + // Code::DataLoss => no, unclear how this would happen, but sounds very terminal + // Code::Unauthenticated => no, this status code will likely not change after retrying + matches!( + status.code(), + Code::Cancelled + | Code::Unknown + | Code::DeadlineExceeded + | Code::NotFound + | Code::ResourceExhausted + | Code::Aborted + | Code::Unavailable + ) +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use tonic::{ + Code, + Status, + }; + + use super::{ + BackoffStrategy as _, + ExecutionApiRetryStrategy, + RetryPolicy, + }; + + #[track_caller] + fn assert_retry_policy(code: Code) { + let mut strat = ExecutionApiRetryStrategy { + delay: Duration::from_secs(1), + }; + let status = Status::new(code, ""); + let actual = strat.delay(1, &status); + if SHOULD_RETRY { + let expected = RetryPolicy::Delay(Duration::from_secs(1)); + assert_eq!( + expected, actual, + "gRPC code `{code}` should lead to retry, but instead gave break" + ); + } else { + let expected = RetryPolicy::Break; + assert_eq!( + expected, 
actual, + "gRPC code `{code}` should lead to break, but instead gave delay" + ); + } + } + + #[test] + fn status_codes_lead_to_expected_retry_policy() { + const SHOULD_RETRY: bool = true; + const SHOULD_BREAK: bool = false; + assert_retry_policy::(Code::Ok); + assert_retry_policy::(Code::Cancelled); + assert_retry_policy::(Code::Unknown); + assert_retry_policy::(Code::InvalidArgument); + assert_retry_policy::(Code::DeadlineExceeded); + assert_retry_policy::(Code::NotFound); + assert_retry_policy::(Code::AlreadyExists); + assert_retry_policy::(Code::PermissionDenied); + assert_retry_policy::(Code::ResourceExhausted); + assert_retry_policy::(Code::FailedPrecondition); + assert_retry_policy::(Code::Aborted); + assert_retry_policy::(Code::OutOfRange); + assert_retry_policy::(Code::Unimplemented); + assert_retry_policy::(Code::Internal); + assert_retry_policy::(Code::Unavailable); + assert_retry_policy::(Code::DataLoss); + assert_retry_policy::(Code::Unauthenticated); + } +} diff --git a/crates/astria-composer/src/executor/mod.rs b/crates/astria-composer/src/executor/mod.rs index a873119d7..0e5f098cf 100644 --- a/crates/astria-composer/src/executor/mod.rs +++ b/crates/astria-composer/src/executor/mod.rs @@ -1,8 +1,3 @@ -/// ! The `Executor` is responsible for: -/// - Nonce management -/// - Transaction signing -/// - Managing the connection to the sequencer -/// - Submitting transactions to the sequencer use std::{ collections::VecDeque, pin::Pin, @@ -10,8 +5,19 @@ use std::{ time::Duration, }; +/// ! 
The `Executor` is responsible for: +/// - Nonce management +/// - Transaction signing +/// - Managing the connection to the sequencer +/// - Submitting transactions to the sequencer +use astria_core::sequencerblock::v1alpha1::block::RollupData; use astria_core::{ crypto::SigningKey, + generated as raw, + primitive::v1::{ + asset, + RollupId, + }, protocol::{ abci::AbciErrorCode, transaction::v1alpha1::{ @@ -26,9 +32,9 @@ use astria_eyre::eyre::{ self, WrapErr as _, }; +use bytes::Bytes; use futures::{ future::{ - self, Fuse, FusedFuture as _, FutureExt as _, @@ -91,11 +97,24 @@ use crate::{ mod bundle_factory; pub(crate) mod builder; +mod client; +mod simulator; #[cfg(test)] mod tests; pub(crate) use builder::Builder; +use crate::{ + executor::simulator::{ + BundleSimulationResult, + BundleSimulator, + }, + sequencer_hooks::{ + FinalizedHashInfo, + OptimisticBlockInfo, + }, +}; + // Duration to wait for the executor to drain all the remaining bundles before shutting down. // This is 16s because the timeout for the higher level executor task is 17s to shut down. // The extra second is to prevent the higher level executor task from timing out before the @@ -140,6 +159,16 @@ pub(super) struct Executor { bundle_queue_capacity: usize, // Token to signal the executor to stop upon shutdown. shutdown_token: CancellationToken, + // `BundleSimulator` simulates the execution of a bundle of transactions. 
+ bundle_simulator: BundleSimulator, + // The rollup id associated with this executor + rollup_id: RollupId, + // The asset used for sequencer fees + fee_asset: asset::Denom, + // The maximum possible size for a bundle so that it can fit into a block + max_bundle_size: usize, + filtered_block_receiver: mpsc::Receiver, + finalized_block_hash_receiver: mpsc::Receiver, metrics: &'static Metrics, } @@ -190,6 +219,83 @@ impl Executor { self.status.subscribe() } + #[instrument(skip_all)] + // TODO - improve the func args and the return type + async fn simulate_bundle( + &self, + bundle: SizedBundle, + parent_block: Vec, + time: pbjson_types::Timestamp, + ) -> eyre::Result<(SizedBundle, BundleSimulationResult)> { + let filtered_sequence_actions: Vec = parent_block + .iter() + .filter(|action| action.rollup_id == self.rollup_id) + .cloned() + .collect(); + let mut parent_block_rollup_data_items = vec![]; + for seq_action in filtered_sequence_actions { + let rollup_data = RollupData::SequencedData(seq_action.data); + parent_block_rollup_data_items.push(rollup_data); + } + + let bundle_simulator = self.bundle_simulator.clone(); + + info!("Creating the parent block for simulating the bundle"); + let parent_bundle_simulation_result = bundle_simulator + .clone() + .create_parent_block(parent_block_rollup_data_items, time) + .await + .wrap_err("failed to simulate bundle")?; + info!("Created the parent block!"); + + // now we have the parent block hash, we should simulate the bundle on top of the parent + // hash + info!("Simulating the bundle on top of the created parent block!"); + let bundle_simulation_result = bundle_simulator + .clone() + .simulate_bundle_on_block( + bundle, + parent_bundle_simulation_result.block().clone(), + None, + ) + .await + .wrap_err("failed to simulate bundle")?; + info!("Simulation done!"); + + let rollup_data_items: Vec = + bundle_simulation_result + .included_actions() + .iter() + .map(astria_core::Protobuf::to_raw) + .collect(); + + 
info!("Creating a Builder Bundle Packet"); + let builder_bundle = raw::composer::v1alpha1::BuilderBundle { + transactions: rollup_data_items, + parent_hash: bundle_simulation_result.parent_hash(), + }; + + let builder_bundle_packet = raw::composer::v1alpha1::BuilderBundlePacket { + bundle: Some(builder_bundle), + signature: Bytes::from(vec![]), + }; + let encoded_builder_bundle_packet = builder_bundle_packet.encode_to_vec(); + + info!("Created builder bundle packet!"); + // we can give the BuilderBundlePacket the highest bundle max size possible + // since this is the only sequence action we are sending + let mut final_bundle = SizedBundle::new(self.max_bundle_size); + final_bundle + .try_push(SequenceAction { + rollup_id: self.rollup_id, + data: encoded_builder_bundle_packet.into(), + fee_asset: self.fee_asset.clone(), + }) + .wrap_err("couldn't push sequence action to bundle")?; + + Ok((final_bundle, bundle_simulation_result)) + } + /// Create a future to submit a bundle to the sequencer. #[instrument(skip_all, fields(nonce.initial = %nonce))] fn submit_bundle( @@ -232,8 +338,6 @@ impl Executor { self.metrics.set_current_nonce(nonce); - self.status.send_modify(|status| status.is_connected = true); - let block_timer = time::sleep(self.block_time); tokio::pin!(block_timer); let mut bundle_factory = @@ -245,6 +349,11 @@ impl Executor { .expect("block_time should not be large enough to cause an overflow") }; + self.status.send_modify(|status| status.is_connected = true); + + let mut current_finalized_block_hash: Option = None; + let mut pending_builder_bundle_packet: Option = None; + let reason = loop { select! 
{ biased; @@ -259,10 +368,39 @@ impl Executor { }; } - Some(next_bundle) = future::ready(bundle_factory.next_finished()), if submission_fut.is_terminated() => { - let bundle = next_bundle.pop(); + // from process_proposal + Some(filtered_sequencer_block) = self.filtered_block_receiver.recv() => { + // we need to simulate the bundle + // and cache the simulated bundle and the block_hash + let bundle = bundle_factory.pop_now(); if !bundle.is_empty() { - submission_fut = self.submit_bundle(nonce, bundle, self.metrics); + info!("received {:?} sequence actions from process_proposal", filtered_sequencer_block.seq_actions().len()); + match self.simulate_bundle(bundle, filtered_sequencer_block.seq_actions(), filtered_sequencer_block.time()) + .await.wrap_err("failed to simulate bundle on top of received parent block") { + Ok(res) => { + pending_builder_bundle_packet = Some(res.0); + current_finalized_block_hash = Some(filtered_sequencer_block.block_hash().clone()); + }, + Err(e) => { + error!(%e, "failed to simulate bundle on top of received parent block"); + } + } + info!("simulation done on transactions received from process_proposal!"); + } + } + + // from finalize_block + Some(finalized_block_hash) = self.finalized_block_hash_receiver.recv(), if submission_fut.is_terminated() => { + if let Some(block_hash) = current_finalized_block_hash.take() { + if block_hash == finalized_block_hash.block_hash() { + // we can submit the pending builder bundle packet + if let Some(builder_bundle_packet) = pending_builder_bundle_packet.take() { + info!("received finalized block hash matches that of process_proposal, submitting pending builder bundle packet"); + if !builder_bundle_packet.is_empty() { + submission_fut = self.submit_bundle(nonce, builder_bundle_packet, self.metrics); + } + } + } } } @@ -270,16 +408,6 @@ impl Executor { Some(seq_action) = self.serialized_rollup_transactions.recv(), if !bundle_factory.is_full() => { self.bundle_seq_action(seq_action, &mut bundle_factory); } 
- - // try to preempt current bundle if the timer has ticked without submitting the next bundle - () = &mut block_timer, if submission_fut.is_terminated() => { - let bundle = bundle_factory.pop_now(); - if bundle.is_empty() { - block_timer.as_mut().reset(reset_time()); - } else { - submission_fut = self.submit_bundle(nonce, bundle, self.metrics); - } - } } }; diff --git a/crates/astria-composer/src/executor/simulator.rs b/crates/astria-composer/src/executor/simulator.rs new file mode 100644 index 000000000..ca9633b3a --- /dev/null +++ b/crates/astria-composer/src/executor/simulator.rs @@ -0,0 +1,188 @@ +use astria_core::execution::v1alpha2::Block; +/// ! `BundleSimulator` is responsible for fetching the latest rollup commitment state +/// and simulating the given bundle on top of the latest soft block. +use astria_core::{ + sequencerblock::v1alpha1::block::RollupData, + Protobuf, +}; +use astria_eyre::{ + eyre, + eyre::WrapErr as _, +}; +use bytes::Bytes; +use pbjson_types::Timestamp; +use prost::Message; +use tracing::{ + info, + instrument, +}; + +use crate::executor::{ + bundle_factory::SizedBundle, + client::Client, +}; + +#[derive(Clone)] +pub(crate) struct BundleSimulator { + execution_service_client: Client, +} + +pub(crate) struct BundleSimulationResult { + block: Block, + included_actions: Vec, + parent_hash: Bytes, +} + +impl BundleSimulationResult { + pub(crate) fn new( + included_sequence_actions: Vec, + block: Block, + parent_hash: Bytes, + ) -> Self { + Self { + block, + included_actions: included_sequence_actions, + parent_hash, + } + } + + pub(crate) fn included_actions(&self) -> &[RollupData] { + self.included_actions.as_slice() + } + + pub(crate) fn parent_hash(&self) -> Bytes { + self.parent_hash.clone() + } + + pub(crate) fn block(&self) -> &Block { + &self.block + } +} + +impl BundleSimulator { + pub(crate) fn new(execution_api_uri: &str) -> eyre::Result { + Ok(Self { + execution_service_client: Client::connect_lazy(execution_api_uri) + 
.wrap_err("failed to connect to execution service")?, + }) + } + + #[instrument(skip_all, fields(uri=self.execution_service_client.uri()))] + pub(crate) async fn create_parent_block( + self, + rollup_data: Vec, + time: pbjson_types::Timestamp, + ) -> eyre::Result { + // call GetCommitmentState to get the soft block + let commitment_state = self + .execution_service_client + .get_commitment_state_with_retry() + .await + .wrap_err("failed to get commitment state")?; + + let soft_block = commitment_state.soft(); + // convert the sized bundle actions to a list of Vec + let actions: Vec> = rollup_data + .iter() + .map(|action| match action.clone() { + RollupData::SequencedData(data) => data.to_vec(), + RollupData::Deposit(_) => vec![], + }) + .filter(|data| !data.is_empty()) + .collect(); + + self.inner_simulate_bundle_on_block(actions, soft_block.clone(), Some(time), false) + .await + } + + #[instrument(skip_all, fields(uri=self.execution_service_client.uri()))] + pub(crate) async fn simulate_bundle( + self, + bundle: SizedBundle, + ) -> eyre::Result { + // call GetCommitmentState to get the soft block + info!("Calling GetCommitmentState!"); + let commitment_state = self + .execution_service_client + .get_commitment_state_with_retry() + .await + .wrap_err("failed to get commitment state")?; + info!("Received CommitmentState of rollup"); + + let soft_block = commitment_state.soft(); + info!("Soft block hash is {:?}", soft_block.hash()); + + let actions = convert_bundle_to_byte_array(bundle); + + self.inner_simulate_bundle_on_block(actions, soft_block.clone(), None, true) + .await + } + + #[instrument(skip_all, fields(uri=self.execution_service_client.uri()))] + pub(crate) async fn simulate_bundle_on_block( + self, + bundle: SizedBundle, + block: Block, + timestamp: Option, + ) -> eyre::Result { + let actions = convert_bundle_to_byte_array(bundle); + self.inner_simulate_bundle_on_block(actions, block, timestamp, true) + .await + } + + #[instrument(skip_all, 
fields(uri=self.execution_service_client.uri()), err)] + async fn inner_simulate_bundle_on_block( + self, + bundle: Vec>, + block: Block, + timestamp: Option, + simulate_only: bool, + ) -> eyre::Result { + // convert the sized bundle actions to a list of Vec + // as long as the timestamp > parent block timestamp, the block will be successfully + // created. It doesn't matter what timestamp we use anyway since we are not going to + // commit the block to the chain. + let timestamp = timestamp.unwrap_or(Timestamp { + seconds: block.timestamp().seconds + 3, + nanos: 0, + }); + // call execute block with the bundle to get back the included transactions + let execute_block_response = self + .execution_service_client + .execute_block_with_retry( + block.hash().clone(), + bundle, + // use current timestamp + timestamp, + simulate_only, + ) + .await + .wrap_err("failed to execute block")?; + + let included_transactions = execute_block_response.included_transactions(); + info!( + "Bundle simulated on top of {:?} and {:?} transactions were included", + block.hash().clone(), + included_transactions.len() + ); + Ok(BundleSimulationResult::new( + included_transactions.to_vec(), + execute_block_response.block().clone(), + block.hash().clone(), + )) + } +} + +fn convert_bundle_to_byte_array(bundle: SizedBundle) -> Vec> { + bundle + .into_actions() + .iter() + .map(|action| match action.as_sequence() { + Some(seq_action) => RollupData::SequencedData(seq_action.clone().data) + .to_raw() + .encode_to_vec(), + None => vec![], + }) + .filter(|data| !data.is_empty()) + .collect() +} diff --git a/crates/astria-composer/src/executor/tests.rs b/crates/astria-composer/src/executor/tests.rs index bb0e4af53..9d8828f6b 100644 --- a/crates/astria-composer/src/executor/tests.rs +++ b/crates/astria-composer/src/executor/tests.rs @@ -8,8 +8,13 @@ use std::{ }; use astria_core::{ - generated::protocol::accounts::v1alpha1::NonceResponse, + generated::{ + composer::v1alpha1::BuilderBundlePacket, + 
protocol::accounts::v1alpha1::NonceResponse, + sequencerblock::v1alpha1 as raw_sequencer, + }, primitive::v1::{ + asset, asset::{ Denom, IbcPrefixed, @@ -18,8 +23,11 @@ use astria_core::{ ROLLUP_ID_LEN, }, protocol::transaction::v1alpha1::action::SequenceAction, + sequencerblock::v1alpha1::block::RollupData, + Protobuf, }; use astria_eyre::eyre; +use futures::future::join; use once_cell::sync::Lazy; use prost::{ bytes::Bytes, @@ -68,6 +76,16 @@ use crate::{ executor, executor::EnsureChainIdError, metrics::Metrics, + mock_grpc::{ + MockGrpc, + TestExecutor, + }, + mount_executed_block, + mount_get_commitment_state, + sequencer_hooks::{ + FinalizedHashInfo, + OptimisticBlockInfo, + }, test_utils::sequence_action_of_max_size, Config, }; @@ -80,7 +98,8 @@ static TELEMETRY: Lazy<()> = Lazy::new(|| { api_listen_addr: SocketAddr::new(IpAddr::from([0, 0, 0, 0]), 0), sequencer_url: String::new(), sequencer_chain_id: String::new(), - rollups: String::new(), + rollup: "".to_string(), + rollup_websocket_url: "".to_string(), private_key_file: String::new(), sequencer_address_prefix: String::new(), block_time_ms: 0, @@ -93,6 +112,8 @@ static TELEMETRY: Lazy<()> = Lazy::new(|| { pretty_print: false, grpc_addr: SocketAddr::new(IpAddr::from([0, 0, 0, 0]), 0), fee_asset: Denom::IbcPrefixed(IbcPrefixed::new([0; 32])), + execution_api_url: "".to_string(), + max_bundle_size: 0, }; if std::env::var_os("TEST_LOG").is_some() { let filter_directives = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".into()); @@ -111,18 +132,19 @@ static TELEMETRY: Lazy<()> = Lazy::new(|| { } }); -fn sequence_action() -> SequenceAction { +fn sequence_action(rollup_id: RollupId, fee_asset: asset::Denom) -> SequenceAction { SequenceAction { - rollup_id: RollupId::new([0; ROLLUP_ID_LEN]), - data: Bytes::new(), - fee_asset: "nria".parse().unwrap(), + rollup_id, + data: Bytes::from(vec![]), + fee_asset, } } /// Start a mock sequencer server and mount a mock for the `accounts/nonce` query. 
-async fn setup() -> (MockServer, Config, NamedTempFile) { +async fn setup() -> (MockServer, Config, NamedTempFile, TestExecutor) { Lazy::force(&TELEMETRY); let server = MockServer::start().await; + let execution_api_server = MockGrpc::spawn().await; let keyfile = NamedTempFile::new().unwrap(); (&keyfile) @@ -132,7 +154,8 @@ async fn setup() -> (MockServer, Config, NamedTempFile) { let cfg = Config { log: String::new(), api_listen_addr: "127.0.0.1:0".parse().unwrap(), - rollups: String::new(), + rollup: "test-chain-1".to_string(), + rollup_websocket_url: String::new(), sequencer_url: server.uri(), sequencer_chain_id: "test-chain-1".to_string(), private_key_file: keyfile.path().to_string_lossy().to_string(), @@ -146,9 +169,22 @@ async fn setup() -> (MockServer, Config, NamedTempFile) { metrics_http_listener_addr: String::new(), pretty_print: true, grpc_addr: "127.0.0.1:0".parse().unwrap(), - fee_asset: "nria".parse().unwrap(), + fee_asset: "nria" + .parse::() + .unwrap() + .to_ibc_prefixed() + .into(), + max_bundle_size: 200000, + execution_api_url: format!("http://{}", execution_api_server.local_addr), }; - (server, cfg, keyfile) + ( + server, + cfg, + keyfile, + TestExecutor { + mock_grpc: execution_api_server, + }, + ) } /// Assert that given error is of correct type and contains the expected chain IDs. 
@@ -322,9 +358,14 @@ async fn wait_for_startup( #[tokio::test] async fn full_bundle() { // set up the executor, channel for writing seq actions, and the sequencer mock - let (sequencer, cfg, _keyfile) = setup().await; + let (sequencer, cfg, _keyfile, test_executor) = setup().await; let shutdown_token = CancellationToken::new(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&cfg).unwrap())); + let (filtered_block_sender, filtered_block_receiver) = + tokio::sync::mpsc::channel::(10); + let (finalized_hash_sender, finalized_hash_receiver) = + tokio::sync::mpsc::channel::(10); + mount_genesis(&sequencer, &cfg.sequencer_chain_id).await; let (executor, executor_handle) = executor::Builder { sequencer_url: cfg.sequencer_url.clone(), @@ -335,11 +376,30 @@ async fn full_bundle() { max_bytes_per_bundle: cfg.max_bytes_per_bundle, bundle_queue_capacity: cfg.bundle_queue_capacity, shutdown_token: shutdown_token.clone(), + execution_api_url: cfg.execution_api_url, + chain_name: cfg.rollup.clone(), + fee_asset: cfg.fee_asset, + max_bundle_size: cfg.max_bundle_size, + filtered_block_receiver, + finalized_block_hash_receiver: finalized_hash_receiver, metrics, } .build() .unwrap(); + let rollup_id = RollupId::from_unhashed_bytes(cfg.rollup.clone()); + + let soft_parent_hash = [1; 64]; + let soft_block_number = 1; + let soft_block_hash = [2; 64]; + + mount_get_commitment_state!( + test_executor, + firm: ( number: 1, hash: [1; 64], parent: [0; 64], ), + soft: ( number: soft_block_number, hash: soft_block_hash, parent: soft_parent_hash, ), + base_celestia_height: 1, + ); + let nonce_guard = mount_default_nonce_query_mock(&sequencer).await; let status = executor.subscribe(); @@ -352,13 +412,29 @@ async fn full_bundle() { // send two sequence actions to the executor, the first of which is large enough to fill the // bundle sending the second should cause the first to immediately be submitted in // order to make space for the second - let seq0 = 
sequence_action_of_max_size(cfg.max_bytes_per_bundle); + let seq0 = SequenceAction { + rollup_id, + ..sequence_action_of_max_size(cfg.max_bytes_per_bundle) + }; let seq1 = SequenceAction { - rollup_id: RollupId::new([1; ROLLUP_ID_LEN]), + rollup_id, ..sequence_action_of_max_size(cfg.max_bytes_per_bundle) }; + let rollup_data: Vec = vec![seq0.clone(), seq1.clone()] + .iter() + .map(|item| RollupData::SequencedData(item.clone().data).to_raw()) + .collect(); + + let execute_block = mount_executed_block!(test_executor, + mock_name: "execute_block", + number: soft_block_number, + hash: soft_block_hash, + included_transactions: rollup_data.clone(), + parent: soft_parent_hash.to_vec(), + ); + // push both sequence actions to the executor in order to force the full bundle to be sent executor_handle .send_timeout(seq0.clone(), Duration::from_millis(1000)) @@ -372,14 +448,16 @@ async fn full_bundle() { // wait for the mock sequencer to receive the signed transaction tokio::time::timeout( Duration::from_millis(100), - response_guard.wait_until_satisfied(), + join( + response_guard.wait_until_satisfied(), + execute_block.wait_until_satisfied(), + ), ) .await .unwrap(); // verify only one signed transaction was received by the mock sequencer // i.e. only the full bundle was sent and not the second one due to the block timer - let expected_seq_actions = [seq0]; let requests = response_guard.received_requests().await; assert_eq!(requests.len(), 1); @@ -387,24 +465,45 @@ async fn full_bundle() { let signed_tx = signed_tx_from_request(&requests[0]); let actions = signed_tx.actions(); + // we send only 1 action to the sequencer which is a BuilderBundlePacket + // we first verify that the action sent to the sequencer is a builder bundle packet. 
+ + // only 1 sequence action which is a BuilderBundlePacket is sent + assert_eq!(actions.len(), 1); + + // decode the sequence action to its BuilderBundlePacket + let seq_action = actions.iter().next().unwrap().as_sequence().unwrap(); + let proto_builder_bundle_packet = + BuilderBundlePacket::decode(&mut seq_action.data.clone()).unwrap(); + let builder_bundle_packet = astria_core::composer::v1alpha1::BuilderBundlePacket::try_from_raw( + proto_builder_bundle_packet.clone(), + ) + .unwrap(); + assert_eq!( - actions.len(), - expected_seq_actions.len(), - "received more than one action, one was supposed to fill the bundle" + builder_bundle_packet.bundle().parent_hash(), + soft_block_hash.to_vec() ); - for (action, expected_seq_action) in actions.iter().zip(expected_seq_actions.iter()) { - let seq_action = action.as_sequence().unwrap(); - assert_eq!( - seq_action.rollup_id, expected_seq_action.rollup_id, - "chain id does not match. actual {:?} expected {:?}", - seq_action.rollup_id, expected_seq_action.rollup_id - ); - assert_eq!( - seq_action.data, expected_seq_action.data, - "data does not match expected data for action with rollup_id {:?}", - seq_action.rollup_id, - ); + let bundle_txs = builder_bundle_packet.bundle().transactions(); + + // there should only be 1 sequence action in the bundle + assert_eq!(bundle_txs.len(), 1); + + assert_eq!(seq_action.fee_asset, seq0.fee_asset); + assert_eq!(seq_action.rollup_id, seq0.rollup_id); + + match bundle_txs.iter().next().unwrap() { + RollupData::SequencedData(data) => { + assert_eq!(data.clone(), seq0.data) + } + _ => { + assert!( + true, + "expected RollupData::SequencedData, but got {:?}", + bundle_txs.iter().next().unwrap() + ) + } } } @@ -413,9 +512,13 @@ async fn full_bundle() { #[tokio::test] async fn bundle_triggered_by_block_timer() { // set up the executor, channel for writing seq actions, and the sequencer mock - let (sequencer, cfg, _keyfile) = setup().await; + let (sequencer, cfg, _keyfile, test_executor) = 
setup().await; let shutdown_token = CancellationToken::new(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&cfg).unwrap())); + let (filtered_block_sender, filtered_block_receiver) = + tokio::sync::mpsc::channel::(10); + let (finalized_hash_sender, finalized_hash_receiver) = + tokio::sync::mpsc::channel::(10); mount_genesis(&sequencer, &cfg.sequencer_chain_id).await; let (executor, executor_handle) = executor::Builder { sequencer_url: cfg.sequencer_url.clone(), @@ -426,11 +529,19 @@ async fn bundle_triggered_by_block_timer() { max_bytes_per_bundle: cfg.max_bytes_per_bundle, bundle_queue_capacity: cfg.bundle_queue_capacity, shutdown_token: shutdown_token.clone(), + execution_api_url: cfg.execution_api_url, + chain_name: cfg.rollup.clone(), + fee_asset: cfg.fee_asset.clone(), + max_bundle_size: cfg.max_bundle_size, + filtered_block_receiver, + finalized_block_hash_receiver: finalized_hash_receiver, metrics, } .build() .unwrap(); + let rollup_id = RollupId::from_unhashed_bytes(cfg.rollup.clone()); + let nonce_guard = mount_default_nonce_query_mock(&sequencer).await; let status = executor.subscribe(); @@ -444,10 +555,34 @@ async fn bundle_triggered_by_block_timer() { // send two sequence actions to the executor, both small enough to fit in a single bundle // without filling it let seq0 = SequenceAction { - data: vec![0u8; cfg.max_bytes_per_bundle / 4].into(), - ..sequence_action() + data: Bytes::from(vec![0u8; cfg.max_bytes_per_bundle / 4]), + ..sequence_action(rollup_id.clone(), cfg.fee_asset.clone()) }; + let rollup_data: Vec = vec![seq0.clone()] + .iter() + .map(|item| RollupData::SequencedData(item.clone().data).to_raw()) + .collect(); + + let soft_parent_hash = [1; 64]; + let soft_block_number = 1; + let soft_block_hash = [2; 64]; + + mount_get_commitment_state!( + test_executor, + firm: ( number: 1, hash: [1; 64], parent: [0; 64], ), + soft: ( number: soft_block_number, hash: soft_block_hash, parent: soft_parent_hash, ), + base_celestia_height: 1, + ); + 
+ let execute_block = mount_executed_block!(test_executor, + mock_name: "execute_block", + number: soft_block_number, + hash: soft_block_hash, + included_transactions: rollup_data.clone(), + parent: soft_parent_hash.to_vec(), + ); + // make sure at least one block has passed so that the executor will submit the bundle // despite it not being full time::pause(); @@ -461,7 +596,10 @@ async fn bundle_triggered_by_block_timer() { // wait for the mock sequencer to receive the signed transaction tokio::time::timeout( Duration::from_millis(100), - response_guard.wait_until_satisfied(), + join( + response_guard.wait_until_satisfied(), + execute_block.wait_until_satisfied(), + ), ) .await .unwrap(); @@ -475,24 +613,54 @@ async fn bundle_triggered_by_block_timer() { let signed_tx = signed_tx_from_request(&requests[0]); let actions = signed_tx.actions(); + assert_eq!(actions.len(), 1); + + let seq_action = actions.iter().next().unwrap().as_sequence().unwrap(); + let proto_builder_bundle_packet = + BuilderBundlePacket::decode(&mut seq_action.data.clone()).unwrap(); + let builder_bundle_packet = astria_core::composer::v1alpha1::BuilderBundlePacket::try_from_raw( + proto_builder_bundle_packet.clone(), + ) + .unwrap(); + + assert_eq!(builder_bundle_packet.bundle().transactions().len(), 1); + assert_eq!( - actions.len(), - expected_seq_actions.len(), - "received more than one action, one was supposed to fill the bundle" + builder_bundle_packet.bundle().parent_hash().to_vec(), + soft_block_hash.to_vec() ); - for (action, expected_seq_action) in actions.iter().zip(expected_seq_actions.iter()) { - let seq_action = action.as_sequence().unwrap(); - assert_eq!( - seq_action.rollup_id, expected_seq_action.rollup_id, - "chain id does not match. 
actual {:?} expected {:?}", - seq_action.rollup_id, expected_seq_action.rollup_id - ); - assert_eq!( - seq_action.data, expected_seq_action.data, - "data does not match expected data for action with rollup_id {:?}", - seq_action.rollup_id, - ); + // ensure that the seq_action of the BuilderBundlePacket and the expected sequence actions have + // the same rollup id and fee asset + + for (action, expected_action) in expected_seq_actions.iter().zip(actions) { + let expected_seq_action = expected_action.as_sequence().unwrap(); + assert_eq!(action.rollup_id, expected_seq_action.rollup_id); + assert_eq!(action.fee_asset, expected_seq_action.fee_asset); + } + + for (action, expected_seq_action) in builder_bundle_packet + .bundle() + .transactions() + .iter() + .zip(expected_seq_actions.iter()) + { + match action.clone() { + RollupData::SequencedData(data) => { + assert_eq!( + data, expected_seq_action.data, + "data does not match expected data for action with rollup_id {:?}", + expected_seq_action.rollup_id + ) + } + _ => { + assert!( + true, + "expected RollupData::SequencedData, but got {:?}", + action + ) + } + } } } @@ -501,10 +669,14 @@ async fn bundle_triggered_by_block_timer() { #[tokio::test] async fn two_seq_actions_single_bundle() { // set up the executor, channel for writing seq actions, and the sequencer mock - let (sequencer, cfg, _keyfile) = setup().await; + let (sequencer, cfg, _keyfile, test_executor) = setup().await; let shutdown_token = CancellationToken::new(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&cfg).unwrap())); mount_genesis(&sequencer, &cfg.sequencer_chain_id).await; + let (filtered_block_sender, filtered_block_receiver) = + tokio::sync::mpsc::channel::(10); + let (finalized_hash_sender, finalized_hash_receiver) = + tokio::sync::mpsc::channel::(10); let (executor, executor_handle) = executor::Builder { sequencer_url: cfg.sequencer_url.clone(), sequencer_chain_id: cfg.sequencer_chain_id.clone(), @@ -514,11 +686,19 @@ async fn 
two_seq_actions_single_bundle() { max_bytes_per_bundle: cfg.max_bytes_per_bundle, bundle_queue_capacity: cfg.bundle_queue_capacity, shutdown_token: shutdown_token.clone(), + execution_api_url: cfg.execution_api_url, + chain_name: cfg.rollup.clone(), + fee_asset: cfg.fee_asset.clone(), + max_bundle_size: cfg.max_bundle_size, + filtered_block_receiver, + finalized_block_hash_receiver: finalized_hash_receiver, metrics, } .build() .unwrap(); + let rollup_id = RollupId::from_unhashed_bytes(cfg.rollup.clone()); + let nonce_guard = mount_default_nonce_query_mock(&sequencer).await; let status = executor.subscribe(); let _executor_task = tokio::spawn(executor.run_until_stopped()); @@ -532,15 +712,38 @@ async fn two_seq_actions_single_bundle() { // without filling it let seq0 = SequenceAction { data: vec![0u8; cfg.max_bytes_per_bundle / 4].into(), - ..sequence_action() + ..sequence_action(rollup_id.clone(), cfg.fee_asset.clone()) }; let seq1 = SequenceAction { - rollup_id: RollupId::new([1; ROLLUP_ID_LEN]), data: vec![1u8; cfg.max_bytes_per_bundle / 4].into(), - ..sequence_action() + ..sequence_action(rollup_id.clone(), cfg.fee_asset.clone()) }; + let rollup_data: Vec = vec![seq0.clone(), seq1.clone()] + .iter() + .map(|item| RollupData::SequencedData(item.clone().data).to_raw()) + .collect(); + + let soft_parent_hash = [1; 64]; + let soft_block_number = 1; + let soft_block_hash = [2; 64]; + + mount_get_commitment_state!( + test_executor, + firm: ( number: 1, hash: [1; 64], parent: [0; 64], ), + soft: ( number: soft_block_number, hash: soft_block_hash, parent: soft_parent_hash, ), + base_celestia_height: 1, + ); + + let execute_block = mount_executed_block!(test_executor, + mock_name: "execute_block", + number: soft_block_number, + hash: soft_block_hash, + included_transactions: rollup_data.clone(), + parent: soft_parent_hash.to_vec(), + ); + // make sure at least one block has passed so that the executor will submit the bundle // despite it not being full time::pause(); @@ 
-558,7 +761,10 @@ async fn two_seq_actions_single_bundle() { // wait for the mock sequencer to receive the signed transaction tokio::time::timeout( Duration::from_millis(100), - response_guard.wait_until_satisfied(), + join( + response_guard.wait_until_satisfied(), + execute_block.wait_until_satisfied(), + ), ) .await .unwrap(); @@ -572,24 +778,45 @@ async fn two_seq_actions_single_bundle() { let signed_tx = signed_tx_from_request(&requests[0]); let actions = signed_tx.actions(); + assert_eq!(actions.len(), 1); + + let seq_action = actions.iter().next().unwrap().as_sequence().unwrap(); + let proto_builder_bundle_packet = + BuilderBundlePacket::decode(&mut seq_action.data.clone()).unwrap(); + let builder_bundle_packet = astria_core::composer::v1alpha1::BuilderBundlePacket::try_from_raw( + proto_builder_bundle_packet.clone(), + ) + .unwrap(); + + let bundle_txs = builder_bundle_packet.bundle().transactions(); + + assert_eq!(builder_bundle_packet.bundle().transactions().len(), 2); assert_eq!( - actions.len(), - expected_seq_actions.len(), - "received more than one action, one was supposed to fill the bundle" + builder_bundle_packet.bundle().parent_hash().to_vec(), + soft_block_hash.to_vec() ); - for (action, expected_seq_action) in actions.iter().zip(expected_seq_actions.iter()) { - let seq_action = action.as_sequence().unwrap(); - assert_eq!( - seq_action.rollup_id, expected_seq_action.rollup_id, - "chain id does not match. 
actual {:?} expected {:?}", - seq_action.rollup_id, expected_seq_action.rollup_id - ); - assert_eq!( - seq_action.data, expected_seq_action.data, - "data does not match expected data for action with rollup_id {:?}", - seq_action.rollup_id, - ); + for (action, expected_action) in expected_seq_actions.iter().zip(actions) { + let expected_seq_action = expected_action.as_sequence().unwrap(); + assert_eq!(action.rollup_id, expected_seq_action.rollup_id); + assert_eq!(action.fee_asset, expected_seq_action.fee_asset); + } + + for (action, expected_seq_action) in bundle_txs.iter().zip(expected_seq_actions.iter()) { + match action.clone() { + RollupData::SequencedData(data) => { + assert_eq!( + data, expected_seq_action.data, + "data does not match expected data for action with rollup_id {:?}", + expected_seq_action.rollup_id + ) + } + _ => assert!( + true, + "expected RollupData::SequencedData, but got {:?}", + action + ), + } } } @@ -600,9 +827,15 @@ async fn chain_id_mismatch_returns_error() { use tendermint::chain::Id; // set up sequencer mock - let (sequencer, cfg, _keyfile) = setup().await; + let (sequencer, cfg, _keyfile, _test_executor) = setup().await; let shutdown_token = CancellationToken::new(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&cfg).unwrap())); + let rollup_name = RollupId::new([0; ROLLUP_ID_LEN]); + + let (filtered_block_sender, filtered_block_receiver) = + tokio::sync::mpsc::channel::(10); + let (finalized_hash_sender, finalized_hash_receiver) = + tokio::sync::mpsc::channel::(10); // mount a status response with an incorrect chain_id mount_genesis(&sequencer, "bad-chain-id").await; @@ -617,6 +850,12 @@ async fn chain_id_mismatch_returns_error() { max_bytes_per_bundle: cfg.max_bytes_per_bundle, bundle_queue_capacity: cfg.bundle_queue_capacity, shutdown_token: shutdown_token.clone(), + execution_api_url: cfg.execution_api_url, + chain_name: rollup_name.to_string(), + fee_asset: cfg.fee_asset, + max_bundle_size: cfg.max_bundle_size, + 
filtered_block_receiver, + finalized_block_hash_receiver: finalized_hash_receiver, metrics, } .build() diff --git a/crates/astria-composer/src/grpc.rs b/crates/astria-composer/src/grpc.rs index 8a24dcf8c..4f5814302 100644 --- a/crates/astria-composer/src/grpc.rs +++ b/crates/astria-composer/src/grpc.rs @@ -9,7 +9,10 @@ use std::net::SocketAddr; use astria_core::{ - generated::composer::v1alpha1::grpc_collector_service_server::GrpcCollectorServiceServer, + generated::composer::v1alpha1::{ + grpc_collector_service_server::GrpcCollectorServiceServer, + sequencer_hooks_service_server::SequencerHooksServiceServer, + }, primitive::v1::asset, }; use astria_eyre::{ @@ -21,12 +24,16 @@ use tokio::{ net::TcpListener, }; use tokio_util::sync::CancellationToken; -use tracing::instrument; +use tracing::{ + info, + instrument, +}; use crate::{ collectors, executor, metrics::Metrics, + sequencer_hooks::SequencerHooks, }; /// Listens for incoming gRPC requests and sends the Rollup transactions to the @@ -36,6 +43,7 @@ use crate::{ pub(crate) struct GrpcServer { listener: TcpListener, grpc_collector: collectors::Grpc, + sequencer_hooks: SequencerHooks, shutdown_token: CancellationToken, } @@ -45,6 +53,7 @@ pub(crate) struct Builder { pub(crate) shutdown_token: CancellationToken, pub(crate) metrics: &'static Metrics, pub(crate) fee_asset: asset::Denom, + pub(crate) sequencer_hooks: SequencerHooks, } impl Builder { @@ -56,6 +65,7 @@ impl Builder { shutdown_token, metrics, fee_asset, + sequencer_hooks, } = self; let listener = TcpListener::bind(grpc_addr) @@ -66,6 +76,7 @@ impl Builder { Ok(GrpcServer { listener, grpc_collector, + sequencer_hooks, shutdown_token, }) } @@ -80,16 +91,22 @@ impl GrpcServer { } pub(crate) async fn run_until_stopped(self) -> eyre::Result<()> { + info!("launching grpc server with grpc collector and sequencer hooks!"); let (mut health_reporter, health_service) = tonic_health::server::health_reporter(); let composer_service = 
GrpcCollectorServiceServer::new(self.grpc_collector); + let sequencer_hooks_service = SequencerHooksServiceServer::new(self.sequencer_hooks); let grpc_server = tonic::transport::Server::builder() .add_service(health_service) - .add_service(composer_service); + .add_service(composer_service) + .add_service(sequencer_hooks_service); health_reporter .set_serving::>() .await; + health_reporter + .set_serving::>() + .await; grpc_server .serve_with_incoming_shutdown( diff --git a/crates/astria-composer/src/lib.rs b/crates/astria-composer/src/lib.rs index cbf59addc..41f4655b9 100644 --- a/crates/astria-composer/src/lib.rs +++ b/crates/astria-composer/src/lib.rs @@ -46,7 +46,8 @@ pub mod config; mod executor; mod grpc; pub(crate) mod metrics; -mod rollup; +mod mock_grpc; +pub(crate) mod sequencer_hooks; #[cfg(test)] pub(crate) mod test_utils; pub(crate) mod utils; diff --git a/crates/astria-composer/src/metrics.rs b/crates/astria-composer/src/metrics.rs index 28aff1dde..039dd7b12 100644 --- a/crates/astria-composer/src/metrics.rs +++ b/crates/astria-composer/src/metrics.rs @@ -112,13 +112,13 @@ impl telemetry::Metrics for Metrics { where Self: Sized, { - let rollups = config - .parse_rollups() - .map_err(|error| Error::External(Box::new(error)))?; + let rollup = config.rollup.clone(); let (geth_txs_received, grpc_txs_received) = - register_txs_received(builder, rollups.keys())?; - let (geth_txs_dropped, grpc_txs_dropped) = register_txs_dropped(builder, rollups.keys())?; - let txs_dropped_too_large = register_txs_dropped_too_large(builder, rollups.keys())?; + register_txs_received(builder, vec![rollup.clone()].iter())?; + let (geth_txs_dropped, grpc_txs_dropped) = + register_txs_dropped(builder, vec![rollup.clone()].iter())?; + let txs_dropped_too_large = + register_txs_dropped_too_large(builder, vec![rollup.clone()].iter())?; let nonce_fetch_count = builder .new_counter_factory( diff --git a/crates/astria-composer/src/mock_grpc.rs b/crates/astria-composer/src/mock_grpc.rs 
new file mode 100644 index 000000000..c332cfed0 --- /dev/null +++ b/crates/astria-composer/src/mock_grpc.rs @@ -0,0 +1,263 @@ +#![allow(dead_code)] +use std::{ + net::SocketAddr, + sync::Arc, +}; + +use astria_core::generated::execution::v1alpha2::{ + execution_service_server::{ + ExecutionService, + ExecutionServiceServer, + }, + BatchGetBlocksRequest, + BatchGetBlocksResponse, + Block, + CommitmentState, + ExecuteBlockRequest, + ExecuteBlockResponse, + GenesisInfo, + GetBlockRequest, + GetCommitmentStateRequest, + GetGenesisInfoRequest, + UpdateCommitmentStateRequest, +}; +use astria_eyre::eyre::{ + self, + WrapErr as _, +}; +use astria_grpc_mock::{ + matcher::message_partial_pbjson, + MockServer, +}; +use tokio::task::JoinHandle; +use tonic::transport::Server; + +pub(crate) struct MockGrpc { + _server: JoinHandle>, + pub(crate) mock_server: MockServer, + pub(crate) local_addr: SocketAddr, +} + +impl MockGrpc { + pub(crate) async fn spawn() -> Self { + use tokio_stream::wrappers::TcpListenerStream; + + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let local_addr = listener.local_addr().unwrap(); + + let mock_server = MockServer::new(); + + let server = { + let execution_service = ExecutionServiceImpl::new(mock_server.clone()); + tokio::spawn(async move { + Server::builder() + .add_service(ExecutionServiceServer::new(execution_service)) + .serve_with_incoming(TcpListenerStream::new(listener)) + .await + .wrap_err("gRPC server failed") + }) + }; + MockGrpc { + _server: server, + mock_server, + local_addr, + } + } +} + +macro_rules! 
define_and_impl_service { + (impl $trait:ident for $target:ident { $( ($rpc:ident: $request:ty => $response:ty) )* }) => { + struct $target { + mock_server: ::astria_grpc_mock::MockServer, + } + + impl $target { + fn new(mock_server: ::astria_grpc_mock::MockServer) -> Self { + Self { mock_server, } + } + } + + #[tonic::async_trait] + impl $trait for $target { + $( + async fn $rpc(self: Arc, request: ::tonic::Request<$request>) -> ::tonic::Result<::tonic::Response<$response>> { + self.mock_server.handle_request(stringify!($rpc), request).await + } + )+ + } + } +} + +define_and_impl_service!(impl ExecutionService for ExecutionServiceImpl { + (execute_block: ExecuteBlockRequest => ExecuteBlockResponse) + (get_commitment_state: GetCommitmentStateRequest => CommitmentState) + (get_block: GetBlockRequest => Block) + (get_genesis_info: GetGenesisInfoRequest => GenesisInfo) + (batch_get_blocks: BatchGetBlocksRequest => BatchGetBlocksResponse) + (update_commitment_state: UpdateCommitmentStateRequest => CommitmentState) +}); + +#[macro_export] +macro_rules! execute_block_response { + (number: $number:expr,hash: $hash:expr,parent: $parent:expr $(,)?, included_transactions: $included_transactions:expr $(,)?) => { + ::astria_core::generated::execution::v1alpha2::ExecuteBlockResponse { + block: Some($crate::block!( + number: $number, + hash: $hash, + parent: $parent, + )), + included_transactions: $included_transactions, + } + }; +} + +#[macro_export] +macro_rules! block { + (number: $number:expr,hash: $hash:expr,parent: $parent:expr $(,)?) => { + ::astria_core::generated::execution::v1alpha2::Block { + number: $number, + hash: ::bytes::Bytes::from(Vec::from($hash)), + parent_block_hash: ::bytes::Bytes::from(Vec::from($parent)), + timestamp: Some(::pbjson_types::Timestamp { + seconds: 1, + nanos: 1, + }), + } + }; +} + +#[macro_export] +macro_rules! 
commitment_state { + ( + firm: (number: $firm_number:expr,hash: $firm_hash:expr,parent: $firm_parent:expr $(,)?), + soft: (number: $soft_number:expr,hash: $soft_hash:expr,parent: $soft_parent:expr $(,)?), + base_celestia_height: $base_celestia_height:expr $(,)? + ) => { + ::astria_core::generated::execution::v1alpha2::CommitmentState { + firm: Some($crate::block!( + number: $firm_number, + hash: $firm_hash, + parent: $firm_parent, + )), + soft: Some($crate::block!( + number: $soft_number, + hash: $soft_hash, + parent: $soft_parent, + )), + base_celestia_height: $base_celestia_height, + } + }; +} + +#[macro_export] +macro_rules! mount_get_commitment_state { + ( + $test_env:ident, + firm: ( number: $firm_number:expr, hash: $firm_hash:expr, parent: $firm_parent:expr$(,)? ), + soft: ( number: $soft_number:expr, hash: $soft_hash:expr, parent: $soft_parent:expr$(,)? ), + base_celestia_height: $base_celestia_height:expr + $(,)? + ) => { + $test_env + .mount_get_commitment_state($crate::commitment_state!( + firm: ( + number: $firm_number, + hash: $firm_hash, + parent: $firm_parent, + ), + soft: ( + number: $soft_number, + hash: $soft_hash, + parent: $soft_parent, + ), + base_celestia_height: $base_celestia_height, + )) + .await + }; +} + +#[macro_export] +macro_rules! mount_executed_block { + ( + $test_env:ident, + mock_name: $mock_name:expr, + number: $number:expr, + hash: $hash:expr, + included_transactions: $included_transactions:expr, + parent: $parent:expr $(,)?, + ) => {{ + $test_env.mount_execute_block( + $mock_name.into(), + ::serde_json::json!({ + // TODO - figure out why its not matching? 
+ // "prevBlockHash": BASE64_STANDARD.encode($parent), + // "simulateOnly": true, + // "transactions": $included_transactions, + }), + $crate::execute_block_response!( + number: $number, + hash: $hash, + parent: $parent, + included_transactions: $included_transactions + ) + ) + .await + }}; + ( + $test_env:ident, + number: $number:expr, + hash: $hash:expr, + included_transactions: $included_transactions:expr, + parent: $parent:expr $(,)? + ) => { + mount_executed_block!( + $test_env, + mock_name: None, + number: $number, + hash: $hash, + parent: $parent, + included_transactions: $included_transactions + ) + }; +} + +pub(crate) struct TestExecutor { + pub(crate) mock_grpc: MockGrpc, +} + +impl TestExecutor { + pub(crate) async fn mount_get_commitment_state(&self, commitment_state: CommitmentState) { + astria_grpc_mock::Mock::for_rpc_given( + "get_commitment_state", + astria_grpc_mock::matcher::message_type::(), + ) + .respond_with(astria_grpc_mock::response::constant_response( + commitment_state, + )) + .expect(1..) 
+ .mount(&self.mock_grpc.mock_server) + .await; + } + + pub(crate) async fn mount_execute_block( + &self, + mock_name: Option<&str>, + expected_pbjson: S, + response: ExecuteBlockResponse, + ) -> astria_grpc_mock::MockGuard { + use astria_grpc_mock::{ + response::constant_response, + Mock, + }; + + let mut mock = + Mock::for_rpc_given("execute_block", message_partial_pbjson(&expected_pbjson)) + .respond_with(constant_response(response)); + if let Some(name) = mock_name { + mock = mock.with_name(name); + } + mock.expect(1) + .mount_as_scoped(&self.mock_grpc.mock_server) + .await + } +} diff --git a/crates/astria-composer/src/sequencer_hooks.rs b/crates/astria-composer/src/sequencer_hooks.rs new file mode 100644 index 000000000..973ab0833 --- /dev/null +++ b/crates/astria-composer/src/sequencer_hooks.rs @@ -0,0 +1,170 @@ +use std::{ + sync::Arc, + time::Duration, +}; + +use astria_core::{ + generated::composer::v1alpha1::{ + sequencer_hooks_service_server::SequencerHooksService, + SendFinalizedHashRequest, + SendFinalizedHashResponse, + SendOptimisticBlockRequest, + SendOptimisticBlockResponse, + }, + protocol::transaction::v1alpha1::action::SequenceAction, + Protobuf, +}; +use astria_eyre::eyre::WrapErr; +use bytes::Bytes; +use pbjson_types::Timestamp; +use tokio::sync::{ + mpsc, + mpsc::error::SendTimeoutError, +}; +use tonic::{ + Request, + Response, + Status, +}; +use tracing::info; + +const SEND_TIMEOUT: u64 = 2; + +pub(crate) struct OptimisticBlockInfo { + block_hash: Bytes, + seq_actions: Vec, + time: Timestamp, +} + +impl OptimisticBlockInfo { + pub(crate) fn new( + block_hash: Bytes, + seq_actions: Vec, + time: Timestamp, + ) -> Self { + Self { + block_hash, + seq_actions, + time, + } + } + + pub(crate) fn block_hash(&self) -> Bytes { + self.block_hash.clone() + } + + pub(crate) fn seq_actions(&self) -> Vec { + self.seq_actions.clone() + } + + pub(crate) fn time(&self) -> Timestamp { + self.time.clone() + } +} + +pub(crate) struct FinalizedHashInfo { + 
block_hash: Bytes, +} + +impl FinalizedHashInfo { + pub(crate) fn new(block_hash: Bytes) -> Self { + Self { + block_hash, + } + } + + pub(crate) fn block_hash(&self) -> Bytes { + self.block_hash.clone() + } +} + +pub(crate) struct SequencerHooks { + optimistic_block_sender: mpsc::Sender, + finalized_hash_sender: mpsc::Sender, +} + +impl SequencerHooks { + pub(crate) fn new( + optimistic_block_sender: mpsc::Sender, + finalized_hash_sender: mpsc::Sender, + ) -> Self { + Self { + optimistic_block_sender, + finalized_hash_sender, + } + } + + pub(crate) async fn send_optimistic_block_with_timeout( + &self, + req: OptimisticBlockInfo, + ) -> Result<(), SendTimeoutError> { + self.optimistic_block_sender + .send_timeout(req, Duration::from_secs(SEND_TIMEOUT)) + .await + } + + pub(crate) async fn send_finalized_hash_with_timeout( + &self, + req: FinalizedHashInfo, + ) -> Result<(), SendTimeoutError> { + self.finalized_hash_sender + .send_timeout(req, Duration::from_secs(SEND_TIMEOUT)) + .await + } +} + +#[async_trait::async_trait] +impl SequencerHooksService for SequencerHooks { + async fn send_optimistic_block( + self: Arc, + request: Request, + ) -> Result, Status> { + let inner = request.into_inner(); + + let mut seq_actions = vec![]; + for action in &inner.seq_action { + match SequenceAction::try_from_raw_ref(action) { + Ok(action) => seq_actions.push(action), + Err(e) => { + info!("Failed to convert sequence action: {:?}", e); + return Err(Status::invalid_argument("invalid sequence action")); + } + } + } + + return match self + .send_optimistic_block_with_timeout(OptimisticBlockInfo::new( + inner.block_hash, + seq_actions, + inner.time.unwrap(), + )) + .await + .wrap_err("unable to send optimistic block to executor") + { + Ok(()) => Ok(Response::new(SendOptimisticBlockResponse {})), + Err(e) => { + info!("Failed to send optimistic block: {:?}", e); + return Err(Status::internal("Failed to send optimistic block")); + } + }; + } + + async fn send_finalized_hash( + self: 
Arc, + request: Request, + ) -> Result, Status> { + let inner = request.into_inner(); + + return match self + .send_finalized_hash_with_timeout(FinalizedHashInfo::new(inner.block_hash)) + .await + .wrap_err("unable to send finalized block hash to executor") + { + Ok(()) => Ok(Response::new(SendFinalizedHashResponse {})), + Err(e) => { + info!("Failed to send finalized_block hash: {:?}", e); + return Err(Status::internal("Failed to send finalized block hash")); + } + }; + } +} diff --git a/crates/astria-composer/tests/blackbox/api.rs b/crates/astria-composer/tests/blackbox/api.rs index c65e2ff95..fa333c19a 100644 --- a/crates/astria-composer/tests/blackbox/api.rs +++ b/crates/astria-composer/tests/blackbox/api.rs @@ -4,13 +4,5 @@ async fn readyz_with_one_rollup() { // spawn_composer hits `/readyz` as part of starting the test // environment. If this future return then `readyz` must have // returned `status: ok`. - let _test_composer = spawn_composer(&["test1"]).await; -} - -#[tokio::test] -async fn readyz_with_two_rollups() { - // spawn_composer hits `/readyz` as part of starting the test - // environment. If this future return then `readyz` must have - // returned `status: ok`. 
- let _test_composer = spawn_composer(&["test1", "test2"]).await; + let _test_composer = spawn_composer("test1").await; } diff --git a/crates/astria-composer/tests/blackbox/geth_collector.rs b/crates/astria-composer/tests/blackbox/geth_collector.rs index e6bce832e..6ba886287 100644 --- a/crates/astria-composer/tests/blackbox/geth_collector.rs +++ b/crates/astria-composer/tests/blackbox/geth_collector.rs @@ -1,10 +1,17 @@ use std::time::Duration; +use astria_composer::{ + mount_executed_block, + mount_get_commitment_state, +}; use astria_core::{ generated::protocol::accounts::v1alpha1::NonceResponse, primitive::v1::RollupId, + sequencerblock::v1alpha1::block::RollupData, + Protobuf, }; use ethers::types::Transaction; +use futures::future::join; use crate::helper::{ mount_broadcast_tx_sync_invalid_nonce_mock, @@ -18,7 +25,7 @@ use crate::helper::{ async fn tx_from_one_rollup_is_received_by_sequencer() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 - let test_composer = spawn_composer(&["test1"]).await; + let test_composer = spawn_composer("test1").await; tokio::time::timeout( Duration::from_millis(100), test_composer.setup_guard.wait_until_satisfied(), @@ -29,14 +36,41 @@ async fn tx_from_one_rollup_is_received_by_sequencer() { let expected_rollup_ids = vec![RollupId::from_unhashed_bytes("test1")]; let mock_guard = mount_broadcast_tx_sync_mock(&test_composer.sequencer, expected_rollup_ids, vec![0]).await; - test_composer.rollup_nodes["test1"] - .push_tx(Transaction::default()) - .unwrap(); + + let tx = Transaction::default(); + let data = tx.rlp().to_vec(); + let rollup_data = vec![RollupData::SequencedData(data.into()).to_raw()]; + + let soft_parent_hash = [1; 64]; + let soft_block_number = 1; + let soft_block_hash = [2; 64]; + + let test_executor = test_composer.test_executor; + + mount_get_commitment_state!( + test_executor, + firm: ( number: 1, hash: [1; 64], parent: [0; 64], ), + soft: ( number: soft_block_number, hash: 
soft_block_hash, parent: soft_parent_hash, ), + base_celestia_height: 1, + ); + + let execute_block = mount_executed_block!(test_executor, + mock_name: "execute_block", + number: soft_block_number, + hash: soft_block_hash, + included_transactions: rollup_data.clone(), + parent: soft_parent_hash.to_vec(), + ); + + test_composer.rollup_nodes["test1"].push_tx(tx).unwrap(); // wait for 1 sequencer block time to make sure the bundle is preempted tokio::time::timeout( Duration::from_millis(test_composer.cfg.block_time_ms), - mock_guard.wait_until_satisfied(), + join( + mock_guard.wait_until_satisfied(), + execute_block.wait_until_satisfied(), + ), ) .await .expect("mocked sequencer should have received a broadcast message from composer"); @@ -46,7 +80,7 @@ async fn tx_from_one_rollup_is_received_by_sequencer() { async fn collector_restarts_after_exit() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 - let test_composer = spawn_composer(&["test1"]).await; + let test_composer = spawn_composer("test1").await; tokio::time::timeout( Duration::from_millis(100), test_composer.setup_guard.wait_until_satisfied(), @@ -67,9 +101,33 @@ async fn collector_restarts_after_exit() { let expected_rollup_ids = vec![RollupId::from_unhashed_bytes("test1")]; let mock_guard = mount_broadcast_tx_sync_mock(&test_composer.sequencer, expected_rollup_ids, vec![0]).await; - test_composer.rollup_nodes["test1"] - .push_tx(Transaction::default()) - .unwrap(); + + let test_executor = test_composer.test_executor; + + let soft_parent_hash = [1; 64]; + let soft_block_number = 1; + let soft_block_hash = [2; 64]; + + mount_get_commitment_state!( + test_executor, + firm: ( number: 1, hash: [1; 64], parent: [0; 64], ), + soft: ( number: soft_block_number, hash: soft_block_hash, parent: soft_parent_hash, ), + base_celestia_height: 1, + ); + + let tx = Transaction::default(); + let data = tx.rlp().to_vec(); + let rollup_data = 
vec![RollupData::SequencedData(data.into()).to_raw()]; + + let _execute_block = mount_executed_block!(test_executor, + mock_name: "execute_block", + number: soft_block_number, + hash: soft_block_hash, + included_transactions: rollup_data.clone(), + parent: soft_parent_hash.to_vec(), + ); + + test_composer.rollup_nodes["test1"].push_tx(tx).unwrap(); // wait for 1 sequencer block time to make sure the bundle is preempted // we added an extra 1000ms to the block time to make sure the collector has restarted @@ -88,7 +146,7 @@ async fn invalid_nonce_causes_resubmission_under_different_nonce() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 - let test_composer = spawn_composer(&["test1"]).await; + let test_composer = spawn_composer("test1").await; tokio::time::timeout( Duration::from_millis(100), test_composer.setup_guard.wait_until_satisfied(), @@ -119,11 +177,34 @@ async fn invalid_nonce_causes_resubmission_under_different_nonce() { let valid_nonce_guard = mount_broadcast_tx_sync_mock(&test_composer.sequencer, expected_rollup_ids, vec![1]).await; + let test_executor = test_composer.test_executor; + + let soft_parent_hash = [1; 64]; + let soft_block_number = 1; + let soft_block_hash = [2; 64]; + + mount_get_commitment_state!( + test_executor, + firm: ( number: 1, hash: [1; 64], parent: [0; 64], ), + soft: ( number: soft_block_number, hash: soft_block_hash, parent: soft_parent_hash, ), + base_celestia_height: 1, + ); + + let tx = Transaction::default(); + let data = tx.rlp().to_vec(); + let rollup_data = vec![RollupData::SequencedData(data.into()).to_raw()]; + + let _execute_block = mount_executed_block!(test_executor, + mock_name: "execute_block", + number: soft_block_number, + hash: soft_block_hash, + included_transactions: rollup_data.clone(), + parent: soft_parent_hash.to_vec(), + ); + // Push a tx to the rollup node so that it is picked up by the composer and submitted with the // stored nonce of 0, triggering the nonce 
refetch process - test_composer.rollup_nodes["test1"] - .push_tx(Transaction::default()) - .unwrap(); + test_composer.rollup_nodes["test1"].push_tx(tx).unwrap(); // wait for 1 sequencer block time to make sure the bundle is preempted tokio::time::timeout( @@ -152,7 +233,7 @@ async fn invalid_nonce_causes_resubmission_under_different_nonce() { async fn single_rollup_tx_payload_integrity() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 - let test_composer = spawn_composer(&["test1"]).await; + let test_composer = spawn_composer("test1").await; tokio::time::timeout( Duration::from_millis(100), test_composer.setup_guard.wait_until_satisfied(), @@ -164,12 +245,39 @@ async fn single_rollup_tx_payload_integrity() { let mock_guard = mount_matcher_verifying_tx_integrity(&test_composer.sequencer, tx.clone()).await; + let soft_parent_hash = [1; 64]; + let soft_block_number = 1; + let soft_block_hash = [2; 64]; + + let test_executor = test_composer.test_executor; + + mount_get_commitment_state!( + test_executor, + firm: ( number: 1, hash: [1; 64], parent: [0; 64], ), + soft: ( number: soft_block_number, hash: soft_block_hash, parent: soft_parent_hash, ), + base_celestia_height: 1, + ); + + let data = tx.rlp().to_vec(); + let rollup_data = vec![RollupData::SequencedData(data.into()).to_raw()]; + + let execute_block = mount_executed_block!(test_executor, + mock_name: "execute_block", + number: soft_block_number, + hash: soft_block_hash, + included_transactions: rollup_data.clone(), + parent: soft_parent_hash.to_vec(), + ); + test_composer.rollup_nodes["test1"].push_tx(tx).unwrap(); // wait for 1 sequencer block time to make sure the bundle is preempted tokio::time::timeout( Duration::from_millis(test_composer.cfg.block_time_ms), - mock_guard.wait_until_satisfied(), + join( + mock_guard.wait_until_satisfied(), + execute_block.wait_until_satisfied(), + ), ) .await .expect("mocked sequencer should have received a broadcast message from 
composer"); diff --git a/crates/astria-composer/tests/blackbox/grpc_collector.rs b/crates/astria-composer/tests/blackbox/grpc_collector.rs index 8a4a963ac..d608e4a96 100644 --- a/crates/astria-composer/tests/blackbox/grpc_collector.rs +++ b/crates/astria-composer/tests/blackbox/grpc_collector.rs @@ -1,5 +1,9 @@ use std::time::Duration; +use astria_composer::{ + mount_executed_block, + mount_get_commitment_state, +}; use astria_core::{ generated::{ composer::v1alpha1::{ @@ -9,9 +13,11 @@ use astria_core::{ protocol::accounts::v1alpha1::NonceResponse, }, primitive::v1::RollupId, + sequencerblock::v1alpha1::block::RollupData, + Protobuf, }; +use bytes::Bytes; use ethers::prelude::Transaction; -use prost::bytes::Bytes; use crate::helper::{ mount_broadcast_tx_sync_invalid_nonce_mock, @@ -23,7 +29,7 @@ use crate::helper::{ #[tokio::test] async fn tx_from_one_rollup_is_received_by_sequencer() { - let test_composer = spawn_composer(&[]).await; + let test_composer = spawn_composer("test1").await; tokio::time::timeout( Duration::from_millis(100), test_composer.setup_guard.wait_until_satisfied(), @@ -37,6 +43,30 @@ async fn tx_from_one_rollup_is_received_by_sequencer() { mount_broadcast_tx_sync_mock(&test_composer.sequencer, expected_chain_ids, vec![0]).await; let tx = Transaction::default(); + let test_executor = test_composer.test_executor; + + let soft_parent_hash = [1; 64]; + let soft_block_number = 1; + let soft_block_hash = [2; 64]; + + mount_get_commitment_state!( + test_executor, + firm: ( number: 1, hash: [1; 64], parent: [0; 64], ), + soft: ( number: soft_block_number, hash: soft_block_hash, parent: soft_parent_hash, ), + base_celestia_height: 1, + ); + + let data = tx.rlp().to_vec(); + let rollup_data = vec![RollupData::SequencedData(Bytes::from(data)).to_raw()]; + + let _execute_block = mount_executed_block!(test_executor, + mock_name: "execute_block", + number: soft_block_number, + hash: soft_block_hash, + included_transactions: rollup_data.clone(), + parent: 
soft_parent_hash.to_vec(), + ); + // send sequence action request to the grpc collector let mut composer_client = GrpcCollectorServiceClient::connect(format!( "http://{}", @@ -67,14 +97,14 @@ async fn invalid_nonce_causes_resubmission_under_different_nonce() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 - let rollup_id = RollupId::from_unhashed_bytes("test1"); - let test_composer = spawn_composer(&[]).await; + let test_composer = spawn_composer("test1").await; tokio::time::timeout( Duration::from_millis(100), test_composer.setup_guard.wait_until_satisfied(), ) .await .expect("composer and sequencer should have been setup successfully"); + let rollup_id = RollupId::from_unhashed_bytes(test_composer.cfg.rollup.clone()); // Reject the first transaction for invalid nonce let invalid_nonce_guard = @@ -99,6 +129,31 @@ async fn invalid_nonce_causes_resubmission_under_different_nonce() { // Send a tx to the composer so that it is picked up by the grpc collector and submitted with // the stored nonce of 0, triggering the nonce refetch process let tx = Transaction::default(); + + let soft_parent_hash = [1; 64]; + let soft_block_number = 1; + let soft_block_hash = [2; 64]; + + let test_executor = test_composer.test_executor; + + mount_get_commitment_state!( + test_executor, + firm: ( number: 1, hash: [1; 64], parent: [0; 64], ), + soft: ( number: soft_block_number, hash: soft_block_hash, parent: soft_parent_hash, ), + base_celestia_height: 1, + ); + + let data = tx.rlp().to_vec(); + let rollup_data = vec![RollupData::SequencedData(Bytes::from(data)).to_raw()]; + + let _execute_block = mount_executed_block!(test_executor, + mock_name: "execute_block", + number: soft_block_number, + hash: soft_block_hash, + included_transactions: rollup_data.clone(), + parent: soft_parent_hash.to_vec(), + ); + // send sequence action request to the grpc collector let mut composer_client = GrpcCollectorServiceClient::connect(format!( "http://{}", @@ 
-141,19 +196,43 @@ async fn invalid_nonce_causes_resubmission_under_different_nonce() { async fn single_rollup_tx_payload_integrity() { // Spawn a composer with a mock sequencer and a mock rollup node // Initial nonce is 0 - let rollup_id = RollupId::from_unhashed_bytes("test1"); - let test_composer = spawn_composer(&[]).await; + let test_composer = spawn_composer("test1").await; tokio::time::timeout( Duration::from_millis(100), test_composer.setup_guard.wait_until_satisfied(), ) .await .expect("composer and sequencer should have been setup successfully"); + let rollup_id = RollupId::from_unhashed_bytes(test_composer.cfg.rollup.clone()); let tx: Transaction = serde_json::from_str(TEST_ETH_TX_JSON).unwrap(); let mock_guard = mount_matcher_verifying_tx_integrity(&test_composer.sequencer, tx.clone()).await; + let soft_parent_hash = [1; 64]; + let soft_block_number = 1; + let soft_block_hash = [2; 64]; + + let test_executor = test_composer.test_executor; + + mount_get_commitment_state!( + test_executor, + firm: ( number: 1, hash: [1; 64], parent: [0; 64], ), + soft: ( number: soft_block_number, hash: soft_block_hash, parent: soft_parent_hash, ), + base_celestia_height: 1, + ); + + let data = tx.rlp().to_vec(); + let rollup_data = vec![RollupData::SequencedData(Bytes::from(data)).to_raw()]; + + let _execute_block = mount_executed_block!(test_executor, + mock_name: "execute_block", + number: soft_block_number, + hash: soft_block_hash, + included_transactions: rollup_data.clone(), + parent: soft_parent_hash.to_vec(), + ); + // send sequence action request to the grpc generic collector let mut composer_client = GrpcCollectorServiceClient::connect(format!( "http://{}", diff --git a/crates/astria-composer/tests/blackbox/helper/mock_grpc.rs b/crates/astria-composer/tests/blackbox/helper/mock_grpc.rs new file mode 100644 index 000000000..00b2c6e9b --- /dev/null +++ b/crates/astria-composer/tests/blackbox/helper/mock_grpc.rs @@ -0,0 +1,330 @@ +use std::{ + net::SocketAddr, + 
sync::Arc, +}; + +use astria_core::generated::{ + execution::v1alpha2::{ + execution_service_server::{ + ExecutionService, + ExecutionServiceServer, + }, + BatchGetBlocksRequest, + BatchGetBlocksResponse, + Block, + CommitmentState, + ExecuteBlockRequest, + ExecuteBlockResponse, + GenesisInfo, + GetBlockRequest, + GetCommitmentStateRequest, + GetGenesisInfoRequest, + UpdateCommitmentStateRequest, + }, + sequencerblock::v1alpha1::{ + sequencer_service_server::{ + SequencerService, + SequencerServiceServer, + }, + FilteredSequencerBlock, + GetFilteredSequencerBlockRequest, + GetPendingNonceRequest, + GetPendingNonceResponse, + GetSequencerBlockRequest, + SequencerBlock, + }, +}; +use astria_eyre::eyre::{ + self, + WrapErr as _, +}; +use astria_grpc_mock::{ + AnyMessage, + Match, + MockServer, +}; +use tokio::task::JoinHandle; +use tonic::{ + transport::Server, + Request, + Response, +}; + +pub struct MockGrpc { + _server: JoinHandle>, + pub mock_server: MockServer, + pub local_addr: SocketAddr, +} + +impl MockGrpc { + pub async fn spawn() -> Self { + use tokio_stream::wrappers::TcpListenerStream; + + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let local_addr = listener.local_addr().unwrap(); + + let mock_server = MockServer::new(); + + let server = { + let execution_service = ExecutionServiceImpl::new(mock_server.clone()); + let sequencer_service = SequencerServiceImpl::new(mock_server.clone()); + tokio::spawn(async move { + Server::builder() + .add_service(ExecutionServiceServer::new(execution_service)) + .add_service(SequencerServiceServer::new(sequencer_service)) + .serve_with_incoming(TcpListenerStream::new(listener)) + .await + .wrap_err("gRPC server failed") + }) + }; + Self { + _server: server, + mock_server, + local_addr, + } + } +} + +struct SequencerServiceImpl { + mock_server: MockServer, +} + +impl SequencerServiceImpl { + fn new(mock_server: MockServer) -> Self { + Self { + mock_server, + } + } +} + +// XXX: Manually 
implementing this trait instead of using the `define_and_impl_service!` macro +// because `GetSequencerBlockRequest` and `SequencerBlock` don't currently implement +// `serde::Serialize`. +#[tonic::async_trait] +impl SequencerService for SequencerServiceImpl { + async fn get_sequencer_block( + self: Arc, + _request: Request, + ) -> tonic::Result> { + unimplemented!() + } + + async fn get_filtered_sequencer_block( + self: Arc, + request: Request, + ) -> tonic::Result> { + self.mock_server + .handle_request("get_filtered_sequencer_block", request) + .await + } + + async fn get_pending_nonce( + self: Arc, + _request: Request, + ) -> tonic::Result> { + unimplemented!() + } +} + +macro_rules! define_and_impl_service { + (impl $trait:ident for $target:ident { $( ($rpc:ident: $request:ty => $response:ty) )* }) => { + struct $target { + mock_server: ::astria_grpc_mock::MockServer, + } + + impl $target { + fn new(mock_server: ::astria_grpc_mock::MockServer) -> Self { + Self { mock_server, } + } + } + + #[tonic::async_trait] + impl $trait for $target { + $( + async fn $rpc(self: Arc, request: ::tonic::Request<$request>) -> ::tonic::Result<::tonic::Response<$response>> { + self.mock_server.handle_request(stringify!($rpc), request).await + } + )+ + } + } +} + +define_and_impl_service!(impl ExecutionService for ExecutionServiceImpl { + (execute_block: ExecuteBlockRequest => ExecuteBlockResponse) + (get_commitment_state: GetCommitmentStateRequest => CommitmentState) + (get_block: GetBlockRequest => Block) + (get_genesis_info: GetGenesisInfoRequest => GenesisInfo) + (batch_get_blocks: BatchGetBlocksRequest => BatchGetBlocksResponse) + (update_commitment_state: UpdateCommitmentStateRequest => CommitmentState) +}); + +#[macro_export] +macro_rules! execute_block_response { + (number: $number:expr,hash: $hash:expr,parent: $parent:expr $(,)?, included_transactions: $included_transactions:expr $(,)?) 
=> { + ::astria_core::generated::execution::v1alpha2::ExecuteBlockResponse { + block: Some($crate::block!( + number: $number, + hash: $hash, + parent: $parent, + )), + included_transactions: $included_transactions, + } + }; +} + +#[macro_export] +macro_rules! block { + (number: $number:expr,hash: $hash:expr,parent: $parent:expr $(,)?) => { + ::astria_core::generated::execution::v1alpha2::Block { + number: $number, + hash: ::bytes::Bytes::from(Vec::from($hash)), + parent_block_hash: ::bytes::Bytes::from(Vec::from($parent)), + timestamp: Some(::pbjson_types::Timestamp { + seconds: 1, + nanos: 1, + }), + } + }; +} + +#[macro_export] +macro_rules! commitment_state { + ( + firm: (number: $firm_number:expr,hash: $firm_hash:expr,parent: $firm_parent:expr $(,)?), + soft: (number: $soft_number:expr,hash: $soft_hash:expr,parent: $soft_parent:expr $(,)?), + base_celestia_height: $base_celestia_height:expr $(,)? + ) => { + ::astria_core::generated::execution::v1alpha2::CommitmentState { + firm: Some($crate::block!( + number: $firm_number, + hash: $firm_hash, + parent: $firm_parent, + )), + soft: Some($crate::block!( + number: $soft_number, + hash: $soft_hash, + parent: $soft_parent, + )), + base_celestia_height: $base_celestia_height, + } + }; +} + +#[macro_export] +macro_rules! mount_get_commitment_state { + ( + $test_env:ident, + firm: ( number: $firm_number:expr, hash: $firm_hash:expr, parent: $firm_parent:expr$(,)? ), + soft: ( number: $soft_number:expr, hash: $soft_hash:expr, parent: $soft_parent:expr$(,)? ), + base_celestia_height: $base_celestia_height:expr + $(,)? + ) => { + $test_env + .mount_get_commitment_state($crate::commitment_state!( + firm: ( + number: $firm_number, + hash: $firm_hash, + parent: $firm_parent, + ), + soft: ( + number: $soft_number, + hash: $soft_hash, + parent: $soft_parent, + ), + base_celestia_height: $base_celestia_height, + )) + .await + }; +} + +#[macro_export] +macro_rules! 
mount_executed_block { + ( + $test_env:ident, + mock_name: $mock_name:expr, + number: $number:expr, + hash: $hash:expr, + included_transactions: $included_transactions:expr, + parent: $parent:expr $(,)?, + ) => {{ + $test_env.mount_execute_block( + $mock_name.into(), + ::serde_json::json!({ + "prevBlockHash": $parent, + "transactions": $included_transactions, + }), + $crate::execute_block_response!( + number: $number, + hash: $hash, + parent: $parent, + included_transactions: $included_transactions + ) + ) + .await + }}; + ( + $test_env:ident, + number: $number:expr, + hash: $hash:expr, + included_transactions: $included_transactions:expr, + parent: $parent:expr $(,)? + ) => { + mount_executed_block!( + $test_env, + mock_name: None, + number: $number, + hash: $hash, + parent: $parent, + included_transactions: $included_transactions + ) + }; +} + +pub struct TestExecutor { + pub mock_grpc: MockGrpc, +} + +impl TestExecutor { + pub async fn mount_get_commitment_state(&self, commitment_state: CommitmentState) { + astria_grpc_mock::Mock::for_rpc_given( + "get_commitment_state", + astria_grpc_mock::matcher::message_type::(), + ) + .respond_with(astria_grpc_mock::response::constant_response( + commitment_state, + )) + .expect(1..) + .mount(&self.mock_grpc.mock_server) + .await; + } + + pub async fn mount_execute_block( + &self, + mock_name: Option<&str>, + _expected_pbjson: S, + response: ExecuteBlockResponse, + ) -> astria_grpc_mock::MockGuard { + use astria_grpc_mock::{ + response::constant_response, + Mock, + }; + + let mut mock = Mock::for_rpc_given("execute_block", AlwaysMatches {}) + .respond_with(constant_response(response)); + if let Some(name) = mock_name { + mock = mock.with_name(name); + } + mock.expect(1) + .mount_as_scoped(&self.mock_grpc.mock_server) + .await + } +} + +// TODO - this is a hack to bypass request body matching. 
Fix this +struct AlwaysMatches(); + +impl Match for AlwaysMatches { + fn matches(&self, _req: &Request) -> bool { + true + } +} diff --git a/crates/astria-composer/tests/blackbox/helper/mod.rs b/crates/astria-composer/tests/blackbox/helper/mod.rs index 8b564b300..9fc036922 100644 --- a/crates/astria-composer/tests/blackbox/helper/mod.rs +++ b/crates/astria-composer/tests/blackbox/helper/mod.rs @@ -14,6 +14,8 @@ use astria_composer::{ Metrics, }; use astria_core::{ + composer::v1alpha1::BuilderBundle, + generated::composer::v1alpha1::BuilderBundlePacket, primitive::v1::{ asset::{ Denom, @@ -25,11 +27,12 @@ use astria_core::{ abci::AbciErrorCode, transaction::v1alpha1::SignedTransaction, }, + sequencerblock::v1alpha1::block::RollupData, + Protobuf, }; use astria_eyre::eyre; use ethers::prelude::Transaction; use once_cell::sync::Lazy; -use telemetry::metrics; use tempfile::NamedTempFile; use tendermint_rpc::{ endpoint::broadcast::tx_sync, @@ -48,6 +51,12 @@ use wiremock::{ ResponseTemplate, }; +use crate::helper::mock_grpc::{ + MockGrpc, + TestExecutor, +}; + +pub mod mock_grpc; pub mod mock_sequencer; static TELEMETRY: Lazy<()> = Lazy::new(|| { @@ -58,7 +67,8 @@ static TELEMETRY: Lazy<()> = Lazy::new(|| { api_listen_addr: SocketAddr::new(IpAddr::from([0, 0, 0, 0]), 0), sequencer_url: String::new(), sequencer_chain_id: String::new(), - rollups: String::new(), + rollup: "".to_string(), + rollup_websocket_url: "".to_string(), private_key_file: String::new(), sequencer_address_prefix: String::new(), block_time_ms: 0, @@ -71,6 +81,8 @@ static TELEMETRY: Lazy<()> = Lazy::new(|| { pretty_print: false, grpc_addr: SocketAddr::new(IpAddr::from([0, 0, 0, 0]), 0), fee_asset: Denom::IbcPrefixed(IbcPrefixed::new([0; 32])), + execution_api_url: "".to_string(), + max_bundle_size: 0, }; if std::env::var_os("TEST_LOG").is_some() { let filter_directives = std::env::var("RUST_LOG").unwrap_or_else(|_| "info".into()); @@ -99,6 +111,7 @@ pub struct TestComposer { pub setup_guard: 
MockGuard, pub grpc_collector_addr: SocketAddr, pub metrics_handle: metrics::Handle, + pub test_executor: TestExecutor, } /// Spawns composer in a test environment. @@ -106,17 +119,17 @@ pub struct TestComposer { /// # Panics /// There is no explicit error handling in favour of panicking loudly /// and early. -pub async fn spawn_composer(rollup_ids: &[&str]) -> TestComposer { +pub async fn spawn_composer(rollup_name: &str) -> TestComposer { Lazy::force(&TELEMETRY); + let geth = Geth::spawn().await; + let rollup_websocket_url = format!("ws://{}", geth.local_addr()); + let mut rollup_nodes = HashMap::new(); - let mut rollups = String::new(); - for id in rollup_ids { - let geth = Geth::spawn().await; - let execution_url = format!("ws://{}", geth.local_addr()); - rollup_nodes.insert((*id).to_string(), geth); - rollups.push_str(&format!("{id}::{execution_url},")); - } + rollup_nodes.insert(rollup_name.to_string(), geth); + + let mock_execution_api_server = MockGrpc::spawn().await; + let (sequencer, sequencer_setup_guard) = mock_sequencer::start().await; let sequencer_url = sequencer.uri(); let keyfile = NamedTempFile::new().unwrap(); @@ -127,7 +140,7 @@ pub async fn spawn_composer(rollup_ids: &[&str]) -> TestComposer { log: String::new(), api_listen_addr: "127.0.0.1:0".parse().unwrap(), sequencer_chain_id: "test-chain-1".to_string(), - rollups, + rollup: rollup_name.to_string(), sequencer_url, private_key_file: keyfile.path().to_string_lossy().to_string(), sequencer_address_prefix: "astria".into(), @@ -141,6 +154,9 @@ pub async fn spawn_composer(rollup_ids: &[&str]) -> TestComposer { pretty_print: true, grpc_addr: "127.0.0.1:0".parse().unwrap(), fee_asset: "nria".parse().unwrap(), + execution_api_url: format!("http://{}", mock_execution_api_server.local_addr), + max_bundle_size: 200000, + rollup_websocket_url: rollup_websocket_url.to_string(), }; let (metrics, metrics_handle) = metrics::ConfigBuilder::new() @@ -166,6 +182,9 @@ pub async fn spawn_composer(rollup_ids: 
&[&str]) -> TestComposer { setup_guard: sequencer_setup_guard, grpc_collector_addr, metrics_handle, + test_executor: TestExecutor { + mock_grpc: mock_execution_api_server, + }, } } @@ -241,10 +260,19 @@ pub async fn mount_matcher_verifying_tx_integrity( .unwrap() .as_sequence() .unwrap(); + let seq_action_data = sequence_action.clone().data; + // unmarshall to BuilderBundlePacket + let builder_bundle_packet = BuilderBundlePacket::decode(seq_action_data).unwrap(); + let builder_bundle = + BuilderBundle::try_from_raw(builder_bundle_packet.bundle.unwrap()).unwrap(); + let transaction = builder_bundle.transactions().first().unwrap(); - let expected_rlp = expected_rlp.rlp().to_vec(); - - expected_rlp == sequence_action.data + if let RollupData::SequencedData(data) = transaction { + let expected_rlp = expected_rlp.rlp().to_vec(); + expected_rlp == data.clone() + } else { + false + } }; let jsonrpc_rsp = response::Wrapper::new_with_id( Id::Num(1), diff --git a/crates/astria-conductor/src/conductor.rs b/crates/astria-conductor/src/conductor.rs index 2fc3ac7b9..8d3e01358 100644 --- a/crates/astria-conductor/src/conductor.rs +++ b/crates/astria-conductor/src/conductor.rs @@ -161,6 +161,8 @@ impl Conductor { tasks.spawn(Self::CELESTIA, reader.run_until_stopped()); }; + info!("BHARATH: CONDUCTOR INITED!"); + Ok(Self { shutdown, tasks, diff --git a/crates/astria-conductor/src/executor/client.rs b/crates/astria-conductor/src/executor/client.rs index 6f9f9fcdb..fc815ae20 100644 --- a/crates/astria-conductor/src/executor/client.rs +++ b/crates/astria-conductor/src/executor/client.rs @@ -4,6 +4,7 @@ use astria_core::{ execution::v1alpha2::{ Block, CommitmentState, + ExecuteBlockResponse, GenesisInfo, }, generated::{ @@ -119,7 +120,7 @@ impl Client { prev_block_hash: Bytes, transactions: Vec, timestamp: Timestamp, - ) -> eyre::Result { + ) -> eyre::Result { use prost::Message; let transactions = transactions @@ -132,6 +133,7 @@ impl Client { prev_block_hash, transactions, timestamp: 
Some(timestamp), + simulate_only: false, }; let response = tryhard::retry_fn(|| { let mut client = self.inner.clone(); @@ -146,9 +148,12 @@ impl Client { code or because number of retries were exhausted", )? .into_inner(); - let block = Block::try_from_raw(response) - .wrap_err("failed converting raw response to validated block")?; - Ok(block) + let execute_block_response = ExecuteBlockResponse::try_from_raw(response) + .wrap_err("failed converting raw response to validated execute block response")?; + + // let block = Block::try_from_raw(response) + // .wrap_err("failed converting raw response to validated block")?; + Ok(execute_block_response) } /// Calls remote procedure `astria.execution.v1alpha2.GetCommitmentState` diff --git a/crates/astria-conductor/src/executor/mod.rs b/crates/astria-conductor/src/executor/mod.rs index 916a951e2..cb5e61043 100644 --- a/crates/astria-conductor/src/executor/mod.rs +++ b/crates/astria-conductor/src/executor/mod.rs @@ -4,6 +4,7 @@ use astria_core::{ execution::v1alpha2::{ Block, CommitmentState, + ExecuteBlockResponse, }, primitive::v1::RollupId, sequencerblock::v1alpha1::block::{ @@ -437,15 +438,15 @@ impl Executor { .await .wrap_err("failed to execute block")?; - self.does_block_response_fulfill_contract(ExecutionKind::Soft, &executed_block) + self.does_block_response_fulfill_contract(ExecutionKind::Soft, executed_block.block()) .wrap_err("execution API server violated contract")?; - self.update_commitment_state(Update::OnlySoft(executed_block.clone())) + self.update_commitment_state(Update::OnlySoft(executed_block.block().clone())) .await .wrap_err("failed to update soft commitment state")?; self.blocks_pending_finalization - .insert(block_number, executed_block); + .insert(block_number, executed_block.block().clone()); // XXX: We set an absolute number value here to avoid any potential issues of the remote // rollup state and the local state falling out of lock-step. 
@@ -488,9 +489,9 @@ impl Executor { .execute_block(parent_hash, executable_block) .await .wrap_err("failed to execute block")?; - self.does_block_response_fulfill_contract(ExecutionKind::Firm, &executed_block) + self.does_block_response_fulfill_contract(ExecutionKind::Firm, executed_block.block()) .wrap_err("execution API server violated contract")?; - Update::ToSame(executed_block, celestia_height) + Update::ToSame(executed_block.block().clone(), celestia_height) } else if let Some(block) = self.blocks_pending_finalization.remove(&block_number) { debug!( block_number, @@ -546,7 +547,7 @@ impl Executor { &mut self, parent_hash: Bytes, block: ExecutableBlock, - ) -> eyre::Result { + ) -> eyre::Result { let ExecutableBlock { transactions, timestamp, @@ -565,8 +566,8 @@ impl Executor { .record_transactions_per_executed_block(n_transactions); info!( - executed_block.hash = %telemetry::display::base64(&executed_block.hash()), - executed_block.number = executed_block.number(), + executed_block.hash = %telemetry::display::base64(&executed_block.block().hash()), + executed_block.number = executed_block.block().number(), "executed block", ); diff --git a/crates/astria-conductor/tests/blackbox/helpers/macros.rs b/crates/astria-conductor/tests/blackbox/helpers/macros.rs index e5a675627..3ff8f80ac 100644 --- a/crates/astria-conductor/tests/blackbox/helpers/macros.rs +++ b/crates/astria-conductor/tests/blackbox/helpers/macros.rs @@ -13,6 +13,20 @@ macro_rules! block { }; } +#[macro_export] +macro_rules! execute_block_response { + (number: $number:expr,hash: $hash:expr,parent: $parent:expr) => { + ::astria_core::generated::execution::v1alpha2::ExecuteBlockResponse { + block: Some($crate::block!( + number: $number, + hash: $hash, + parent: $parent, + )), + included_transactions: vec![], + } + }; +} + #[macro_export] macro_rules! celestia_network_head { (height: $height:expr) => { @@ -244,10 +258,10 @@ macro_rules! 
mount_executed_block { "prevBlockHash": BASE64_STANDARD.encode($parent), "transactions": [{"sequencedData": BASE64_STANDARD.encode($crate::helpers::data())}], }), - $crate::block!( + $crate::execute_block_response!( number: $number, hash: $hash, - parent: $parent, + parent: $parent ) ) .await diff --git a/crates/astria-conductor/tests/blackbox/helpers/mock_grpc.rs b/crates/astria-conductor/tests/blackbox/helpers/mock_grpc.rs index bf0b5807e..64be2aa99 100644 --- a/crates/astria-conductor/tests/blackbox/helpers/mock_grpc.rs +++ b/crates/astria-conductor/tests/blackbox/helpers/mock_grpc.rs @@ -1,3 +1,4 @@ +#[allow(dead_code)] use std::{ net::SocketAddr, sync::Arc, @@ -14,6 +15,7 @@ use astria_core::generated::{ Block, CommitmentState, ExecuteBlockRequest, + ExecuteBlockResponse, GenesisInfo, GetBlockRequest, GetCommitmentStateRequest, @@ -148,7 +150,7 @@ define_and_impl_service!(impl ExecutionService for ExecutionServiceImpl { (get_block: GetBlockRequest => Block) (get_genesis_info: GetGenesisInfoRequest => GenesisInfo) (batch_get_blocks: BatchGetBlocksRequest => BatchGetBlocksResponse) - (execute_block: ExecuteBlockRequest => Block) + (execute_block: ExecuteBlockRequest => ExecuteBlockResponse) (get_commitment_state: GetCommitmentStateRequest => CommitmentState) (update_commitment_state: UpdateCommitmentStateRequest => CommitmentState) }); diff --git a/crates/astria-conductor/tests/blackbox/helpers/mod.rs b/crates/astria-conductor/tests/blackbox/helpers/mod.rs index 3dfdf28c4..e17561c8c 100644 --- a/crates/astria-conductor/tests/blackbox/helpers/mod.rs +++ b/crates/astria-conductor/tests/blackbox/helpers/mod.rs @@ -11,7 +11,6 @@ use astria_core::{ brotli::compress_bytes, generated::{ execution::v1alpha2::{ - Block, CommitmentState, GenesisInfo, }, @@ -36,6 +35,7 @@ use telemetry::metrics; #[macro_use] mod macros; mod mock_grpc; +use astria_core::generated::execution::v1alpha2::ExecuteBlockResponse; use astria_eyre; pub use mock_grpc::MockGrpc; use serde_json::json; 
@@ -380,7 +380,7 @@ impl TestConductor { &self, mock_name: Option<&str>, expected_pbjson: S, - response: Block, + response: ExecuteBlockResponse, ) -> astria_grpc_mock::MockGuard { use astria_grpc_mock::{ matcher::message_partial_pbjson, diff --git a/crates/astria-core/src/composer/mod.rs b/crates/astria-core/src/composer/mod.rs new file mode 100644 index 000000000..32a5a9d4f --- /dev/null +++ b/crates/astria-core/src/composer/mod.rs @@ -0,0 +1 @@ +pub mod v1alpha1; diff --git a/crates/astria-core/src/composer/v1alpha1/mod.rs b/crates/astria-core/src/composer/v1alpha1/mod.rs new file mode 100644 index 000000000..a8dc6a27f --- /dev/null +++ b/crates/astria-core/src/composer/v1alpha1/mod.rs @@ -0,0 +1,165 @@ +use bytes::Bytes; + +use crate::{ + sequencerblock::v1alpha1::block::{ + RollupData, + RollupDataError, + }, + Protobuf, +}; + +#[derive(Debug, thiserror::Error)] +#[error(transparent)] +pub struct BuilderBundleError(BuilderBundleErrorKind); + +impl BuilderBundleError { + fn invalid_rollup_data(error: RollupDataError) -> Self { + Self(BuilderBundleErrorKind::InvalidRollupData(error)) + } +} + +#[derive(Debug, thiserror::Error)] +enum BuilderBundleErrorKind { + #[error("{0} invalid rollup data")] + InvalidRollupData(#[source] RollupDataError), +} + +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize))] +#[cfg_attr( + feature = "serde", + serde(into = "crate::generated::composer::v1alpha1::BuilderBundle") +)] +pub struct BuilderBundle { + transactions: Vec, + parent_hash: Bytes, +} + +impl BuilderBundle { + pub fn transactions(&self) -> &[RollupData] { + self.transactions.as_slice() + } + + pub fn parent_hash(&self) -> Bytes { + self.parent_hash.clone() + } +} + +impl From for crate::generated::composer::v1alpha1::BuilderBundle { + fn from(value: BuilderBundle) -> Self { + value.to_raw() + } +} + +impl Protobuf for BuilderBundle { + type Error = BuilderBundleError; + type Raw = crate::generated::composer::v1alpha1::BuilderBundle; + + fn 
try_from_raw_ref(raw: &Self::Raw) -> Result { + let crate::generated::composer::v1alpha1::BuilderBundle { + transactions, + parent_hash, + } = raw; + + let mut rollup_data_transactions = vec![]; + for transaction in transactions { + let rollup_data = RollupData::try_from_raw_ref(transaction) + .map_err(BuilderBundleError::invalid_rollup_data)?; + rollup_data_transactions.push(rollup_data); + } + + Ok(BuilderBundle { + transactions: rollup_data_transactions, + parent_hash: parent_hash.clone(), + }) + } + + fn to_raw(&self) -> Self::Raw { + crate::generated::composer::v1alpha1::BuilderBundle { + transactions: self.transactions.iter().map(Protobuf::to_raw).collect(), + parent_hash: self.parent_hash.clone(), + } + } +} + +#[derive(Debug, thiserror::Error)] +#[error(transparent)] +pub struct BuilderBundlePacketError(BuilderBundlePacketErrorKind); + +#[derive(Debug, thiserror::Error)] +enum BuilderBundlePacketErrorKind { + #[error("{0} field not set")] + FieldNotSet(&'static str), + #[error("{0} invalid bundle")] + InvalidBundle(#[source] BuilderBundleError), +} + +impl BuilderBundlePacketError { + fn field_not_set(field: &'static str) -> Self { + Self(BuilderBundlePacketErrorKind::FieldNotSet(field)) + } + + fn invalid_bundle(error: BuilderBundleError) -> Self { + Self(BuilderBundlePacketErrorKind::InvalidBundle(error)) + } +} + +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde", derive(serde::Serialize))] +#[cfg_attr( + feature = "serde", + serde(into = "crate::generated::composer::v1alpha1::BuilderBundlePacket") +)] +pub struct BuilderBundlePacket { + bundle: BuilderBundle, + signature: Bytes, +} + +impl BuilderBundlePacket { + pub fn bundle(&self) -> &BuilderBundle { + &self.bundle + } + + pub fn signature(&self) -> Bytes { + self.signature.clone() + } +} + +impl From for crate::generated::composer::v1alpha1::BuilderBundlePacket { + fn from(value: BuilderBundlePacket) -> Self { + value.to_raw() + } +} + +impl Protobuf for BuilderBundlePacket { + type Error = 
BuilderBundlePacketError; + type Raw = crate::generated::composer::v1alpha1::BuilderBundlePacket; + + fn try_from_raw_ref(raw: &Self::Raw) -> Result { + let crate::generated::composer::v1alpha1::BuilderBundlePacket { + bundle, + signature, + } = raw; + + let bundle = { + let Some(bundle) = bundle else { + return Err(BuilderBundlePacketError::field_not_set("bundle")); + }; + + BuilderBundle::try_from_raw_ref(bundle) + .map_err(BuilderBundlePacketError::invalid_bundle)? + }; + + Ok(BuilderBundlePacket { + bundle, + signature: signature.clone(), + }) + } + + fn to_raw(&self) -> Self::Raw { + crate::generated::composer::v1alpha1::BuilderBundlePacket { + bundle: Some(self.bundle.to_raw()), + signature: self.signature.clone(), + } + } +} diff --git a/crates/astria-core/src/execution/v1alpha2/mod.rs b/crates/astria-core/src/execution/v1alpha2/mod.rs index 7ceb0c17e..f3498189b 100644 --- a/crates/astria-core/src/execution/v1alpha2/mod.rs +++ b/crates/astria-core/src/execution/v1alpha2/mod.rs @@ -7,6 +7,10 @@ use crate::{ IncorrectRollupIdLength, RollupId, }, + sequencerblock::v1alpha1::block::{ + RollupData, + RollupDataError, + }, Protobuf, }; @@ -233,6 +237,105 @@ impl Protobuf for Block { } } +#[derive(Debug, thiserror::Error)] +#[error(transparent)] +pub struct ExecuteBlockResponseError(ExecuteBlockResponseErrorKind); + +impl ExecuteBlockResponseError { + fn field_not_set(field: &'static str) -> Self { + Self(ExecuteBlockResponseErrorKind::FieldNotSet(field)) + } + + fn invalid_rollup_data(source: RollupDataError) -> Self { + Self(ExecuteBlockResponseErrorKind::InvalidRollupData(source)) + } +} + +#[derive(Debug, thiserror::Error)] +enum ExecuteBlockResponseErrorKind { + #[error("{0} field not set")] + FieldNotSet(&'static str), + #[error("{0} invalid rollup data")] + InvalidRollupData(#[source] RollupDataError), +} + +#[derive(Clone, Debug, PartialEq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize))] +#[cfg_attr( + feature = "serde", + serde(into = 
"crate::generated::execution::v1alpha2::ExecuteBlockResponse") +)] +pub struct ExecuteBlockResponse { + block: Block, + included_transactions: Vec, +} + +impl ExecuteBlockResponse { + #[must_use] + pub fn block(&self) -> &Block { + &self.block + } + + #[must_use] + pub fn included_transactions(&self) -> &[RollupData] { + &self.included_transactions + } +} + +impl From for raw::ExecuteBlockResponse { + fn from(value: ExecuteBlockResponse) -> Self { + value.to_raw() + } +} + +impl Protobuf for ExecuteBlockResponse { + type Error = ExecuteBlockResponseError; + type Raw = raw::ExecuteBlockResponse; + + fn try_from_raw_ref(raw: &Self::Raw) -> Result { + let raw::ExecuteBlockResponse { + block, + included_transactions, + } = raw; + let block = { + let Some(block) = block else { + return Err(Self::Error::field_not_set(".block")); + }; + if let Ok(parsed_block) = Block::try_from_raw_ref(block) { + Ok(parsed_block) + } else { + return Err(Self::Error::field_not_set(".block")); + } + }?; + + let included_transactions = included_transactions + .iter() + .map(RollupData::try_from_raw_ref) + .collect::, _>>() + .map_err(Self::Error::invalid_rollup_data)?; + + Ok(Self { + block, + included_transactions, + }) + } + + fn to_raw(&self) -> Self::Raw { + let Self { + block, + included_transactions, + } = self; + let block = block.to_raw(); + + let included_transactions = included_transactions.iter().map(Protobuf::to_raw).collect(); + + Self::Raw { + block: Some(block), + included_transactions, + } + } +} + #[derive(Debug, thiserror::Error)] #[error(transparent)] pub struct CommitmentStateError(CommitmentStateErrorKind); diff --git a/crates/astria-core/src/generated/astria.composer.v1alpha1.rs b/crates/astria-core/src/generated/astria.composer.v1alpha1.rs index 3a50686cc..0d59d033f 100644 --- a/crates/astria-core/src/generated/astria.composer.v1alpha1.rs +++ b/crates/astria-core/src/generated/astria.composer.v1alpha1.rs @@ -1,3 +1,6 @@ + +use crate::generated::v1alpha1; + /// 
SubmitRollupTransactionRequest contains a rollup transaction to be submitted to the Shared Sequencer Network /// via the Composer #[allow(clippy::derive_partial_eq_without_eq)] @@ -29,6 +32,58 @@ impl ::prost::Name for SubmitRollupTransactionResponse { ::prost::alloc::format!("astria.composer.v1alpha1.{}", Self::NAME) } } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SendOptimisticBlockRequest { + #[prost(bytes = "bytes", tag = "1")] + pub block_hash: ::prost::bytes::Bytes, + #[prost(message, repeated, tag = "2")] + pub seq_action: ::prost::alloc::vec::Vec< + v1alpha1::SequenceAction, + >, + #[prost(message, optional, tag = "3")] + pub time: ::core::option::Option<::pbjson_types::Timestamp>, +} +impl ::prost::Name for SendOptimisticBlockRequest { + const NAME: &'static str = "SendOptimisticBlockRequest"; + const PACKAGE: &'static str = "astria.composer.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.composer.v1alpha1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SendOptimisticBlockResponse {} +impl ::prost::Name for SendOptimisticBlockResponse { + const NAME: &'static str = "SendOptimisticBlockResponse"; + const PACKAGE: &'static str = "astria.composer.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.composer.v1alpha1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SendFinalizedHashRequest { + #[prost(bytes = "bytes", tag = "1")] + pub block_hash: ::prost::bytes::Bytes, +} +impl ::prost::Name for SendFinalizedHashRequest { + const NAME: &'static str = "SendFinalizedHashRequest"; + const PACKAGE: &'static str = "astria.composer.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + 
::prost::alloc::format!("astria.composer.v1alpha1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SendFinalizedHashResponse {} +impl ::prost::Name for SendFinalizedHashResponse { + const NAME: &'static str = "SendFinalizedHashResponse"; + const PACKAGE: &'static str = "astria.composer.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.composer.v1alpha1.{}", Self::NAME) + } +} /// Generated client implementations. #[cfg(feature = "client")] pub mod grpc_collector_service_client { @@ -150,6 +205,154 @@ pub mod grpc_collector_service_client { } } } +/// Generated client implementations. +#[cfg(feature = "client")] +pub mod sequencer_hooks_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct SequencerHooksServiceClient { + inner: tonic::client::Grpc, + } + impl SequencerHooksServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl SequencerHooksServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> SequencerHooksServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + SequencerHooksServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn send_optimistic_block( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/astria.composer.v1alpha1.SequencerHooksService/SendOptimisticBlock", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "astria.composer.v1alpha1.SequencerHooksService", + "SendOptimisticBlock", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn send_finalized_hash( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/astria.composer.v1alpha1.SequencerHooksService/SendFinalizedHash", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "astria.composer.v1alpha1.SequencerHooksService", + "SendFinalizedHash", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} /// Generated server implementations. #[cfg(feature = "server")] pub mod grpc_collector_service_server { @@ -345,3 +548,288 @@ pub mod grpc_collector_service_server { const NAME: &'static str = "astria.composer.v1alpha1.GrpcCollectorService"; } } +/// Generated server implementations. 
+#[cfg(feature = "server")] +pub mod sequencer_hooks_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with SequencerHooksServiceServer. + #[async_trait] + pub trait SequencerHooksService: Send + Sync + 'static { + async fn send_optimistic_block( + self: std::sync::Arc, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn send_finalized_hash( + self: std::sync::Arc, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct SequencerHooksServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl SequencerHooksServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> + for SequencerHooksServiceServer + where + T: SequencerHooksService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/astria.composer.v1alpha1.SequencerHooksService/SendOptimisticBlock" => { + #[allow(non_camel_case_types)] + struct SendOptimisticBlockSvc(pub Arc); + impl< + T: SequencerHooksService, + > tonic::server::UnaryService + for SendOptimisticBlockSvc { + type Response = super::SendOptimisticBlockResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::send_optimistic_block( + inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + 
let fut = async move { + let inner = inner.0; + let method = SendOptimisticBlockSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/astria.composer.v1alpha1.SequencerHooksService/SendFinalizedHash" => { + #[allow(non_camel_case_types)] + struct SendFinalizedHashSvc(pub Arc); + impl< + T: SequencerHooksService, + > tonic::server::UnaryService + for SendFinalizedHashSvc { + type Response = super::SendFinalizedHashResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::send_finalized_hash( + inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = SendFinalizedHashSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + 
.unwrap(), + ) + }) + } + } + } + } + impl Clone for SequencerHooksServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService + for SequencerHooksServiceServer { + const NAME: &'static str = "astria.composer.v1alpha1.SequencerHooksService"; + } +} +/// BuilderBundle contains a bundle of RollupData transactions which are created by a trusted builder +/// It contains the transactions and the parent hash on top of which the bundles were simulated. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BuilderBundle { + /// transactions in the bundle + #[prost(message, repeated, tag = "1")] + pub transactions: ::prost::alloc::vec::Vec< + super::super::sequencerblock::v1alpha1::RollupData, + >, + /// parent hash of the bundle + #[prost(bytes = "bytes", tag = "2")] + pub parent_hash: ::prost::bytes::Bytes, +} +impl ::prost::Name for BuilderBundle { + const NAME: &'static str = "BuilderBundle"; + const PACKAGE: &'static str = "astria.composer.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.composer.v1alpha1.{}", Self::NAME) + } +} +/// BuilderBundlePacket is a message that represents a bundle of RollupData transactions and the signature +/// of the BuilderBundle by the trusted builder. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BuilderBundlePacket { + /// the bundle of transactions + #[prost(message, optional, tag = "1")] + pub bundle: ::core::option::Option, + /// the signature of the bundle signed by the trusted builder + #[prost(bytes = "bytes", tag = "3")] + pub signature: ::prost::bytes::Bytes, +} +impl ::prost::Name for BuilderBundlePacket { + const NAME: &'static str = "BuilderBundlePacket"; + const PACKAGE: &'static str = "astria.composer.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.composer.v1alpha1.{}", Self::NAME) + } +} diff --git a/crates/astria-core/src/generated/astria.composer.v1alpha1.serde.rs b/crates/astria-core/src/generated/astria.composer.v1alpha1.serde.rs index 91e8502f9..f9ab1af71 100644 --- a/crates/astria-core/src/generated/astria.composer.v1alpha1.serde.rs +++ b/crates/astria-core/src/generated/astria.composer.v1alpha1.serde.rs @@ -1,3 +1,593 @@ +impl serde::Serialize for BuilderBundle { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if !self.transactions.is_empty() { + len += 1; + } + if !self.parent_hash.is_empty() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.composer.v1alpha1.BuilderBundle", len)?; + if !self.transactions.is_empty() { + struct_ser.serialize_field("transactions", &self.transactions)?; + } + if !self.parent_hash.is_empty() { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("parentHash", pbjson::private::base64::encode(&self.parent_hash).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for BuilderBundle { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "transactions", + "parent_hash", 
+ "parentHash", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Transactions, + ParentHash, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "transactions" => Ok(GeneratedField::Transactions), + "parentHash" | "parent_hash" => Ok(GeneratedField::ParentHash), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = BuilderBundle; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.composer.v1alpha1.BuilderBundle") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut transactions__ = None; + let mut parent_hash__ = None; + while let Some(k) = map_.next_key()? 
{ + match k { + GeneratedField::Transactions => { + if transactions__.is_some() { + return Err(serde::de::Error::duplicate_field("transactions")); + } + transactions__ = Some(map_.next_value()?); + } + GeneratedField::ParentHash => { + if parent_hash__.is_some() { + return Err(serde::de::Error::duplicate_field("parentHash")); + } + parent_hash__ = + Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + } + } + Ok(BuilderBundle { + transactions: transactions__.unwrap_or_default(), + parent_hash: parent_hash__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("astria.composer.v1alpha1.BuilderBundle", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for BuilderBundlePacket { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.bundle.is_some() { + len += 1; + } + if !self.signature.is_empty() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.composer.v1alpha1.BuilderBundlePacket", len)?; + if let Some(v) = self.bundle.as_ref() { + struct_ser.serialize_field("bundle", v)?; + } + if !self.signature.is_empty() { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("signature", pbjson::private::base64::encode(&self.signature).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for BuilderBundlePacket { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "bundle", + "signature", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Bundle, + Signature, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = 
GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "bundle" => Ok(GeneratedField::Bundle), + "signature" => Ok(GeneratedField::Signature), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = BuilderBundlePacket; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.composer.v1alpha1.BuilderBundlePacket") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut bundle__ = None; + let mut signature__ = None; + while let Some(k) = map_.next_key()? 
{ + match k { + GeneratedField::Bundle => { + if bundle__.is_some() { + return Err(serde::de::Error::duplicate_field("bundle")); + } + bundle__ = map_.next_value()?; + } + GeneratedField::Signature => { + if signature__.is_some() { + return Err(serde::de::Error::duplicate_field("signature")); + } + signature__ = + Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + } + } + Ok(BuilderBundlePacket { + bundle: bundle__, + signature: signature__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("astria.composer.v1alpha1.BuilderBundlePacket", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for SendFinalizedHashRequest { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if !self.block_hash.is_empty() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.composer.v1alpha1.SendFinalizedHashRequest", len)?; + if !self.block_hash.is_empty() { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("blockHash", pbjson::private::base64::encode(&self.block_hash).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for SendFinalizedHashRequest { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "block_hash", + "blockHash", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + BlockHash, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + 
#[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "blockHash" | "block_hash" => Ok(GeneratedField::BlockHash), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = SendFinalizedHashRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.composer.v1alpha1.SendFinalizedHashRequest") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut block_hash__ = None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::BlockHash => { + if block_hash__.is_some() { + return Err(serde::de::Error::duplicate_field("blockHash")); + } + block_hash__ = + Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + } + } + Ok(SendFinalizedHashRequest { + block_hash: block_hash__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("astria.composer.v1alpha1.SendFinalizedHashRequest", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for SendFinalizedHashResponse { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let len = 0; + let struct_ser = serializer.serialize_struct("astria.composer.v1alpha1.SendFinalizedHashResponse", len)?; + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for SendFinalizedHashResponse { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn 
deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + Err(serde::de::Error::unknown_field(value, FIELDS)) + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = SendFinalizedHashResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.composer.v1alpha1.SendFinalizedHashResponse") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + while map_.next_key::()?.is_some() { + let _ = map_.next_value::()?; + } + Ok(SendFinalizedHashResponse { + }) + } + } + deserializer.deserialize_struct("astria.composer.v1alpha1.SendFinalizedHashResponse", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for SendOptimisticBlockRequest { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if !self.block_hash.is_empty() { + len += 1; + } + if !self.seq_action.is_empty() { + len += 1; + } + if self.time.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.composer.v1alpha1.SendOptimisticBlockRequest", len)?; + if !self.block_hash.is_empty() { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("blockHash", pbjson::private::base64::encode(&self.block_hash).as_str())?; + } + if !self.seq_action.is_empty() { + struct_ser.serialize_field("seqAction", 
&self.seq_action)?; + } + if let Some(v) = self.time.as_ref() { + struct_ser.serialize_field("time", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for SendOptimisticBlockRequest { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "block_hash", + "blockHash", + "seq_action", + "seqAction", + "time", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + BlockHash, + SeqAction, + Time, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "blockHash" | "block_hash" => Ok(GeneratedField::BlockHash), + "seqAction" | "seq_action" => Ok(GeneratedField::SeqAction), + "time" => Ok(GeneratedField::Time), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = SendOptimisticBlockRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.composer.v1alpha1.SendOptimisticBlockRequest") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut block_hash__ = None; + let mut seq_action__ = None; + let mut time__ = None; + while let Some(k) = map_.next_key()? 
{ + match k { + GeneratedField::BlockHash => { + if block_hash__.is_some() { + return Err(serde::de::Error::duplicate_field("blockHash")); + } + block_hash__ = + Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + GeneratedField::SeqAction => { + if seq_action__.is_some() { + return Err(serde::de::Error::duplicate_field("seqAction")); + } + seq_action__ = Some(map_.next_value()?); + } + GeneratedField::Time => { + if time__.is_some() { + return Err(serde::de::Error::duplicate_field("time")); + } + time__ = map_.next_value()?; + } + } + } + Ok(SendOptimisticBlockRequest { + block_hash: block_hash__.unwrap_or_default(), + seq_action: seq_action__.unwrap_or_default(), + time: time__, + }) + } + } + deserializer.deserialize_struct("astria.composer.v1alpha1.SendOptimisticBlockRequest", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for SendOptimisticBlockResponse { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let len = 0; + let struct_ser = serializer.serialize_struct("astria.composer.v1alpha1.SendOptimisticBlockResponse", len)?; + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for SendOptimisticBlockResponse { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> 
std::result::Result + where + E: serde::de::Error, + { + Err(serde::de::Error::unknown_field(value, FIELDS)) + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = SendOptimisticBlockResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.composer.v1alpha1.SendOptimisticBlockResponse") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + while map_.next_key::()?.is_some() { + let _ = map_.next_value::()?; + } + Ok(SendOptimisticBlockResponse { + }) + } + } + deserializer.deserialize_struct("astria.composer.v1alpha1.SendOptimisticBlockResponse", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for SubmitRollupTransactionRequest { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result diff --git a/crates/astria-core/src/generated/astria.execution.v1alpha2.rs b/crates/astria-core/src/generated/astria.execution.v1alpha2.rs index f050cdcd2..4b61c29ea 100644 --- a/crates/astria-core/src/generated/astria.execution.v1alpha2.rs +++ b/crates/astria-core/src/generated/astria.execution.v1alpha2.rs @@ -144,6 +144,9 @@ pub struct ExecuteBlockRequest { /// Timestamp to be used for new block. #[prost(message, optional, tag = "3")] pub timestamp: ::core::option::Option<::pbjson_types::Timestamp>, + /// If true, the block will be created but not persisted. + #[prost(bool, tag = "4")] + pub simulate_only: bool, } impl ::prost::Name for ExecuteBlockRequest { const NAME: &'static str = "ExecuteBlockRequest"; @@ -152,6 +155,27 @@ impl ::prost::Name for ExecuteBlockRequest { ::prost::alloc::format!("astria.execution.v1alpha2.{}", Self::NAME) } } +/// ExecuteBlockResponse contains the new block and the transactions that were +/// included in the block. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExecuteBlockResponse { + /// The new block that was created. + #[prost(message, optional, tag = "1")] + pub block: ::core::option::Option, + /// The transactions that were included in the block. + #[prost(message, repeated, tag = "2")] + pub included_transactions: ::prost::alloc::vec::Vec< + super::super::sequencerblock::v1alpha1::RollupData, + >, +} +impl ::prost::Name for ExecuteBlockResponse { + const NAME: &'static str = "ExecuteBlockResponse"; + const PACKAGE: &'static str = "astria.execution.v1alpha2"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("astria.execution.v1alpha2.{}", Self::NAME) + } +} /// The CommitmentState holds the block at each stage of sequencer commitment /// level /// @@ -389,7 +413,10 @@ pub mod execution_service_client { pub async fn execute_block( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { self.inner .ready() .await @@ -510,7 +537,10 @@ pub mod execution_service_server { async fn execute_block( self: std::sync::Arc, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// GetCommitmentState fetches the current CommitmentState of the chain. 
async fn get_commitment_state( self: std::sync::Arc, @@ -754,7 +784,7 @@ pub mod execution_service_server { T: ExecutionService, > tonic::server::UnaryService for ExecuteBlockSvc { - type Response = super::Block; + type Response = super::ExecuteBlockResponse; type Future = BoxFuture< tonic::Response, tonic::Status, diff --git a/crates/astria-core/src/generated/astria.execution.v1alpha2.serde.rs b/crates/astria-core/src/generated/astria.execution.v1alpha2.serde.rs index 2fbadc3e3..c3b2eac76 100644 --- a/crates/astria-core/src/generated/astria.execution.v1alpha2.serde.rs +++ b/crates/astria-core/src/generated/astria.execution.v1alpha2.serde.rs @@ -587,6 +587,9 @@ impl serde::Serialize for ExecuteBlockRequest { if self.timestamp.is_some() { len += 1; } + if self.simulate_only { + len += 1; + } let mut struct_ser = serializer.serialize_struct("astria.execution.v1alpha2.ExecuteBlockRequest", len)?; if !self.prev_block_hash.is_empty() { #[allow(clippy::needless_borrow)] @@ -598,6 +601,9 @@ impl serde::Serialize for ExecuteBlockRequest { if let Some(v) = self.timestamp.as_ref() { struct_ser.serialize_field("timestamp", v)?; } + if self.simulate_only { + struct_ser.serialize_field("simulateOnly", &self.simulate_only)?; + } struct_ser.end() } } @@ -612,6 +618,8 @@ impl<'de> serde::Deserialize<'de> for ExecuteBlockRequest { "prevBlockHash", "transactions", "timestamp", + "simulate_only", + "simulateOnly", ]; #[allow(clippy::enum_variant_names)] @@ -619,6 +627,7 @@ impl<'de> serde::Deserialize<'de> for ExecuteBlockRequest { PrevBlockHash, Transactions, Timestamp, + SimulateOnly, } impl<'de> serde::Deserialize<'de> for GeneratedField { fn deserialize(deserializer: D) -> std::result::Result @@ -643,6 +652,7 @@ impl<'de> serde::Deserialize<'de> for ExecuteBlockRequest { "prevBlockHash" | "prev_block_hash" => Ok(GeneratedField::PrevBlockHash), "transactions" => Ok(GeneratedField::Transactions), "timestamp" => Ok(GeneratedField::Timestamp), + "simulateOnly" | "simulate_only" => 
Ok(GeneratedField::SimulateOnly), _ => Err(serde::de::Error::unknown_field(value, FIELDS)), } } @@ -665,6 +675,7 @@ impl<'de> serde::Deserialize<'de> for ExecuteBlockRequest { let mut prev_block_hash__ = None; let mut transactions__ = None; let mut timestamp__ = None; + let mut simulate_only__ = None; while let Some(k) = map_.next_key()? { match k { GeneratedField::PrevBlockHash => { @@ -687,18 +698,134 @@ impl<'de> serde::Deserialize<'de> for ExecuteBlockRequest { } timestamp__ = map_.next_value()?; } + GeneratedField::SimulateOnly => { + if simulate_only__.is_some() { + return Err(serde::de::Error::duplicate_field("simulateOnly")); + } + simulate_only__ = Some(map_.next_value()?); + } } } Ok(ExecuteBlockRequest { prev_block_hash: prev_block_hash__.unwrap_or_default(), transactions: transactions__.unwrap_or_default(), timestamp: timestamp__, + simulate_only: simulate_only__.unwrap_or_default(), }) } } deserializer.deserialize_struct("astria.execution.v1alpha2.ExecuteBlockRequest", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for ExecuteBlockResponse { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.block.is_some() { + len += 1; + } + if !self.included_transactions.is_empty() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("astria.execution.v1alpha2.ExecuteBlockResponse", len)?; + if let Some(v) = self.block.as_ref() { + struct_ser.serialize_field("block", v)?; + } + if !self.included_transactions.is_empty() { + struct_ser.serialize_field("includedTransactions", &self.included_transactions)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for ExecuteBlockResponse { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "block", + "included_transactions", + "includedTransactions", + ]; + + 
#[allow(clippy::enum_variant_names)] + enum GeneratedField { + Block, + IncludedTransactions, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "block" => Ok(GeneratedField::Block), + "includedTransactions" | "included_transactions" => Ok(GeneratedField::IncludedTransactions), + _ => Err(serde::de::Error::unknown_field(value, FIELDS)), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = ExecuteBlockResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct astria.execution.v1alpha2.ExecuteBlockResponse") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut block__ = None; + let mut included_transactions__ = None; + while let Some(k) = map_.next_key()? 
{ + match k { + GeneratedField::Block => { + if block__.is_some() { + return Err(serde::de::Error::duplicate_field("block")); + } + block__ = map_.next_value()?; + } + GeneratedField::IncludedTransactions => { + if included_transactions__.is_some() { + return Err(serde::de::Error::duplicate_field("includedTransactions")); + } + included_transactions__ = Some(map_.next_value()?); + } + } + } + Ok(ExecuteBlockResponse { + block: block__, + included_transactions: included_transactions__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("astria.execution.v1alpha2.ExecuteBlockResponse", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for GenesisInfo { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result diff --git a/crates/astria-core/src/generated/mod.rs b/crates/astria-core/src/generated/mod.rs index 4b78c07fe..350c98fa2 100644 --- a/crates/astria-core/src/generated/mod.rs +++ b/crates/astria-core/src/generated/mod.rs @@ -11,6 +11,8 @@ //! [`buf`]: https://buf.build //! 
[`tools/protobuf-compiler`]: ../../../../tools/protobuf-compiler +use crate::generated::protocol::transactions::v1alpha1; + #[path = ""] pub mod astria_vendored { #[path = ""] @@ -136,8 +138,16 @@ pub mod sequencerblock { #[path = ""] pub mod composer { - #[path = "astria.composer.v1alpha1.rs"] - pub mod v1alpha1; + + pub mod v1alpha1 { + include!("astria.composer.v1alpha1.rs"); + + #[cfg(feature = "serde")] + mod _serde_impl { + use super::*; + include!("astria.composer.v1alpha1.serde.rs"); + } + } } #[path = ""] diff --git a/crates/astria-core/src/lib.rs b/crates/astria-core/src/lib.rs index 4fecf8ddd..d411d2b00 100644 --- a/crates/astria-core/src/lib.rs +++ b/crates/astria-core/src/lib.rs @@ -18,6 +18,7 @@ pub mod sequencerblock; pub mod brotli; #[cfg(feature = "celestia")] pub mod celestia; +pub mod composer; #[cfg(feature = "serde")] pub(crate) mod serde; diff --git a/crates/astria-core/src/protocol/mod.rs b/crates/astria-core/src/protocol/mod.rs index e285ce4d6..a4a6b7f31 100644 --- a/crates/astria-core/src/protocol/mod.rs +++ b/crates/astria-core/src/protocol/mod.rs @@ -2,7 +2,10 @@ use bytes::Bytes; use indexmap::IndexMap; use transaction::v1alpha1::SignedTransaction; -use crate::primitive::v1::RollupId; +use crate::{ + primitive::v1::RollupId, + Protobuf, +}; pub mod abci; pub mod account; diff --git a/crates/astria-core/src/protocol/test_utils.rs b/crates/astria-core/src/protocol/test_utils.rs index 1f399a422..5f745ccb9 100644 --- a/crates/astria-core/src/protocol/test_utils.rs +++ b/crates/astria-core/src/protocol/test_utils.rs @@ -23,6 +23,7 @@ use crate::{ block::Deposit, SequencerBlock, }, + Protobuf, }; #[derive(Default)] @@ -131,7 +132,7 @@ impl ConfigureSequencerBlock { .or_default() .extend(deposit.into_iter().map(|deposit| { RollupData::Deposit(Box::new(deposit)) - .into_raw() + .to_raw() .encode_to_vec() .into() })); diff --git a/crates/astria-core/src/sequencerblock/v1alpha1/block.rs b/crates/astria-core/src/sequencerblock/v1alpha1/block.rs 
index a6304c8ba..d3f8154cd 100644 --- a/crates/astria-core/src/sequencerblock/v1alpha1/block.rs +++ b/crates/astria-core/src/sequencerblock/v1alpha1/block.rs @@ -35,7 +35,7 @@ use crate::{ SignedTransaction, SignedTransactionError, }, - Protobuf as _, + Protobuf, }; #[derive(Debug, thiserror::Error)] @@ -1454,18 +1454,15 @@ pub enum RollupData { Deposit(Box), } -impl RollupData { - #[must_use] - pub fn into_raw(self) -> raw::RollupData { - match self { - Self::SequencedData(data) => raw::RollupData { - value: Some(raw::rollup_data::Value::SequencedData(data)), - }, - Self::Deposit(deposit) => raw::RollupData { - value: Some(raw::rollup_data::Value::Deposit(deposit.into_raw())), - }, - } +impl From for raw::RollupData { + fn from(value: RollupData) -> Self { + value.to_raw() } +} + +impl Protobuf for RollupData { + type Error = RollupDataError; + type Raw = raw::RollupData; /// Attempts to transform the `RollupData` from its raw representation. /// @@ -1473,19 +1470,34 @@ impl RollupData { /// /// - if the `data` field is not set /// - if the variant is `Deposit` but a `Deposit` cannot be constructed from the raw proto - pub fn try_from_raw(raw: raw::RollupData) -> Result { + fn try_from_raw_ref(raw: &raw::RollupData) -> Result { let raw::RollupData { value, } = raw; match value { - Some(raw::rollup_data::Value::SequencedData(data)) => Ok(Self::SequencedData(data)), - Some(raw::rollup_data::Value::Deposit(deposit)) => Deposit::try_from_raw(deposit) - .map(Box::new) - .map(Self::Deposit) - .map_err(RollupDataError::deposit), + Some(raw::rollup_data::Value::SequencedData(data)) => { + Ok(Self::SequencedData(data.clone())) + } + Some(raw::rollup_data::Value::Deposit(deposit)) => { + Deposit::try_from_raw(deposit.clone()) + .map(Box::new) + .map(Self::Deposit) + .map_err(RollupDataError::deposit) + } None => Err(RollupDataError::field_not_set("data")), } } + + fn to_raw(&self) -> Self::Raw { + match self { + Self::SequencedData(data) => raw::RollupData { + value: 
Some(raw::rollup_data::Value::SequencedData(data.clone())), + }, + Self::Deposit(deposit) => raw::RollupData { + value: Some(raw::rollup_data::Value::Deposit(deposit.clone().into_raw())), + }, + } + } } #[derive(Debug, thiserror::Error)] diff --git a/crates/astria-sequencer/Cargo.toml b/crates/astria-sequencer/Cargo.toml index 5efcf0af6..fdb93b357 100644 --- a/crates/astria-sequencer/Cargo.toml +++ b/crates/astria-sequencer/Cargo.toml @@ -15,13 +15,18 @@ name = "astria-sequencer" benchmark = ["divan"] [dependencies] -astria-core = { path = "../astria-core", features = ["server", "serde"] } +astria-core = { path = "../astria-core", features = [ + "client", + "server", + "serde", +] } astria-build-info = { path = "../astria-build-info", features = ["runtime"] } config = { package = "astria-config", path = "../astria-config" } merkle = { package = "astria-merkle", path = "../astria-merkle" } telemetry = { package = "astria-telemetry", path = "../astria-telemetry", features = [ "display", ] } +pbjson-types = { workspace = true } anyhow = "1" borsh = { version = "1", features = ["derive"] } @@ -59,6 +64,7 @@ tracing = { workspace = true } [dev-dependencies] astria-core = { path = "../astria-core", features = [ + "client", "server", "serde", "test-utils", diff --git a/crates/astria-sequencer/local.env.example b/crates/astria-sequencer/local.env.example index 05cb7937a..87d928237 100644 --- a/crates/astria-sequencer/local.env.example +++ b/crates/astria-sequencer/local.env.example @@ -34,6 +34,13 @@ ASTRIA_SEQUENCER_METRICS_HTTP_LISTENER_ADDR="127.0.0.1:9000" # `ASTRIA_SEQUENCER_FORCE_STDOUT` is set to `true`. ASTRIA_SEQUENCER_PRETTY_PRINT=false +# Address of the composer node +ASTRIA_SEQUENCER_COMPOSER_HOOK="" + +# whether the composer hook is enabled or not. +# TODO - rename these +ASTRIA_SEQUENCER_COMPOSER_HOOK_ENABLED=false + # If set to any non-empty value removes ANSI escape characters from the pretty # printed output. 
Note that this does nothing unless `ASTRIA_SEQUENCER_PRETTY_PRINT` # is set to `true`. diff --git a/crates/astria-sequencer/src/app/mod.rs b/crates/astria-sequencer/src/app/mod.rs index 4dca9057c..d34990d2b 100644 --- a/crates/astria-sequencer/src/app/mod.rs +++ b/crates/astria-sequencer/src/app/mod.rs @@ -35,7 +35,9 @@ use astria_core::{ }, }, sequencerblock::v1alpha1::block::SequencerBlock, + Protobuf, }; +use bytes::Bytes; use cnidarium::{ ArcStateDeltaExt, Snapshot, @@ -63,6 +65,7 @@ use tendermint::{ }; use tracing::{ debug, + error, info, instrument, }; @@ -93,6 +96,7 @@ use crate::{ StateReadExt as _, StateWriteExt as _, }, + client::SequencerHooksClient, component::Component as _, ibc::component::IbcComponent, mempool::{ @@ -170,6 +174,8 @@ pub(crate) struct App { #[allow(clippy::struct_field_names)] app_hash: AppHash, + sequencer_hooks_client: SequencerHooksClient, + metrics: &'static Metrics, } @@ -177,6 +183,8 @@ impl App { pub(crate) async fn new( snapshot: Snapshot, mempool: Mempool, + composer_uri: String, + composer_hook_enabled: bool, metrics: &'static Metrics, ) -> anyhow::Result { debug!("initializing App instance"); @@ -194,6 +202,10 @@ impl App { // there should be no unexpected copies elsewhere. let state = Arc::new(StateDelta::new(snapshot)); + let sequencer_hooks_client = + SequencerHooksClient::connect_lazy(&composer_uri, composer_hook_enabled) + .context("failed to connect to sequencer hooks service")?; + Ok(Self { state, mempool, @@ -203,6 +215,7 @@ impl App { write_batch: None, app_hash, metrics, + sequencer_hooks_client, }) } @@ -348,6 +361,7 @@ impl App { process_proposal: abci::request::ProcessProposal, storage: Storage, ) -> anyhow::Result<()> { + info!("BHARATH: Processing proposal!"); // if we proposed this block (ie. prepare_proposal was called directly before this), then // we skip execution for this `process_proposal` call. 
// @@ -453,7 +467,33 @@ impl App { "chain IDs commitment does not match expected", ); - self.executed_proposal_hash = process_proposal.hash; + let block_hash = process_proposal.hash; + self.executed_proposal_hash = block_hash; + + // get a list of sequence actions from the signed_txs + let sequence_actions = signed_txs + .iter() + .flat_map(|tx| tx.unsigned_transaction().actions.iter()) + .filter_map(Action::as_sequence) + .map(|seq| seq.to_raw().clone()) + .collect::>(); + let time = process_proposal.time; + + info!("BHARATH: Sending optimistic block to composer!"); + if let Err(e) = self + .sequencer_hooks_client + .send_optimistic_block( + Bytes::from(block_hash.as_bytes().to_vec()), + sequence_actions, + time, + ) + .await + .context("failed to send optimistic block to composer") + { + error!(error = %e, "failed to send optimistic block to composer"); + } else { + info!("Sent optimistic block to composer!"); + } Ok(()) } @@ -905,6 +945,21 @@ impl App { .await .context("failed to prepare commit")?; + // update the priority of any txs in the mempool based on the updated app state + update_mempool_after_finalization(&mut self.mempool, self.state.as_ref()).await; + + if let Err(e) = self + .sequencer_hooks_client + .send_finalized_block_hash(Bytes::from(block_hash.to_vec())) + .await + .context("failed to send finalized block hash to composer") + { + // do not fail the entire method if this fails + error!(error = %e, "failed to send finalized block hash to composer"); + } else { + info!("Sent finalized block hash to composer!"); + } + Ok(abci::response::FinalizeBlock { events: end_block.events, validator_updates: end_block.validator_updates, diff --git a/crates/astria-sequencer/src/app/test_utils.rs b/crates/astria-sequencer/src/app/test_utils.rs index 3acd71ed3..60dd8773d 100644 --- a/crates/astria-sequencer/src/app/test_utils.rs +++ b/crates/astria-sequencer/src/app/test_utils.rs @@ -140,7 +140,16 @@ pub(crate) async fn initialize_app_with_storage( let snapshot = 
storage.latest_snapshot(); let mempool = Mempool::new(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); - let mut app = App::new(snapshot, mempool, metrics).await.unwrap(); + // TODO - temp addr + let mut app = App::new( + snapshot, + mempool, + "127.0.0.1:232".to_string(), + false, + metrics, + ) + .await + .unwrap(); let genesis_state = genesis_state.unwrap_or_else(self::genesis_state); diff --git a/crates/astria-sequencer/src/app/tests_app.rs b/crates/astria-sequencer/src/app/tests_app.rs index 3974aa67e..4a8c6f529 100644 --- a/crates/astria-sequencer/src/app/tests_app.rs +++ b/crates/astria-sequencer/src/app/tests_app.rs @@ -18,6 +18,7 @@ use astria_core::{ }, }, sequencerblock::v1alpha1::block::Deposit, + Protobuf, }; use cnidarium::StateDelta; use prost::{ diff --git a/crates/astria-sequencer/src/client.rs b/crates/astria-sequencer/src/client.rs new file mode 100644 index 000000000..81b7098d3 --- /dev/null +++ b/crates/astria-sequencer/src/client.rs @@ -0,0 +1,112 @@ +use anyhow::Context; +use astria_core::generated::{ + composer::v1alpha1::{ + sequencer_hooks_service_client::SequencerHooksServiceClient, + SendFinalizedHashRequest, + SendFinalizedHashResponse, + SendOptimisticBlockRequest, + SendOptimisticBlockResponse, + }, + protocol::transactions::v1alpha1::SequenceAction, +}; +use bytes::Bytes; +use tendermint::Time; +use tendermint_proto::google::protobuf::Timestamp; +use tonic::transport::{ + Channel, + Endpoint, + Uri, +}; +use tracing::{ + info, + instrument, +}; + +/// A newtype wrapper around [`SequencerHooksServiceClient`] to work with +/// idiomatic types. 
+#[derive(Clone)] +pub(crate) struct SequencerHooksClient { + uri: Uri, + enabled: bool, + inner: SequencerHooksServiceClient, +} + +impl SequencerHooksClient { + pub(crate) fn connect_lazy(uri: &str, enabled: bool) -> anyhow::Result { + let uri: Uri = uri + .parse() + .context("failed to parse provided string as uri")?; + let endpoint = Endpoint::from(uri.clone()).connect_lazy(); + let inner = SequencerHooksServiceClient::new(endpoint); + Ok(Self { + uri, + enabled, + inner, + }) + } + + // pub(crate) fn uri(&self) -> String { + // self.uri.to_string() + // } + + #[instrument(skip_all, fields(uri = % self.uri), err)] + pub(super) async fn send_optimistic_block( + &self, + block_hash: Bytes, + seq_actions: Vec, + time: Time, + ) -> anyhow::Result { + if !self.enabled { + info!("BHARATH: optimistic block sending is disabled"); + return Ok(SendOptimisticBlockResponse::default()); + } + info!( + "BHARATH: sending optimistic block hash to {:?}", + self.uri.to_string() + ); + + let Timestamp { + seconds, + nanos, + } = time.into(); + + info!("BHARATH: seconds: {:?}, nanos: {:?}", seconds, nanos); + + let request = SendOptimisticBlockRequest { + block_hash, + seq_action: seq_actions, + time: Some(pbjson_types::Timestamp { + seconds, + nanos, + }), + }; + + let mut client = self.inner.clone(); + let response = client.send_optimistic_block(request).await?; + + Ok(response.into_inner()) + } + + #[instrument(skip_all, fields(uri = % self.uri), err)] + pub(super) async fn send_finalized_block_hash( + &self, + finalized_block_hash: Bytes, + ) -> anyhow::Result { + if !self.enabled { + info!("BHARATH: finalized block hash sending is disabled"); + return Ok(SendFinalizedHashResponse::default()); + } + info!( + "BHARATH: sending finalized block hash to {:?}", + self.uri.to_string() + ); + let request = SendFinalizedHashRequest { + block_hash: finalized_block_hash, + }; + + let mut client = self.inner.clone(); + let response = client.send_finalized_hash(request).await?; + + 
Ok(response.into_inner()) + } +} diff --git a/crates/astria-sequencer/src/config.rs b/crates/astria-sequencer/src/config.rs index 234a67ca5..bafa1181b 100644 --- a/crates/astria-sequencer/src/config.rs +++ b/crates/astria-sequencer/src/config.rs @@ -31,6 +31,8 @@ pub struct Config { pub metrics_http_listener_addr: String, /// Writes a human readable format to stdout instead of JSON formatted OTEL trace data. pub pretty_print: bool, + pub composer_hook: String, + pub composer_hook_enabled: bool, } impl config::Config for Config { diff --git a/crates/astria-sequencer/src/lib.rs b/crates/astria-sequencer/src/lib.rs index d35e9e078..aa6197230 100644 --- a/crates/astria-sequencer/src/lib.rs +++ b/crates/astria-sequencer/src/lib.rs @@ -8,6 +8,7 @@ pub(crate) mod authority; pub(crate) mod benchmark_utils; pub(crate) mod bridge; mod build_info; +pub(crate) mod client; pub(crate) mod component; pub mod config; pub(crate) mod fee_asset_change; diff --git a/crates/astria-sequencer/src/proposal/commitment.rs b/crates/astria-sequencer/src/proposal/commitment.rs index 1578b4720..088ff2755 100644 --- a/crates/astria-sequencer/src/proposal/commitment.rs +++ b/crates/astria-sequencer/src/proposal/commitment.rs @@ -10,6 +10,7 @@ use astria_core::{ Deposit, RollupData, }, + Protobuf, }; use bytes::Bytes; @@ -69,7 +70,7 @@ pub(crate) fn generate_rollup_datas_commitment( .or_default() .extend(deposit.into_iter().map(|deposit| { RollupData::Deposit(Box::new(deposit)) - .into_raw() + .to_raw() .encode_to_vec() .into() })); diff --git a/crates/astria-sequencer/src/sequencer.rs b/crates/astria-sequencer/src/sequencer.rs index ac5661ed7..b1e3cce39 100644 --- a/crates/astria-sequencer/src/sequencer.rs +++ b/crates/astria-sequencer/src/sequencer.rs @@ -80,9 +80,15 @@ impl Sequencer { let snapshot = storage.latest_snapshot(); let mempool = Mempool::new(); - let app = App::new(snapshot, mempool.clone(), metrics) - .await - .context("failed to initialize app")?; + let app = App::new( + snapshot, 
+ mempool.clone(), + config.composer_hook, + config.composer_hook_enabled, + metrics, + ) + .await + .context("failed to initialize app")?; let consensus_service = tower::ServiceBuilder::new() .layer(request_span::layer(|req: &ConsensusRequest| { diff --git a/crates/astria-sequencer/src/service/consensus.rs b/crates/astria-sequencer/src/service/consensus.rs index 4a4b290cd..8f93e82ee 100644 --- a/crates/astria-sequencer/src/service/consensus.rs +++ b/crates/astria-sequencer/src/service/consensus.rs @@ -465,7 +465,16 @@ mod test { let snapshot = storage.latest_snapshot(); let mempool = Mempool::new(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); - let mut app = App::new(snapshot, mempool.clone(), metrics).await.unwrap(); + // TODO - temp addr + let mut app = App::new( + snapshot, + mempool.clone(), + "127.0.0.1:34".to_string(), + false, + metrics, + ) + .await + .unwrap(); app.init_chain(storage.clone(), genesis_state, vec![], "test".to_string()) .await .unwrap(); diff --git a/dev/values/rollup/dev.yaml b/dev/values/rollup/dev.yaml index e53a8c73e..40b00b2f4 100644 --- a/dev/values/rollup/dev.yaml +++ b/dev/values/rollup/dev.yaml @@ -8,6 +8,16 @@ global: sequencerChainId: sequencer-test-chain-0 evm-rollup: + images: + geth: + repo: + + tag: 0.14.0 + devTag: tb + conductor: + repo: astria-conductor + tag: "0.20.0" + devTag: tb genesis: ## These values are used to configure the genesis block of the rollup chain ## no defaults as they are unique to each chain @@ -56,7 +66,7 @@ evm-rollup: eip1559Params: {} # 1: # minBaseFee: 0 - # elasticityMultiplier: 2 + # elasticityMultiplier: 2 # baseFeeChangeDenominator: 8 ## Standard Eth Genesis config values @@ -79,7 +89,15 @@ evm-rollup: value: balance: "0" code: 
"0x60806040526004361061004a5760003560e01c80637eb6dec71461004f578063a996e0201461009d578063b6476c7e146100b2578063bab916d0146100d4578063db97dc98146100e7575b600080fd5b34801561005b57600080fd5b506100837f000000000000000000000000000000000000000000000000000000000000000981565b60405163ffffffff90911681526020015b60405180910390f35b6100b06100ab366004610315565b6100fc565b005b3480156100be57600080fd5b506100c761019e565b6040516100949190610381565b6100b06100e23660046103cf565b61022c565b3480156100f357600080fd5b506100c76102bf565b3460006101297f000000000000000000000000000000000000000000000000000000003b9aca0083610411565b1161014f5760405162461bcd60e51b815260040161014690610433565b60405180910390fd5b34336001600160a01b03167f0c64e29a5254a71c7f4e52b3d2d236348c80e00a00ba2e1961962bd2827c03fb8787878760405161018f94939291906104ea565b60405180910390a35050505050565b600180546101ab9061051c565b80601f01602080910402602001604051908101604052809291908181526020018280546101d79061051c565b80156102245780601f106101f957610100808354040283529160200191610224565b820191906000526020600020905b81548152906001019060200180831161020757829003601f168201915b505050505081565b3460006102597f000000000000000000000000000000000000000000000000000000003b9aca0083610411565b116102765760405162461bcd60e51b815260040161014690610433565b34336001600160a01b03167f0f4961cab7530804898499aa89f5ec81d1a73102e2e4a1f30f88e5ae3513ba2a85856040516102b2929190610556565b60405180910390a3505050565b600080546101ab9061051c565b60008083601f8401126102de57600080fd5b50813567ffffffffffffffff8111156102f657600080fd5b60208301915083602082850101111561030e57600080fd5b9250929050565b6000806000806040858703121561032b57600080fd5b843567ffffffffffffffff8082111561034357600080fd5b61034f888389016102cc565b9096509450602087013591508082111561036857600080fd5b50610375878288016102cc565b95989497509550505050565b600060208083528351808285015260005b818110156103ae57858101830151858201604001528201610392565b506000604082860101526040601f19601f8301168501019250505092915050565b600080602083850312156103e257600080fd5b823567f
fffffffffffffff8111156103f957600080fd5b610405858286016102cc565b90969095509350505050565b60008261042e57634e487b7160e01b600052601260045260246000fd5b500490565b60208082526062908201527f417374726961576974686472617765723a20696e73756666696369656e74207660408201527f616c75652c206d7573742062652067726561746572207468616e203130202a2a60608201527f20283138202d20424153455f434841494e5f41535345545f505245434953494f6080820152614e2960f01b60a082015260c00190565b81835281816020850137506000828201602090810191909152601f909101601f19169091010190565b6040815260006104fe6040830186886104c1565b82810360208401526105118185876104c1565b979650505050505050565b600181811c9082168061053057607f821691505b60208210810361055057634e487b7160e01b600052602260045260246000fd5b50919050565b60208152600061056a6020830184866104c1565b94935050505056fea264697066735822122047a7ef84c0be4640572989abfc01decbc1ae143d4659f1b32047978c67ebc9c864736f6c63430008150033" - + - address: "0x85F9c4C67dD0283c9DC176fd6Ff1Faa85a2F818b" + value: + balance: "1000000000000000000000" + - address: "0xba77d35ba45e26858fE871622707e98dd66EB76b" + value: + balance: "1000000000000000000000" + - address: "0xcC322501052B0d81263027e6c638687c25CE12D6" + value: + balance: "1000000000000000000000" config: # The level at which core astria components will log out @@ -91,10 +109,10 @@ evm-rollup: # - "SoftOnly" -> blocks are only pulled from the sequencer # - "FirmOnly" -> blocks are only pulled from DA # - "SoftAndFirm" -> blocks are pulled from both the sequencer and DA - executionCommitLevel: 'SoftAndFirm' + executionCommitLevel: 'SoftOnly' # The expected fastest block time possible from sequencer, determines polling # rate. 
- sequencerBlockTimeMs: 2000 + sequencerBlockTimeMs: 100 # The maximum number of requests to make to the sequencer per second sequencerRequestsPerSecond: 500 @@ -102,21 +120,21 @@ evm-rollup: rpc: "http://celestia-service.astria-dev-cluster.svc.cluster.local:26658" token: "" - resources: - conductor: - requests: - cpu: 0.01 - memory: 1Mi - limits: - cpu: 0.1 - memory: 20Mi - geth: - requests: - cpu: 0.25 - memory: 256Mi - limits: - cpu: 2 - memory: 1Gi + resources: {} +# conductor: +# requests: +# cpu: 0.01 +# memory: 1Mi +# limits: +# cpu: 0.1 +# memory: 20Mi +# geth: +# requests: +# cpu: 0.25 +# memory: 256Mi +# limits: +# cpu: 2 +# memory: 1Gi storage: enabled: false @@ -128,7 +146,7 @@ evm-rollup: enabled: true ws: enabled: true - + celestia-node: enabled: false @@ -244,4 +262,4 @@ blockscout-stack: value: "none" ingress: enabled: true - hostname: explorer.astria.localdev.me + hostname: explorer.astria.localdev.me \ No newline at end of file diff --git a/dev/values/validators/all.yml b/dev/values/validators/all.yml index 0b482a4e6..6ad4e24fb 100644 --- a/dev/values/validators/all.yml +++ b/dev/values/validators/all.yml @@ -1,4 +1,4 @@ -global: +iglobal: dev: true genesis: diff --git a/dev/values/validators/node0.yml b/dev/values/validators/node0.yml index 9dd3f4ddf..2804706d9 100644 --- a/dev/values/validators/node0.yml +++ b/dev/values/validators/node0.yml @@ -2,13 +2,23 @@ global: namespaceOverride: astria-dev-cluster +images: + cometBFT: + repo: docker.io/cometbft/cometbft + tag: v0.38.8 + devTag: v0.38.8 + sequencer: + repo: astria-sequencer + tag: "0.16.0" + devTag: tb + moniker: node0 genesis: validators: - - name: node0 - power: '1' - address: 091E47761C58C474534F4D414AF104A6CAF90C22 - pubKey: lV57+rGs2vac7mvkGHP1oBFGHPJM3a+WoAzeFDCJDNU= +# - name: node0 +# power: '1' +# address: 091E47761C58C474534F4D414AF104A6CAF90C22 +# pubKey: lV57+rGs2vac7mvkGHP1oBFGHPJM3a+WoAzeFDCJDNU= - name: node1 address: E82D827830B163D5179291FB27BB58E605DF2FA2 pubKey: 
NDE9F44v3l4irmkZxNmrZkywoGmggLlaBo5rE/Cis8M= @@ -39,6 +49,10 @@ cometbft: - 96c652f63b5d5d5027b42e9af906082ee7c598d9@node1-sequencer-p2p-service.astria-validator-node1.svc.cluster.local:26656 - 4a4345939744d64ca370dff266e2913dd41b4e88@node2-sequencer-p2p-service.astria-validator-node2.svc.cluster.local:26656 +sequencer: + composerHook: + enabled: true + ingress: rpc: enabled: true diff --git a/dev/values/validators/node1.yml b/dev/values/validators/node1.yml index 5e6fe80ea..7f5d804b1 100644 --- a/dev/values/validators/node1.yml +++ b/dev/values/validators/node1.yml @@ -1,11 +1,23 @@ # Override value example for second validator from main chart + + +images: + cometBFT: + repo: docker.io/cometbft/cometbft + tag: v0.38.8 + devTag: v0.38.8 + sequencer: + repo: astria-sequencer + tag: "0.16.0" + devTag: tb + moniker: 'node1' genesis: validators: - - name: node0 - power: '1' - address: 091E47761C58C474534F4D414AF104A6CAF90C22 - pubKey: lV57+rGs2vac7mvkGHP1oBFGHPJM3a+WoAzeFDCJDNU= +# - name: node0 +# power: '1' +# address: 091E47761C58C474534F4D414AF104A6CAF90C22 +# pubKey: lV57+rGs2vac7mvkGHP1oBFGHPJM3a+WoAzeFDCJDNU= - name: node1 address: E82D827830B163D5179291FB27BB58E605DF2FA2 pubKey: NDE9F44v3l4irmkZxNmrZkywoGmggLlaBo5rE/Cis8M= @@ -42,3 +54,7 @@ ingress: enabled: false grpc: enabled: false + +sequencer: + composerHook: + enabled: false \ No newline at end of file diff --git a/dev/values/validators/node2.yml b/dev/values/validators/node2.yml index f883b0714..fb8fb6b67 100644 --- a/dev/values/validators/node2.yml +++ b/dev/values/validators/node2.yml @@ -1,11 +1,23 @@ # Override value example for second validator from main chart + + +images: + cometBFT: + repo: docker.io/cometbft/cometbft + tag: v0.38.8 + devTag: v0.38.8 + sequencer: + repo: astria-sequencer + tag: "0.16.0" + devTag: tb + moniker: 'node2' genesis: validators: - - name: node0 - power: '1' - address: 091E47761C58C474534F4D414AF104A6CAF90C22 - pubKey: lV57+rGs2vac7mvkGHP1oBFGHPJM3a+WoAzeFDCJDNU= +# - 
name: node0 +# power: '1' +# address: 091E47761C58C474534F4D414AF104A6CAF90C22 +# pubKey: lV57+rGs2vac7mvkGHP1oBFGHPJM3a+WoAzeFDCJDNU= - name: node1 address: E82D827830B163D5179291FB27BB58E605DF2FA2 pubKey: NDE9F44v3l4irmkZxNmrZkywoGmggLlaBo5rE/Cis8M= @@ -41,3 +53,7 @@ ingress: enabled: false grpc: enabled: false + +sequencer: + composerHook: + enabled: false diff --git a/proto/composerapis/astria/composer/v1alpha1/grpc_collector.proto b/proto/composerapis/astria/composer/v1alpha1/grpc_collector.proto index fa20d378b..3b328fa4a 100644 --- a/proto/composerapis/astria/composer/v1alpha1/grpc_collector.proto +++ b/proto/composerapis/astria/composer/v1alpha1/grpc_collector.proto @@ -2,6 +2,9 @@ syntax = 'proto3'; package astria.composer.v1alpha1; +import "astria/protocol/transactions/v1alpha1/types.proto"; +import "google/protobuf/timestamp.proto"; + // SubmitRollupTransactionRequest contains a rollup transaction to be submitted to the Shared Sequencer Network // via the Composer message SubmitRollupTransactionRequest { @@ -15,9 +18,28 @@ message SubmitRollupTransactionRequest { // It's currently an empty response which can be evolved in the future to include more information message SubmitRollupTransactionResponse {} +message SendOptimisticBlockRequest { + bytes block_hash = 1; + repeated astria.protocol.transactions.v1alpha1.SequenceAction seq_action = 2; + google.protobuf.Timestamp time = 3; +} + +message SendOptimisticBlockResponse {} + +message SendFinalizedHashRequest { + bytes block_hash = 1; +} + +message SendFinalizedHashResponse {} + // GrpcCollectorService is a service that defines the gRPC collector of the Composer service GrpcCollectorService { // SubmitRollupTransaction submits a rollup transactions to the Composer. // The transaction sent is bundled up with other transactions and submitted to the Shared Sequencer Network. 
rpc SubmitRollupTransaction(SubmitRollupTransactionRequest) returns (SubmitRollupTransactionResponse) {} } + +service SequencerHooksService { + rpc SendOptimisticBlock(SendOptimisticBlockRequest) returns (SendOptimisticBlockResponse) {} + rpc SendFinalizedHash(SendFinalizedHashRequest) returns (SendFinalizedHashResponse) {} +} diff --git a/proto/composerapis/astria/composer/v1alpha1/trusted_builder.proto b/proto/composerapis/astria/composer/v1alpha1/trusted_builder.proto new file mode 100644 index 000000000..2b5a8d549 --- /dev/null +++ b/proto/composerapis/astria/composer/v1alpha1/trusted_builder.proto @@ -0,0 +1,23 @@ +syntax = 'proto3'; + +package astria.composer.v1alpha1; + +import "astria/sequencerblock/v1alpha1/block.proto"; + +// BuilderBundle contains a bundle of RollupData transactions which are created by a trusted builder +// It contains the transactions and the parent hash on top of which the bundles were simulated. +message BuilderBundle { + // transactions in the bundle + repeated astria.sequencerblock.v1alpha1.RollupData transactions = 1; + // parent hash of the bundle + bytes parent_hash = 2; +} + +// BuilderBundlePacket is a message that represents a bundle of RollupData transactions and the signature +// of the BuilderBundle by the trusted builder. +message BuilderBundlePacket { + // the bundle of transactions + BuilderBundle bundle = 1; + // the signature of the bundle signed by the trusted builder + bytes signature = 3; +} diff --git a/proto/executionapis/astria/execution/v1alpha2/execution.proto b/proto/executionapis/astria/execution/v1alpha2/execution.proto index 687b32d01..d11bce2f8 100644 --- a/proto/executionapis/astria/execution/v1alpha2/execution.proto +++ b/proto/executionapis/astria/execution/v1alpha2/execution.proto @@ -70,6 +70,17 @@ message ExecuteBlockRequest { repeated astria.sequencerblock.v1alpha1.RollupData transactions = 2; // Timestamp to be used for new block. 
google.protobuf.Timestamp timestamp = 3; + // If true, the block will be created but not persisted. + bool simulate_only = 4; +} + +// ExecuteBlockResponse contains the new block and the transactions that were +// included in the block. +message ExecuteBlockResponse { + // The new block that was created. + Block block = 1; + // The transactions that were included in the block. + repeated astria.sequencerblock.v1alpha1.RollupData included_transactions = 2; } // The CommitmentState holds the block at each stage of sequencer commitment @@ -115,7 +126,7 @@ service ExecutionService { // ExecuteBlock is called to deterministically derive a rollup block from // filtered sequencer block information. - rpc ExecuteBlock(ExecuteBlockRequest) returns (Block); + rpc ExecuteBlock(ExecuteBlockRequest) returns (ExecuteBlockResponse); // GetCommitmentState fetches the current CommitmentState of the chain. rpc GetCommitmentState(GetCommitmentStateRequest) returns (CommitmentState);