diff --git a/crates/composer/src/build_context.rs b/crates/composer/src/build_context.rs
new file mode 100644
index 0000000..71b3f17
--- /dev/null
+++ b/crates/composer/src/build_context.rs
@@ -0,0 +1,99 @@
+use std::{collections::HashMap, fs::File, io::Write, path::Path};
+
+use eyre::Result;
+use flate2::{write::GzEncoder, Compression};
+
+/// A Docker build context containing all necessary info to build a Docker image
+/// from scratch. Files and directories are copied into the build context
+/// archive from the local filesystem.
+#[derive(Debug)]
+pub struct BuildContext<P: AsRef<Path>> {
+ /// The Dockerfile contents
+ pub(crate) dockerfile: String,
+ /// Files to be included in the build context
+    pub(crate) files: Vec<BuildContextObject<P>>,
+    /// Directories to be included in the build context
+    /// (recursively copied with all their contents)
+    pub(crate) dirs: Vec<BuildContextObject<P>>,
+    /// Build args to be passed to the Docker build command
+    pub(crate) buildargs: HashMap<String, String>,
+}
+
+/// A single file or directory to be included in the build context.
+#[derive(Debug)]
+pub(crate) struct BuildContextObject<P: AsRef<Path>> {
+ /// The source path on the local filesystem
+ pub(crate) src: P,
+ /// The destination path in the docker image context
+ pub(crate) dest: P,
+}
+
+impl<P: AsRef<Path>> BuildContext<P> {
+ /// Create a new build context from a Dockerfile string.
+ pub fn from_dockerfile(dockerfile: &str) -> Self {
+ Self {
+ dockerfile: dockerfile.to_string(),
+ files: Vec::new(),
+ dirs: Vec::new(),
+ buildargs: HashMap::new(),
+ }
+ }
+
+ /// Add a file to the build context.
+    pub fn add_file(mut self, src: P, dest: impl Into<P>) -> Self {
+ let dest = dest.into();
+ self.files.push(BuildContextObject { src, dest });
+ self
+ }
+
+ /// Add a directory to the build context (recursively with all its contents).
+    pub fn add_dir(mut self, src: P, dest: impl Into<P>) -> Self {
+ let dest = dest.into();
+ self.dirs.push(BuildContextObject { src, dest });
+ self
+ }
+
+ /// Add a build arg to the build context.
+    pub fn add_build_arg<S>(mut self, key: S, value: S) -> Self
+    where
+        S: Into<String>,
+ {
+ self.buildargs.insert(key.into(), value.into());
+ self
+ }
+
+ /// Create a tarball and gzip the tarball. Returns the compressed output bytes.
+ /// Consumes the build context.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if the tarball cannot be created or compressed.
+    pub fn create_archive(self) -> Result<Vec<u8>> {
+ // First create a Dockerfile tarball
+ let mut header = tar::Header::new_gnu();
+ header.set_path("Dockerfile")?;
+ header.set_size(self.dockerfile.len() as u64);
+ header.set_mode(0o755);
+ header.set_cksum();
+ let mut tar = tar::Builder::new(Vec::new());
+ tar.append(&header, self.dockerfile.as_bytes())?;
+
+ // Append any additional files
+ for file in self.files {
+ let mut f = File::open(file.src)?;
+ tar.append_file(file.dest, &mut f)?;
+ }
+
+ // Append any additional directories
+ for dir in self.dirs {
+ tar.append_dir_all(dir.dest, dir.src)?;
+ }
+
+ let uncompressed = tar.into_inner()?;
+
+ // Finally, gzip the tarball
+ let mut c = GzEncoder::new(Vec::new(), Compression::default());
+ c.write_all(&uncompressed)?;
+ c.finish().map_err(Into::into)
+ }
+}
diff --git a/crates/composer/src/lib.rs b/crates/composer/src/lib.rs
index d8c3766..a8129d9 100644
--- a/crates/composer/src/lib.rs
+++ b/crates/composer/src/lib.rs
@@ -8,7 +8,7 @@
#![deny(unused_must_use, rust_2018_idioms)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-use std::{collections::HashMap, fmt::Debug};
+use std::{collections::HashMap, fmt::Debug, path::Path};
use bollard::{
container::{
@@ -28,10 +28,10 @@ pub use bollard::container::Config;
pub use bollard::image::CreateImageOptions;
pub use bollard::service::HostConfig;
pub use bollard::volume::CreateVolumeOptions;
-pub use utils::bind_host_port;
+pub use build_context::BuildContext;
-/// Utilities for Docker operations
-mod utils;
+/// Utilities for building Docker images
+mod build_context;
/// The Composer is responsible for managing the OP-UP docker containers.
#[derive(Debug)]
@@ -108,24 +108,27 @@ impl Composer {
/// Build a Docker image from the specified Dockerfile and build context files.
pub async fn build_image(
&self,
- name: &str,
- dockerfile: &str,
- build_context_files: &[(&str, &[u8])],
+        name: impl Into<String>,
+        build_context: BuildContext<impl AsRef<Path>>,
) -> Result<()> {
let build_options = BuildImageOptions {
- t: name,
- dockerfile: "Dockerfile",
+ t: name.into(),
+ dockerfile: "Dockerfile".to_string(),
+ buildargs: build_context.buildargs.clone(),
pull: true,
..Default::default()
};
- let files = utils::create_dockerfile_build_context(dockerfile, build_context_files)?;
+ let build_context = build_context.create_archive()?;
let mut image_build_stream =
self.daemon
- .build_image(build_options, None, Some(files.into()));
+ .build_image(build_options, None, Some(build_context.into()));
while let Some(build_info) = image_build_stream.next().await {
- let res = build_info?;
+ let res = match build_info {
+ Ok(build_info) => build_info,
+ Err(e) => eyre::bail!("Error building docker image: {:?}", e),
+ };
tracing::debug!(target: "composer", "Build info: {:?}", res);
}
@@ -181,7 +184,7 @@ impl Composer {
if overwrite {
self.daemon
-                .remove_container(&id, None::<RemoveContainerOptions>)
+                .remove_container(name, None::<RemoveContainerOptions>)
.await?;
tracing::debug!(target: "composer", "Removed existing docker container {}", name);
} else {
@@ -307,3 +310,11 @@ impl Composer {
}
}
}
+
+/// Given a host port, bind it to the container.
+pub fn bind_host_port(host_port: u16) -> Option<Vec<bollard::service::PortBinding>> {
+ Some(vec![bollard::service::PortBinding {
+ host_ip: None,
+ host_port: Some(host_port.to_string()),
+ }])
+}
diff --git a/crates/composer/src/utils.rs b/crates/composer/src/utils.rs
deleted file mode 100644
index 0ec6f04..0000000
--- a/crates/composer/src/utils.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-use std::io::Write;
-
-use bollard::service::PortBinding;
-use eyre::Result;
-use flate2::{write::GzEncoder, Compression};
-
-/// Given a dockerfile string and any number of files as `(filename, file contents)`,
-/// create a tarball and gzip the tarball. Returns the compressed output bytes.
-pub(crate) fn create_dockerfile_build_context(
- dockerfile: &str,
- files: &[(&str, &[u8])],
-) -> Result> {
- // First create a Dockerfile tarball
- let mut header = tar::Header::new_gnu();
- header.set_path("Dockerfile")?;
- header.set_size(dockerfile.len() as u64);
- header.set_mode(0o755);
- header.set_cksum();
- let mut tar = tar::Builder::new(Vec::new());
- tar.append(&header, dockerfile.as_bytes())?;
-
- // Then append any additional files
- for (filename, contents) in files {
- let mut header = tar::Header::new_gnu();
- header.set_path(filename)?;
- header.set_size(contents.len() as u64);
- header.set_mode(0o755);
- header.set_cksum();
- tar.append(&header, *contents)?;
- }
-
- // Finally, gzip the tarball
- let uncompressed = tar.into_inner()?;
- let mut c = GzEncoder::new(Vec::new(), Compression::default());
- c.write_all(&uncompressed)?;
- c.finish().map_err(Into::into)
-}
-
-/// Given a host port, bind it to the container.
-pub fn bind_host_port(host_port: u16) -> Option<Vec<PortBinding>> {
- Some(vec![PortBinding {
- host_ip: Some("127.0.0.1".to_string()),
- host_port: Some(host_port.to_string()),
- }])
-}
diff --git a/crates/primitives/src/artifacts.rs b/crates/primitives/src/artifacts.rs
index 576f7b1..da013b0 100644
--- a/crates/primitives/src/artifacts.rs
+++ b/crates/primitives/src/artifacts.rs
@@ -44,11 +44,25 @@ impl Artifacts {
self.path().join("genesis-l1.json")
}
+    /// Returns the l2 genesis file path.
+ pub fn l2_genesis(&self) -> PathBuf {
+ self.path().join("genesis-l2.json")
+ }
+
+ /// Returns the genesis rollup file path.
+ pub fn rollup_genesis(&self) -> PathBuf {
+ self.path().join("genesis-rollup.json")
+ }
+
/// Returns the jwt secret file path.
pub fn jwt_secret(&self) -> PathBuf {
self.path().join("jwt-secret.txt")
}
+    /// Returns the p2p node key file path.
+    pub fn p2p_node_key(&self) -> PathBuf {
+ self.path().join("p2p-node-key.txt")
+ }
+
/// Create the artifacts directory if it does not exist.
pub fn create(&self) -> Result<()> {
if !self.pwd.exists() {
diff --git a/crates/primitives/src/monorepo.rs b/crates/primitives/src/monorepo.rs
index 412d926..4bc7c08 100644
--- a/crates/primitives/src/monorepo.rs
+++ b/crates/primitives/src/monorepo.rs
@@ -92,11 +92,6 @@ impl Monorepo {
self.devnet().join("genesis-l1.json")
}
- /// Returns the L2 genesis file.
- pub fn l2_genesis(&self) -> PathBuf {
- self.devnet().join("genesis-l2.json")
- }
-
/// Contracts directory.
pub fn contracts(&self) -> PathBuf {
self.path().join("packages/contracts-bedrock")
@@ -131,11 +126,6 @@ impl Monorepo {
pub fn op_node_dir(&self) -> PathBuf {
self.path().join("op-node")
}
-
- /// Returns the genesis rollup file.
- pub fn genesis_rollup(&self) -> PathBuf {
- self.devnet().join("rollup.json")
- }
}
impl Monorepo {
diff --git a/crates/stages/src/stages.rs b/crates/stages/src/stages.rs
index 7ec1e9e..f7115ca 100644
--- a/crates/stages/src/stages.rs
+++ b/crates/stages/src/stages.rs
@@ -65,7 +65,10 @@ impl Stages<'_> {
Arc::clone(&artifacts),
Arc::clone(&monorepo),
)),
- Box::new(prestate::Prestate::new(Arc::clone(&monorepo))),
+ Box::new(prestate::Prestate::new(
+ Arc::clone(&monorepo),
+ Arc::clone(&artifacts),
+ )),
Box::new(allocs::Allocs::new(
Arc::clone(&artifacts),
Arc::clone(&monorepo),
@@ -82,21 +85,27 @@ impl Stages<'_> {
Box::new(l1_exec::Executor::new(
self.config.l1_client_port,
self.config.l1_client,
- composer,
+ Arc::clone(&composer),
Arc::clone(&artifacts),
)),
Box::new(l2_genesis::L2Genesis::new(
self.config.l1_client_url.clone(),
Arc::clone(&monorepo),
+ Arc::clone(&artifacts),
)),
Box::new(contracts::Contracts::new()),
Box::new(l2_exec::Executor::new(
self.config.l2_client_port,
self.config.l2_client,
+ Arc::clone(&composer),
+ Arc::clone(&artifacts),
)),
Box::new(rollup::Rollup::new(
self.config.rollup_client_port,
self.config.rollup_client,
+ Arc::clone(&composer),
+ Arc::clone(&monorepo),
+ Arc::clone(&artifacts),
)),
Box::new(proposer::Proposer::new(Arc::clone(&artifacts))),
Box::new(batcher::Batcher::new(
diff --git a/crates/stages/src/stages/allocs.rs b/crates/stages/src/stages/allocs.rs
index b2f63bc..10892a4 100644
--- a/crates/stages/src/stages/allocs.rs
+++ b/crates/stages/src/stages/allocs.rs
@@ -17,7 +17,7 @@ impl crate::Stage for Allocs {
async fn execute(&self) -> Result<()> {
tracing::info!(target: "stages", "Executing allocs stage");
- let l2_genesis_file = self.monorepo.l2_genesis();
+ let l2_genesis_file = self.artifacts.l2_genesis();
if l2_genesis_file.exists() {
tracing::info!(target: "stages", "l2 genesis file already found");
return Ok(());
diff --git a/crates/stages/src/stages/batcher.rs b/crates/stages/src/stages/batcher.rs
index a25c133..7f16f3b 100644
--- a/crates/stages/src/stages/batcher.rs
+++ b/crates/stages/src/stages/batcher.rs
@@ -25,7 +25,7 @@ impl crate::Stage for Batcher {
let addresses_json = self.artifacts.l1_deployments();
let addresses = crate::json::read_json(&addresses_json)?;
- let genesis_rollup_file = self.monorepo.genesis_rollup();
+ let genesis_rollup_file = self.artifacts.rollup_genesis();
let rollup_config = crate::json::read_json(&genesis_rollup_file)?;
let start_batcher = Command::new("docker-compose")
.args(["up", "-d", "--no-deps", "--build", "batcher"])
diff --git a/crates/stages/src/stages/l1_exec.rs b/crates/stages/src/stages/l1_exec.rs
index 298d757..22174d2 100644
--- a/crates/stages/src/stages/l1_exec.rs
+++ b/crates/stages/src/stages/l1_exec.rs
@@ -5,7 +5,9 @@ use std::sync::Arc;
use async_trait::async_trait;
-use op_composer::{bind_host_port, Composer, Config, CreateVolumeOptions, HostConfig};
+use op_composer::{
+ bind_host_port, BuildContext, Composer, Config, CreateVolumeOptions, HostConfig,
+};
use op_primitives::Artifacts;
/// L1 Execution Client Stage
@@ -17,6 +19,8 @@ pub struct Executor {
     artifacts: Arc<Artifacts>,
}
+const CONTAINER_NAME: &str = "opup-l1";
+
#[async_trait]
impl crate::Stage for Executor {
/// Executes the L1 Executor Stage.
@@ -24,11 +28,9 @@ impl crate::Stage for Executor {
tracing::info!(target: "stages", "Executing l1 execution client stage");
match self.l1_client {
- L1Client::Geth => self.start_geth().await?,
+ L1Client::Geth => self.start_geth().await,
_ => unimplemented!("l1 client not implemented: {}", self.l1_client),
}
-
- Ok(())
}
}
@@ -48,9 +50,9 @@ impl Executor {
}
}
- /// Starts Geth in a docker container.
+ /// Starts Geth in a Docker container.
pub async fn start_geth(&self) -> Result<()> {
- let image_name = "opup-geth".to_string();
+ let image_name = "opup-l1-geth".to_string();
let working_dir = project_root::get_project_root()?.join("docker");
let l1_genesis = self.artifacts.l1_genesis();
let l1_genesis = l1_genesis.to_string_lossy();
@@ -65,11 +67,9 @@ impl Executor {
ENTRYPOINT ["/bin/sh", "/geth-entrypoint.sh"]
"#;
- let geth_entrypoint = std::fs::read(working_dir.join("geth-entrypoint.sh"))?;
- let build_context_files = [("geth-entrypoint.sh", geth_entrypoint.as_slice())];
- self.l1_exec
- .build_image(&image_name, dockerfile, &build_context_files)
- .await?;
+ let context = BuildContext::from_dockerfile(dockerfile)
+ .add_file(working_dir.join("geth-entrypoint.sh"), "geth-entrypoint.sh");
+ self.l1_exec.build_image(&image_name, context).await?;
let l1_data_volume = CreateVolumeOptions {
name: "l1_data",
@@ -91,7 +91,7 @@ impl Executor {
port_bindings: Some(hashmap! {
"8545".to_string() => bind_host_port(8545),
"8546".to_string() => bind_host_port(8546),
- "6060".to_string() => bind_host_port(7060), // TODO: double check this port
+ "6060".to_string() => bind_host_port(7060),
}),
binds: Some(vec![
"l1_data:/db".to_string(),
@@ -105,7 +105,7 @@ impl Executor {
let container_id = self
.l1_exec
- .create_container(&self.l1_client.to_string(), config, true)
+ .create_container(CONTAINER_NAME, config, true)
.await?
.id;
@@ -120,10 +120,6 @@ impl Executor {
crate::net::wait_up(l1_port, 10, 3)?;
tracing::info!(target: "stages", "l1 container started on port: {}", l1_port);
- // todo: do we need to do block here
- // can we wait for the l1 client to be ready by polling?
- std::thread::sleep(std::time::Duration::from_secs(10));
-
Ok(())
}
}
diff --git a/crates/stages/src/stages/l2_exec.rs b/crates/stages/src/stages/l2_exec.rs
index aa7c8e0..4dc230b 100644
--- a/crates/stages/src/stages/l2_exec.rs
+++ b/crates/stages/src/stages/l2_exec.rs
@@ -1,48 +1,115 @@
use async_trait::async_trait;
use eyre::Result;
-use op_primitives::L2Client;
-use std::process::Command;
+use maplit::hashmap;
+use op_composer::{
+ bind_host_port, BuildContext, Composer, Config, CreateVolumeOptions, HostConfig,
+};
+use op_primitives::{Artifacts, L2Client};
+use std::sync::Arc;
/// Layer 2 Execution Client Stage
-#[derive(Debug, Default, Clone, PartialEq)]
+#[derive(Debug)]
pub struct Executor {
     l2_port: Option<u16>,
l2_client: L2Client,
+    l2_exec: Arc<Composer>,
+    artifacts: Arc<Artifacts>,
}
+const CONTAINER_NAME: &str = "opup-l2";
+
#[async_trait]
impl crate::Stage for Executor {
/// Executes the L2 Executor Stage.
async fn execute(&self) -> Result<()> {
tracing::info!(target: "stages", "Executing l2 execution client stage");
- // todo: this should be replaced with running the docker container inline through
- // the op-composer crate anyways so we won't need the docker directory at all.
- let proj_root = project_root::get_project_root()?;
- let docker_dir = proj_root.as_path().join("docker");
-
- let start_l2 = Command::new("docker-compose")
- .args(["up", "-d", "--no-deps", "--build", "l2"])
- .env("PWD", &docker_dir)
- .env("L2_CLIENT_CHOICE", &self.l2_client.to_string())
- .current_dir(docker_dir)
- .output()?;
-
- if !start_l2.status.success() {
- eyre::bail!(
- "failed to start l2 execution client: {}",
- String::from_utf8_lossy(&start_l2.stderr)
- );
+ match self.l2_client {
+ L2Client::OpGeth => self.start_op_geth().await,
+ _ => unimplemented!("l2 execution client not implemented: {}", self.l2_client),
}
-
- let l2_port = self.l2_port.unwrap_or(op_config::L2_PORT);
- crate::net::wait_up(l2_port, 10, 1)
}
}
impl Executor {
/// Creates a new stage.
-    pub fn new(l2_port: Option<u16>, l2_client: L2Client) -> Self {
- Self { l2_port, l2_client }
+ pub fn new(
+        l2_port: Option<u16>,
+        l2_client: L2Client,
+        l2_exec: Arc<Composer>,
+        artifacts: Arc<Artifacts>,
+ ) -> Self {
+ Self {
+ l2_port,
+ l2_client,
+ l2_exec,
+ artifacts,
+ }
+ }
+
+ /// Starts Op-Geth in a Docker container.
+ pub async fn start_op_geth(&self) -> Result<()> {
+ let image_name = "opup-l2-geth".to_string();
+ let working_dir = project_root::get_project_root()?.join("docker");
+ let l2_genesis = self.artifacts.l2_genesis();
+ let l2_genesis = l2_genesis.to_string_lossy();
+ let jwt_secret = self.artifacts.jwt_secret();
+ let jwt_secret = jwt_secret.to_string_lossy();
+
+ let dockerfile = r#"
+ FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:optimism
+ RUN apk add --no-cache jq
+ COPY geth-entrypoint.sh /geth-entrypoint.sh
+ VOLUME ["/db"]
+ ENTRYPOINT ["/bin/sh", "/geth-entrypoint.sh"]
+ "#;
+
+ let context = BuildContext::from_dockerfile(dockerfile)
+ .add_file(working_dir.join("geth-entrypoint.sh"), "geth-entrypoint.sh");
+ self.l2_exec.build_image(&image_name, context).await?;
+
+ let l2_data_volume = CreateVolumeOptions {
+ name: "l2_data",
+ driver: "local",
+ ..Default::default()
+ };
+ self.l2_exec.create_volume(l2_data_volume).await?;
+
+ let config = Config {
+ image: Some(image_name),
+ working_dir: Some(working_dir.to_string_lossy().to_string()),
+ exposed_ports: Some(hashmap! {
+ "8545".to_string() => hashmap!{},
+ "6060".to_string() => hashmap!{},
+ }),
+ host_config: Some(HostConfig {
+ port_bindings: Some(hashmap! {
+ "8545".to_string() => bind_host_port(9545),
+ "6060".to_string() => bind_host_port(8060),
+ }),
+ binds: Some(vec![
+ "l2_data:/db".to_string(),
+ format!("{}:/genesis.json", l2_genesis),
+ format!("{}:/config/test-jwt-secret.txt", jwt_secret),
+ ]),
+ ..Default::default()
+ }),
+ ..Default::default()
+ };
+
+ let container_id = self
+ .l2_exec
+ .create_container(CONTAINER_NAME, config, true)
+ .await?
+ .id;
+ tracing::info!(target: "stages", "l2 container created: {}", container_id);
+
+ self.l2_exec.start_container(&container_id).await?;
+
+ let l2_port = self.l2_port.unwrap_or(op_config::L2_PORT);
+ crate::net::wait_up(l2_port, 10, 1)?;
+ tracing::info!(target: "stages", "l2 container started on port: {}", l2_port);
+
+ Ok(())
}
}
diff --git a/crates/stages/src/stages/l2_genesis.rs b/crates/stages/src/stages/l2_genesis.rs
index 44e9278..3b7365a 100644
--- a/crates/stages/src/stages/l2_genesis.rs
+++ b/crates/stages/src/stages/l2_genesis.rs
@@ -1,6 +1,6 @@
use async_trait::async_trait;
use eyre::Result;
-use op_primitives::{path_to_str, Monorepo};
+use op_primitives::{path_to_str, Artifacts, Monorepo};
use std::process::Command;
use std::sync::Arc;
@@ -9,6 +9,7 @@ use std::sync::Arc;
pub struct L2Genesis {
     l1_url: Option<String>,
     monorepo: Arc<Monorepo>,
+    artifacts: Arc<Artifacts>,
}
#[async_trait]
@@ -17,24 +18,35 @@ impl crate::Stage for L2Genesis {
async fn execute(&self) -> Result<()> {
tracing::info!(target: "stages", "Executing l2 genesis stage");
+ // Artifacts paths
+ let l2_genesis_artifact = self.artifacts.l2_genesis();
+ let rollup_genesis_artifact = self.artifacts.rollup_genesis();
+ let p2p_node_key_artifact = self.artifacts.p2p_node_key();
+
+ // Monorepo paths
let deploy_config = self.monorepo.deploy_config();
let deploy_config = path_to_str!(deploy_config)?;
- let l2_genesis = self.monorepo.l2_genesis();
- let genesis_rollup = self.monorepo.genesis_rollup();
- let genesis_rollup = path_to_str!(genesis_rollup)?;
- // todo: this should not be hardcoded to devnet but
+ // TODO: this should not be hardcoded to devnet but
// the deployments dir should be chosen based on the network
// from the stack.toml config.
let devnet_deploys = self.monorepo.devnet_deploys();
let devnet_deploys = path_to_str!(devnet_deploys)?;
let op_node_dir = self.monorepo.op_node_dir();
- if l2_genesis.exists() {
- tracing::info!(target: "stages", "L2 genesis already found.");
+ if !p2p_node_key_artifact.exists() {
+ tracing::info!(target: "stages", "Creating p2p node key...");
+ // TODO: take this from the TOML stack config
+ let p2p_node_key = "dae4671006c60a3619556ace98eca6f6e092948d05b13070a27ac492a4fba419";
+ std::fs::write(&p2p_node_key_artifact, p2p_node_key)?;
+ }
+
+ if l2_genesis_artifact.exists() && rollup_genesis_artifact.exists() {
+ tracing::info!(target: "stages", "L2 and rollup genesis already found.");
return Ok(());
}
- let l2_genesis_str = path_to_str!(l2_genesis)?;
+ let l2_genesis_str = path_to_str!(l2_genesis_artifact)?;
+ let rollup_genesis_str = path_to_str!(rollup_genesis_artifact)?;
tracing::info!(target: "stages", "Creating L2 and rollup genesis...");
let l1_url = self.l1_url.clone().unwrap_or(op_config::L1_URL.to_owned());
@@ -44,7 +56,7 @@ impl crate::Stage for L2Genesis {
.args(["--deploy-config", deploy_config])
.args(["--deployment-dir", devnet_deploys])
.args(["--outfile.l2", l2_genesis_str])
- .args(["--outfile.rollup", genesis_rollup])
+ .args(["--outfile.rollup", rollup_genesis_str])
.current_dir(op_node_dir)
.output()?;
@@ -61,7 +73,11 @@ impl crate::Stage for L2Genesis {
impl L2Genesis {
/// Creates a new stage.
-    pub fn new(l1_url: Option<String>, monorepo: Arc<Monorepo>) -> Self {
-        Self { l1_url, monorepo }
+    pub fn new(l1_url: Option<String>, monorepo: Arc<Monorepo>, artifacts: Arc<Artifacts>) -> Self {
+ Self {
+ l1_url,
+ monorepo,
+ artifacts,
+ }
}
}
diff --git a/crates/stages/src/stages/prestate.rs b/crates/stages/src/stages/prestate.rs
index 0549ac4..e753f54 100644
--- a/crates/stages/src/stages/prestate.rs
+++ b/crates/stages/src/stages/prestate.rs
@@ -1,6 +1,6 @@
use async_trait::async_trait;
use eyre::Result;
-use op_primitives::Monorepo;
+use op_primitives::{Artifacts, Monorepo};
use std::process::Command;
use std::sync::Arc;
@@ -8,6 +8,7 @@ use std::sync::Arc;
#[derive(Debug, Default, Clone, PartialEq)]
pub struct Prestate {
     monorepo: Arc<Monorepo>,
+    artifacts: Arc<Artifacts>,
}
#[async_trait]
@@ -17,7 +18,7 @@ impl crate::Stage for Prestate {
tracing::info!(target: "stages", "Executing fault proof prestate stage");
let monorepo = self.monorepo.path();
- let l2_genesis_file = self.monorepo.l2_genesis();
+ let l2_genesis_file = self.artifacts.l2_genesis();
if l2_genesis_file.exists() {
tracing::info!(target: "stages", "l2 genesis file already found");
@@ -48,7 +49,10 @@ impl crate::Stage for Prestate {
impl Prestate {
/// Creates a new stage.
-    pub fn new(monorepo: Arc<Monorepo>) -> Self {
-        Self { monorepo }
+    pub fn new(monorepo: Arc<Monorepo>, artifacts: Arc<Artifacts>) -> Self {
+ Self {
+ monorepo,
+ artifacts,
+ }
}
}
diff --git a/crates/stages/src/stages/rollup.rs b/crates/stages/src/stages/rollup.rs
index b334053..f40f218 100644
--- a/crates/stages/src/stages/rollup.rs
+++ b/crates/stages/src/stages/rollup.rs
@@ -1,52 +1,153 @@
use async_trait::async_trait;
use eyre::Result;
-use op_primitives::RollupClient;
-use std::process::Command;
+use maplit::hashmap;
+use op_composer::{
+ bind_host_port, BuildContext, Composer, Config, CreateVolumeOptions, HostConfig,
+};
+use op_primitives::{Artifacts, Monorepo, RollupClient};
+use std::sync::Arc;
/// Rollup Stage
-#[derive(Debug, Default, Clone, PartialEq)]
+#[derive(Debug)]
pub struct Rollup {
     rollup_port: Option<u16>,
rollup_client: RollupClient,
+    rollup_exec: Arc<Composer>,
+    monorepo: Arc<Monorepo>,
+    artifacts: Arc<Artifacts>,
}
+const CONTAINER_NAME: &str = "opup-rollup";
+
#[async_trait]
impl crate::Stage for Rollup {
/// Executes the [Rollup] stage.
async fn execute(&self) -> Result<()> {
tracing::info!(target: "stages", "Executing rollup stage");
- // todo: this should be replaced with running the docker container inline through
- // the op-composer crate anyways so we won't need the docker directory at all.
- let proj_root = project_root::get_project_root()?;
- let docker_dir = proj_root.as_path().join("docker");
-
- tracing::info!(target: "stages", "Starting rollup client {}", &self.rollup_client);
- let start_rollup = Command::new("docker-compose")
- .args(["up", "-d", "--no-deps", "--build", "rollup-client"])
- .env("PWD", &docker_dir)
- .env("ROLLUP_CLIENT_CHOICE", &self.rollup_client.to_string())
- .current_dir(docker_dir)
- .output()?;
-
- if !start_rollup.status.success() {
- eyre::bail!(
- "failed to start rollup client: {}",
- String::from_utf8_lossy(&start_rollup.stderr)
- );
+ match self.rollup_client {
+ RollupClient::OpNode => self.start_op_node().await,
+ _ => unimplemented!("rollup client not implemented: {}", self.rollup_client),
}
-
- let rollup_port = self.rollup_port.unwrap_or(op_config::ROLLUP_PORT);
- crate::net::wait_up(rollup_port, 30, 1)
}
}
impl Rollup {
/// Creates a new stage.
-    pub fn new(rollup_port: Option<u16>, rollup_client: RollupClient) -> Self {
+ pub fn new(
+        rollup_port: Option<u16>,
+        rollup_client: RollupClient,
+        rollup_exec: Arc<Composer>,
+        monorepo: Arc<Monorepo>,
+        artifacts: Arc<Artifacts>,
+ ) -> Self {
Self {
rollup_port,
rollup_client,
+ rollup_exec,
+ monorepo,
+ artifacts,
}
}
+
+ /// Starts Op-Node in a Docker container.
+ pub async fn start_op_node(&self) -> Result<()> {
+ let image_name = "opup-op-node".to_string();
+ let working_dir = project_root::get_project_root()?.join("docker");
+ let monorepo = self.monorepo.path();
+ let rollup_genesis = self.artifacts.rollup_genesis();
+ let rollup_genesis = rollup_genesis.to_string_lossy();
+ let jwt_secret = self.artifacts.jwt_secret();
+ let jwt_secret = jwt_secret.to_string_lossy();
+ let p2p_node_key = self.artifacts.p2p_node_key();
+ let p2p_node_key = p2p_node_key.to_string_lossy();
+
+ let dockerfile = r#"
+ ARG BUILDPLATFORM
+ FROM --platform=$BUILDPLATFORM golang:1.21.1-alpine3.18 as builder
+ ARG VERSION=v0.0.0
+ RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash
+ COPY ./go.mod /app/go.mod
+ COPY ./go.sum /app/go.sum
+ WORKDIR /app
+ RUN go mod download
+ # build op-node with the shared go.mod & go.sum files
+ COPY ./op-node /app/op-node
+ COPY ./op-chain-ops /app/op-chain-ops
+ COPY ./op-service /app/op-service
+ COPY ./op-bindings /app/op-bindings
+ WORKDIR /app/op-node
+ ARG TARGETOS TARGETARCH
+ RUN make op-node VERSION="$VERSION" GOOS=$TARGETOS GOARCH=$TARGETARCH
+ FROM alpine:3.18
+ COPY --from=builder /app/op-node/bin/op-node /usr/local/bin
+ COPY op-node-entrypoint.sh /op-node-entrypoint.sh
+ ENTRYPOINT ["/bin/sh", "/op-node-entrypoint.sh"]
+ "#;
+
+ let context = BuildContext::from_dockerfile(dockerfile)
+ .add_build_arg("BUILDPLATFORM", "linux/arm64") // TODO: this should be dynamic
+ .add_build_arg("TARGETOS", "linux")
+ .add_build_arg("TARGETARCH", "arm64")
+ .add_file(monorepo.join("go.mod"), "go.mod")
+ .add_file(monorepo.join("go.sum"), "go.sum")
+ .add_dir(monorepo.join("op-node"), "op-node")
+ .add_dir(monorepo.join("op-chain-ops"), "op-chain-ops")
+ .add_dir(monorepo.join("op-service"), "op-service")
+ .add_dir(monorepo.join("op-bindings"), "op-bindings")
+ .add_file(
+ working_dir.join("op-node-entrypoint.sh"),
+ "op-node-entrypoint.sh",
+ );
+ self.rollup_exec.build_image(&image_name, context).await?;
+
+ let op_log_volume = CreateVolumeOptions {
+ name: "op_log",
+ driver: "local",
+ ..Default::default()
+ };
+ self.rollup_exec.create_volume(op_log_volume).await?;
+
+ let config = Config {
+ image: Some(image_name),
+ working_dir: Some(working_dir.to_string_lossy().to_string()),
+ exposed_ports: Some(hashmap! {
+ "8545".to_string() => hashmap!{},
+ "6060".to_string() => hashmap!{},
+ "9003".to_string() => hashmap!{},
+ "7300".to_string() => hashmap!{},
+ }),
+ host_config: Some(HostConfig {
+ port_bindings: Some(hashmap! {
+ "8545".to_string() => bind_host_port(7545),
+ "6060".to_string() => bind_host_port(6060),
+ "9003".to_string() => bind_host_port(9003),
+ "7300".to_string() => bind_host_port(7300),
+ }),
+ binds: Some(vec![
+ "op_log:/op_log".to_string(),
+ format!("{}:/rollup.json", rollup_genesis),
+ format!("{}:/config/test-jwt-secret.txt", jwt_secret),
+ format!("{}:/config/p2p-node-key.txt", p2p_node_key),
+ ]),
+ ..Default::default()
+ }),
+ ..Default::default()
+ };
+
+ let container_id = self
+ .rollup_exec
+ .create_container(CONTAINER_NAME, config, true)
+ .await?
+ .id;
+ tracing::info!(target: "stages", "rollup container created: {}", container_id);
+
+ self.rollup_exec.start_container(&container_id).await?;
+
+ let rollup_port = self.rollup_port.unwrap_or(op_config::ROLLUP_PORT);
+ crate::net::wait_up(rollup_port, 30, 1)?;
+ tracing::info!(target: "stages", "rollup container started on port: {}", rollup_port);
+
+ Ok(())
+ }
}
diff --git a/docker/op-node-entrypoint.sh b/docker/op-node-entrypoint.sh
index b86f366..20c70f0 100644
--- a/docker/op-node-entrypoint.sh
+++ b/docker/op-node-entrypoint.sh
@@ -3,8 +3,8 @@
set -exu
exec op-node \
- --l1=ws://l1:8546 \
- --l2=http://l2:8551 \
+ --l1=ws://opup-l1:8546 \
+ --l2=http://opup-l2:8551 \
--l2.jwt-secret=/config/test-jwt-secret.txt \
--sequencer.enabled \
--sequencer.l1-confs=0 \