diff --git a/public/locales/en/translation.json b/public/locales/en/translation.json
index 342470a0b..90e8d95bf 100644
--- a/public/locales/en/translation.json
+++ b/public/locales/en/translation.json
@@ -118,24 +118,20 @@
"scrollArchitecture": "Scroll Architecture",
"scrollUpgrades": "Scroll Upgrades",
"overview": "Overview",
-
"scrollChain": "Scroll Chain",
"accountsAndState": "Accounts and State",
"blocks": "Blocks",
"transactions": "Transactions",
"rollupProcess": "Rollup Process",
"evmDifferencesFromEthereum": "EVM Differences from Ethereum",
-
"bridge": "Bridge",
"crossDomainMessaging": "Cross-Domain Messaging",
"depositGateways": "Deposit Gateways",
"withdrawGateways": "Withdraw Gateways",
-
"sequencer": "Sequencer",
"executionNode": "Execution Node",
"rollupNode": "Rollup Node",
"zkTrie": "zkTrie",
-
"zkevm": "zkEVM",
"introToZkevm": "Intro to zkEVM",
"zkevmOverview": "zkEVM Overview",
@@ -143,7 +139,6 @@
"prover": "Prover",
"proofGeneration": "Proof Generation",
"cpuProverRepo": "CPU Prover Repo",
-
"security": "Security",
"auditsAndBugBounty": "Audits & Bug Bounty",
"l2BeatAssessment": "L2Beat Assessment"
@@ -157,6 +152,30 @@
"polynomialCommitmentSchemes": "Polynomial Commitment Schemes",
"kzgCommitmentScheme": "KZG Commitment Scheme",
"additionalResources": "Additional Resources"
+ },
+ "sdk": {
+ "overview": "Overview",
+ "scrollSdk": "Scroll SDK Introduction",
+ "faq": "Scroll SDK FAQ",
+ "technicalStack": "Technical Stack",
+ "stackOverview": "Stack Overview",
+ "configuration": "Configuration",
+ "services": "Services",
+ "smartContracts": "Smart Contracts",
+ "proofGeneration": "Proof Generation",
+ "integrations": "Integrations",
+ "guides": "Guides",
+ "devnetDeployment": "Devnet Deployment",
+ "productionDeployment": "Production Deployment",
+ "digitalOcean": "Digital Ocean & ERC20 Gas Token Testnet",
+ "customizingSdkComponents": "Customizing SDK Components",
+ "awsDeployment": "AWS Deployment",
+ "operation": "Operating a Chain",
+ "gasAndFees": "Gas & Fee Management",
+ "monitoring": "Monitoring",
+ "security": "Security and Recovery",
+ "upgrades": "Upgrading",
+ "troubleshooting": "Troubleshooting"
}
},
"footer": {
diff --git a/src/assets/svgs/home/home-sdk.svg b/src/assets/svgs/home/home-sdk.svg
new file mode 100644
index 000000000..d63c492c7
--- /dev/null
+++ b/src/assets/svgs/home/home-sdk.svg
@@ -0,0 +1,3 @@
+
\ No newline at end of file
diff --git a/src/config/menu.ts b/src/config/menu.ts
index 9d9baeeee..7b426a6a3 100644
--- a/src/config/menu.ts
+++ b/src/config/menu.ts
@@ -16,6 +16,7 @@ export const MENU: MenuItems = {
{ text: "Developers", link: "/en/developers", section: "developers" },
{ text: "Technology", link: "/en/technology", section: "technology" },
{ text: "Learn", link: "/en/learn", section: "learn" },
+ { text: "SDK", link: "/en/sdk", section: "sdk" },
],
zh: [
{
diff --git a/src/config/sidebar.ts b/src/config/sidebar.ts
index f8d82980f..c2145bbcd 100644
--- a/src/config/sidebar.ts
+++ b/src/config/sidebar.ts
@@ -340,5 +340,99 @@ export const getSidebar = () => {
],
},
],
+ sdk: [
+ {
+ section: t("sidebar.sdk.overview"),
+ contents: [
+ {
+ title: t("sidebar.sdk.scrollSdk"),
+ url: "sdk/",
+ },
+ {
+ title: t("sidebar.sdk.faq"),
+ url: "sdk/sdk-faq",
+ },
+ ],
+ },
+ {
+ section: t("sidebar.sdk.technicalStack"),
+ contents: [
+ {
+ title: t("sidebar.sdk.stackOverview"),
+ url: formatUrl("sdk/technical-stack/"),
+ },
+ {
+ title: t("sidebar.sdk.configuration"),
+ url: formatUrl("sdk/technical-stack/configuration"),
+ },
+ {
+ title: t("sidebar.sdk.services"),
+ url: formatUrl("sdk/technical-stack/services"),
+ },
+ {
+ title: t("sidebar.sdk.smartContracts"),
+ url: formatUrl("sdk/technical-stack/contracts"),
+ },
+ {
+ title: t("sidebar.sdk.proofGeneration"),
+ url: formatUrl("sdk/technical-stack/proof-generation"),
+ },
+ {
+ title: t("sidebar.sdk.integrations"),
+ url: formatUrl("sdk/technical-stack/integrations"),
+ },
+ ],
+ },
+ {
+ section: t("sidebar.sdk.guides"),
+ contents: [
+ {
+ title: t("sidebar.sdk.devnetDeployment"),
+ url: formatUrl("sdk/guides/devnet-deployment"),
+ },
+ // {
+ // title: t("sidebar.sdk.productionDeployment"),
+ // url: formatUrl("sdk/guides/production-deployment"),
+ // },
+ {
+ title: t("sidebar.sdk.digitalOcean"),
+ url: formatUrl("sdk/guides/digital-ocean-alt-gas-token"),
+ },
+ {
+ title: t("sidebar.sdk.awsDeployment"),
+ url: formatUrl("sdk/guides/aws-deployment"),
+ },
+ {
+ title: t("sidebar.sdk.customizingSdkComponents"),
+ url: formatUrl("sdk/guides/customizing-sdk-components"),
+ },
+ ],
+ },
+ {
+ section: t("sidebar.sdk.operation"),
+ contents: [
+ {
+ title: t("sidebar.sdk.gasAndFees"),
+ url: formatUrl("sdk/operation/gas-and-fees"),
+ },
+ {
+ title: t("sidebar.sdk.monitoring"),
+ url: formatUrl("sdk/operation/monitoring"),
+ },
+ {
+ title: t("sidebar.sdk.upgrades"),
+ url: formatUrl("sdk/operation/upgrades"),
+ },
+ {
+ title: t("sidebar.sdk.troubleshooting"),
+ url: formatUrl("sdk/operation/troubleshooting"),
+ },
+ {
+ title: t("sidebar.sdk.security"),
+ url: formatUrl("sdk/operation/security-and-recovery"),
+ },
+ ],
+ },
+ ],
}
}
diff --git a/src/content/docs/en/sdk/guides/_images/AddNewWebhookToWorkspace.png b/src/content/docs/en/sdk/guides/_images/AddNewWebhookToWorkspace.png
new file mode 100644
index 000000000..49b18b478
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/AddNewWebhookToWorkspace.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/CopyWebhookURL.png b/src/content/docs/en/sdk/guides/_images/CopyWebhookURL.png
new file mode 100644
index 000000000..d80e2822c
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/CopyWebhookURL.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/CreateSlackApp.png b/src/content/docs/en/sdk/guides/_images/CreateSlackApp.png
new file mode 100644
index 000000000..5a9ad5bd5
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/CreateSlackApp.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/aws-cloudflare-dns-wildcards.png b/src/content/docs/en/sdk/guides/_images/aws-cloudflare-dns-wildcards.png
new file mode 100644
index 000000000..663b99fef
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/aws-cloudflare-dns-wildcards.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/aws-cloudflare-dns.png b/src/content/docs/en/sdk/guides/_images/aws-cloudflare-dns.png
new file mode 100644
index 000000000..ac3dfa039
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/aws-cloudflare-dns.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/aws-ebs-driver.png b/src/content/docs/en/sdk/guides/_images/aws-ebs-driver.png
new file mode 100644
index 000000000..3a7edf62f
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/aws-ebs-driver.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/cloudflare-dns-frontends.png b/src/content/docs/en/sdk/guides/_images/cloudflare-dns-frontends.png
new file mode 100644
index 000000000..9d6f5e2df
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/cloudflare-dns-frontends.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/cloudflare-dns.png b/src/content/docs/en/sdk/guides/_images/cloudflare-dns.png
new file mode 100644
index 000000000..e8515bddf
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/cloudflare-dns.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/do-cluster-connection.png b/src/content/docs/en/sdk/guides/_images/do-cluster-connection.png
new file mode 100644
index 000000000..468c7235f
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/do-cluster-connection.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/do-connection-info.png b/src/content/docs/en/sdk/guides/_images/do-connection-info.png
new file mode 100644
index 000000000..03cffe47a
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/do-connection-info.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/do-connection-pools.png b/src/content/docs/en/sdk/guides/_images/do-connection-pools.png
new file mode 100644
index 000000000..e88bfb850
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/do-connection-pools.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/do-database-restrictions-ip.png b/src/content/docs/en/sdk/guides/_images/do-database-restrictions-ip.png
new file mode 100644
index 000000000..37f095891
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/do-database-restrictions-ip.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/do-database-restrictions.png b/src/content/docs/en/sdk/guides/_images/do-database-restrictions.png
new file mode 100644
index 000000000..3b4c2077b
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/do-database-restrictions.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/do-database-setup.png b/src/content/docs/en/sdk/guides/_images/do-database-setup.png
new file mode 100644
index 000000000..b933ba826
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/do-database-setup.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/do-kubernetes-cluster-2.png b/src/content/docs/en/sdk/guides/_images/do-kubernetes-cluster-2.png
new file mode 100644
index 000000000..c4cc59cfd
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/do-kubernetes-cluster-2.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/do-kubernetes-cluster.png b/src/content/docs/en/sdk/guides/_images/do-kubernetes-cluster.png
new file mode 100644
index 000000000..410dbe253
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/do-kubernetes-cluster.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/do-load-balancer.png b/src/content/docs/en/sdk/guides/_images/do-load-balancer.png
new file mode 100644
index 000000000..ea3099ee2
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/do-load-balancer.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/do-marketplace-addons.png b/src/content/docs/en/sdk/guides/_images/do-marketplace-addons.png
new file mode 100644
index 000000000..744c68bde
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/do-marketplace-addons.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/do-vpc-network.png b/src/content/docs/en/sdk/guides/_images/do-vpc-network.png
new file mode 100644
index 000000000..ae09279cf
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/do-vpc-network.png differ
diff --git a/src/content/docs/en/sdk/guides/_images/grafana.png b/src/content/docs/en/sdk/guides/_images/grafana.png
new file mode 100644
index 000000000..147222599
Binary files /dev/null and b/src/content/docs/en/sdk/guides/_images/grafana.png differ
diff --git a/src/content/docs/en/sdk/guides/aws-deployment.mdx b/src/content/docs/en/sdk/guides/aws-deployment.mdx
new file mode 100644
index 000000000..9706a9673
--- /dev/null
+++ b/src/content/docs/en/sdk/guides/aws-deployment.mdx
@@ -0,0 +1,876 @@
+---
+section: sdk
+date: Last Modified
+title: "AWS EKS Deployment"
+lang: "en"
+permalink: "sdk/guides/aws-deployment"
+excerpt: "Learn how to deploy the Scroll SDK on AWS EKS."
+---
+
+import Aside from "../../../../../components/Aside.astro";
+import ClickToZoom from "../../../../../components/ClickToZoom.astro";
+import Steps from '../../../../../components/Steps/Steps.astro';
+import ToggleElement from "../../../../../components/ToggleElement.astro";
+import AWSCloudflareDNSWildcards from "./_images/aws-cloudflare-dns-wildcards.png";
+import AWSCloudflareDNS from "./_images/aws-cloudflare-dns.png";
+import AWSEBSDriver from "./_images/aws-ebs-driver.png";
+import CreateSlackApp from "./_images/CreateSlackApp.png"
+import AddNewWebhookToWorkspace from "./_images/AddNewWebhookToWorkspace.png"
+import CopyWebhookURL from "./_images/CopyWebhookURL.png"
+import GrafanaDashboard from "./_images/grafana.png"
+
+This guide demonstrates how to deploy the Scroll SDK on Amazon Web Services (AWS) using Elastic Kubernetes Service (EKS) and other managed services.
+
+This guide is designed for chain owners and developers who may not be DevOps experts, but who want to understand the process of setting up a more sophisticated cloud deployment than a local devnet.
+
+
+
+
+
+
+## Getting your machine ready
+
+### Installing Prerequisites
+
+Ensure you have the following tools installed on your local machine:
+
+- [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html)
+- [eksctl](https://eksctl.io/installation/)
+- [kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html)
+- [Helm](https://helm.sh/docs/intro/install/)
+- [Docker](https://docs.docker.com/get-docker/)
+- [Node.js](https://nodejs.org/en/download/) ≥ 18
+- [jq](https://jqlang.github.io/jq/download/)
+- [scroll-sdk-cli](https://www.npmjs.com/package/scroll-sdk-cli)
+- [k9s](https://k9scli.io/topics/install/) (optional, but recommended for cluster management)
+
+{/* TODO: Replace with new cli url */}
+
+Make sure to follow the installation instructions for each tool on their respective websites. For `kubectl`, you can refer to the detailed installation steps provided in the [Amazon EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html).
+
+To install the scroll-sdk-cli, run:
+
+```bash
+npm install -g scroll-sdk-cli
+```
+
+{/* TODO: Replace with new command */}
+
+Verify your setup by running:
+
+```bash
+scrollsdk test dependencies
+```
+
+### Configuring AWS CLI
+
+Before we begin, you need to configure your AWS CLI with your credentials:
+
+1. Create an IAM user in AWS with appropriate permissions (AdministratorAccess for simplicity, but you may want to restrict this in production).
+2. Get the Access Key ID and Secret Access Key for this user.
+3. Run the following command and enter your credentials when prompted:
+
+```bash
+aws configure
+```
+
+This will create a `~/.aws/credentials` file with your AWS credentials.
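+
+To confirm the CLI is picking up the right credentials, you can query the caller identity:
+
+```bash
+aws sts get-caller-identity
+```
+
+This should print the account ID and ARN of the IAM user you just configured.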
+
+### Setting up your Owner Wallet
+
+The chain owner has the ability to upgrade essential contracts. We recommend setting up a Safe with at least 2 independently controlled signers required for managing upgrades. You may also consider adding as signers any external party that will help you execute upgrades, as they'll be able to propose new transactions.
+
+For this demo, we'll use a temporary development account created in MetaMask. Make sure to save the private key securely.
+
+## Setting up your infrastructure
+
+### Creating an EKS Cluster
+
+Let's create our EKS cluster using `eksctl`. Run the following command:
+
+```bash
+eksctl create cluster \
+ --name scroll-sdk-cluster \
+ --region us-west-2 \
+ --nodegroup-name standard-workers \
+ --node-type t3.2xlarge \
+ --nodes 3 \
+ --nodes-min 1 \
+ --nodes-max 4 \
+ --managed
+```
+
+This will create a cluster with 3 t3.2xlarge nodes, which should be sufficient for our needs. Adjust the region and node type as needed.
+
+### Configuring Persistent Storage
+
+After creating your EKS cluster, you need to set up persistent storage using Amazon Elastic Block Store (EBS). This involves installing the EBS CSI driver, configuring IAM permissions, and setting a default storage class.
+
+#### Installing the EBS CSI Driver
+
+The EBS Container Storage Interface (CSI) driver allows your Kubernetes cluster to manage the lifecycle of Amazon EBS volumes for persistent storage.
+
+1. Navigate to the EKS console for your cluster.
+2. Go to the "Add-ons" section and install the "Amazon EBS CSI Driver" add-on.
+
+
+
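+Alternatively, the add-on can be installed from the command line (assuming the cluster name and region used earlier in this guide):
+
+```bash
+aws eks create-addon \
+  --cluster-name scroll-sdk-cluster \
+  --addon-name aws-ebs-csi-driver \
+  --region us-west-2
+```
+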
+#### Configuring IAM Permissions
+
+To allow your EKS nodes to interact with EBS volumes, attach the appropriate IAM policy:
+
+```bash
+# Create an EBS policy file named ebs-policy.json:
+cat <<EOF > ebs-policy.json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "ec2:CreateVolume",
+        "ec2:DeleteVolume",
+        "ec2:AttachVolume",
+        "ec2:DetachVolume",
+        "ec2:DescribeVolumes",
+        "ec2:DescribeVolumesModifications",
+        "ec2:ModifyVolume"
+      ],
+      "Resource": "*"
+    }
+  ]
+}
+EOF
+
+# Create the IAM policy:
+aws iam create-policy --policy-name EKSNodeEBSPolicy --policy-document file://ebs-policy.json
+
+# Identify your node group's IAM role:
+aws eks describe-nodegroup --cluster-name scroll-sdk-cluster --nodegroup-name standard-workers --query "nodegroup.nodeRole" --output text --region us-west-2
+
+# Attach the policy to the node group's role:
+aws iam attach-role-policy --role-name <NODE_ROLE_NAME> --policy-arn arn:aws:iam::<AWS_ACCOUNT_ID>:policy/EKSNodeEBSPolicy
+```
+
+Be sure to replace `<NODE_ROLE_NAME>` and `<AWS_ACCOUNT_ID>` with your specific details.
+
+#### Setting the Default Storage Class
+
+To ensure that your Kubernetes pods can automatically provision EBS volumes:
+
+```bash
+# Verify available storage classes:
+kubectl get sc
+
+# Create a new gp3 storage class and set it as the default:
+cat <<EOF | kubectl apply -f -
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: gp3
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+provisioner: ebs.csi.aws.com
+volumeBindingMode: WaitForFirstConsumer
+parameters:
+  type: gp3
+EOF
+
+# Note: if an existing class (e.g. gp2) is still annotated as default, you may want to remove that annotation.
+
+# Confirm the new class is listed and marked as default:
+kubectl get sc
+```
+
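+### Creating an RDS Database
+
+Next, we'll create a managed PostgreSQL instance on Amazon RDS for the SDK services. The sequence below is a sketch of the required commands; adjust identifiers, the instance class, and the region to match your environment, and replace `<YOUR_PASSWORD>` with a strong password.
+
+```bash
+# Get the VPC ID of our EKS cluster
+VPC_ID=$(aws eks describe-cluster --name scroll-sdk-cluster --region us-west-2 \
+  --query "cluster.resourcesVpcConfig.vpcId" --output text)
+
+# Create a new security group for our RDS instance in the same VPC
+SECURITY_GROUP_ID=$(aws ec2 create-security-group \
+  --group-name scroll-sdk-db-sg \
+  --description "Security group for the Scroll SDK RDS instance" \
+  --vpc-id $VPC_ID \
+  --query "GroupId" --output text --region us-west-2)
+
+# Get the security group ID for the EKS cluster
+EKS_SECURITY_GROUP_ID=$(aws eks describe-cluster --name scroll-sdk-cluster --region us-west-2 \
+  --query "cluster.resourcesVpcConfig.clusterSecurityGroupId" --output text)
+
+# Allow inbound PostgreSQL traffic from the EKS cluster security group
+aws ec2 authorize-security-group-ingress \
+  --group-id $SECURITY_GROUP_ID \
+  --protocol tcp \
+  --port 5432 \
+  --source-group $EKS_SECURITY_GROUP_ID \
+  --region us-west-2
+
+# Get the subnet IDs in the VPC
+SUBNET_IDS=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=$VPC_ID" \
+  --query "Subnets[*].SubnetId" --output text --region us-west-2)
+
+# Create a DB subnet group using the VPC subnets
+aws rds create-db-subnet-group \
+  --db-subnet-group-name scroll-sdk-subnet-group \
+  --db-subnet-group-description "Subnet group for the Scroll SDK database" \
+  --subnet-ids $SUBNET_IDS \
+  --region us-west-2
+
+# Create the PostgreSQL database
+aws rds create-db-instance \
+  --db-instance-identifier scroll-sdk-db \
+  --db-instance-class db.t3.2xlarge \
+  --engine postgres \
+  --master-username scrolladmin \
+  --master-user-password <YOUR_PASSWORD> \
+  --allocated-storage 20 \
+  --vpc-security-group-ids $SECURITY_GROUP_ID \
+  --db-subnet-group-name scroll-sdk-subnet-group \
+  --db-name scrolldb \
+  --publicly-accessible \
+  --region us-west-2
+
+# Wait for the database to be available
+aws rds wait db-instance-available --db-instance-identifier scroll-sdk-db --region us-west-2
+```
+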
+This command sequence does the following:
+- Gets the VPC ID of our EKS cluster
+- Creates a new security group for our RDS instance in the same VPC
+- Gets the security group ID for the EKS cluster
+- Allows inbound PostgreSQL traffic from the EKS cluster security group
+- Gets the subnet IDs in the VPC
+- Creates a DB subnet group using the VPC subnets
+- Creates a PostgreSQL database named `scrolldb`
+- Uses a `db.t3.2xlarge` instance (suitable for testing, adjust as needed)
+- Sets the master username to `scrolladmin` (change this as desired)
+- Allocates 20GB of storage
+- Makes the database publicly accessible for initial setup (we'll restrict access later)
+- Creates the database in the `us-west-2` region (adjust as needed)
+
+Make sure to replace `<YOUR_PASSWORD>` with a strong password.
+
+After the database is created, you can retrieve its endpoint using:
+
+```bash
+aws rds describe-db-instances --db-instance-identifier scroll-sdk-db --query "DBInstances[0].Endpoint.Address" --output text --region us-west-2
+```
+Make note of this endpoint, as you'll need it when configuring your Scroll SDK.
+
+### Setting up a Load Balancer
+
+When you install the NGINX Ingress Controller, it automatically creates an AWS Elastic Load Balancer (ELB) for you. This load balancer is used to route external traffic to your Kubernetes services.
+
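+If you haven't installed the NGINX Ingress Controller yet, a typical installation using the upstream `ingress-nginx` Helm chart looks roughly like this (the release name and namespace here match the service name referenced below):
+
+```bash
+helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
+helm repo update
+helm install nginx-ingress ingress-nginx/ingress-nginx -n kube-system
+```
+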
+To view the created load balancer:
+
+1. Wait a few minutes after installing the NGINX Ingress Controller for the load balancer to be provisioned.
+
+2. You can check the status of the load balancer creation by running:
+
+ ```bash
+ kubectl get services -n kube-system
+ ```
+
+ Look for a service named something like `nginx-ingress-ingress-nginx-controller`. The `EXTERNAL-IP` column will show the DNS name of your load balancer once it's ready.
+
+3. You can also view the load balancer in the AWS Console:
+ - Go to the EC2 dashboard
+ - Click on "Load Balancers" in the left sidebar
+ - You should see a new load balancer created for your NGINX Ingress Controller
+
+Remember the DNS name of this load balancer, as you'll need to point your domain to this address when configuring DNS.
+
+
+
+We'll configure the routing rules for this load balancer when we set up our ingress resources later in this guide.
+
+### Configuring your DNS with Cloudflare
+
+We'll use Cloudflare for DNS management, setting up a subdomain and a wildcard record. Here's how to set it up:
+
+1. If you haven't already, create a Cloudflare account and add your domain to Cloudflare.
+
+2. In the Cloudflare dashboard, go to the DNS settings for your domain.
+
+3. Add two new DNS records:
+
+ a. Subdomain record:
+ - Type: CNAME
+ - Name: scrollsdk
+ - Target: Paste the DNS name of your AWS load balancer (the EXTERNAL-IP you noted earlier)
+ - Proxy status: Set to "DNS only" (grey cloud icon) to bypass Cloudflare's proxy
+
+ b. Wildcard record:
+ - Type: CNAME
+ - Name: *.scrollsdk
+ - Target: scrollsdk.yourdomain.xyz
+ - Proxy status: Set to "DNS only" (grey cloud icon)
+
+4. Save your changes.
+
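+Once the records have propagated, you can confirm they resolve to your load balancer with `dig`:
+
+```bash
+dig +short scrollsdk.yourdomain.xyz
+dig +short frontends.scrollsdk.yourdomain.xyz
+```
+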
+
+
+
+
+
+
+Remember to update your Scroll SDK configuration to use these new subdomains. For example, you might use:
+
+- frontends.scrollsdk.yourdomain.xyz for your frontend
+- l2-rpc.scrollsdk.yourdomain.xyz for your API
+
+Make sure to update these in your `config.toml` file and any other relevant configuration files.
+
+
+
+### Connecting to your Cluster
+
+After creating the cluster, `eksctl` should have automatically updated your kubeconfig. Verify the connection by running:
+
+```bash
+kubectl get nodes
+```
+
+You should see your cluster nodes listed.
+
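+If the context is missing (for example, when connecting from a different machine), you can regenerate the kubeconfig entry manually:
+
+```bash
+aws eks update-kubeconfig --name scroll-sdk-cluster --region us-west-2
+```
+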
+### Adding External Secrets Operator
+
+Scroll SDK uses [External Secrets](https://external-secrets.io/latest/introduction/getting-started/) to manage sensitive information. Once you have `kubectl` working with your cluster, run the following:
+
+```bash
+helm repo add external-secrets https://charts.external-secrets.io
+helm repo update
+helm install external-secrets external-secrets/external-secrets -n external-secrets --create-namespace
+```
+
+## Setting up your local repo
+
+Create a directory for your project and initialize a git repository:
+
+```bash
+mkdir aws-scroll-sdk && cd aws-scroll-sdk && git init
+```
+
+## Configuration
+
+### Grabbing Files from `scroll-sdk`
+
+We'll want two files from the `scroll-sdk` repo. You can either copy-paste the contents from GitHub or copy the files from another location you've cloned.
+
+Here, I'll copy them from a local repo copy.
+
+```bash
+cp ../scroll-sdk/examples/config.toml.example ./config.toml
+```
+
+```bash
+cp ../scroll-sdk/examples/Makefile.example ./Makefile
+```
+
+```bash
+cp -r ../scroll-sdk/examples/values values
+```
+
+`config.toml` will be used to set up the base configuration of our chain, from which each service's independent config files will be generated. `Makefile` will allow us to run `helm` commands directly in an automated way. The `values` directory contains the starter values for each service's `production.yaml` file, where we'll customize the behavior of each chart.
+
+### Preparing our Config.toml file
+
+Although these values can be set manually, we have a number of "setup" methods in the `scroll-sdk-cli` to help you quickly configure your stack.
+
+### Setting Domains
+
+We want to set up our ingress hosts and the URLs used by our frontend sites. These will often be the same, but are defined separately to allow flexibility in architecture and usage of HTTP while in development.
+
+If you're using a public testnet like Scroll Sepolia, please have a private L1 RPC URL ready for HTTPS and WSS. Public RPC endpoints are too unreliable for supporting the SDK backend services.
+
+First, run `scrollsdk setup domains` to begin an interactive session for setting the values.
+
+```
+Current domain configurations:
+EXTERNAL_RPC_URI_L1 = "http://l1-devnet.scrollsdk"
+EXTERNAL_RPC_URI_L2 = "http://l2-rpc.scrollsdk"
+BRIDGE_API_URI = "http://bridge-history-api.scrollsdk/api"
+ROLLUPSCAN_API_URI = "http://rollup-explorer-backend.scrollsdk/api"
+EXTERNAL_EXPLORER_URI_L1 = "http://l1-explorer.scrollsdk"
+EXTERNAL_EXPLORER_URI_L2 = "http://blockscout.scrollsdk"
+ADMIN_SYSTEM_DASHBOARD_URI = "http://admin-system-dashboard.scrollsdk"
+GRAFANA_URI = "http://grafana.scrollsdk"
+
+Current ingress configurations:
+FRONTEND_HOST = "frontends.scrollsdk"
+BRIDGE_HISTORY_API_HOST = "bridge-history-api.scrollsdk"
+ROLLUP_EXPLORER_API_HOST = "rollup-explorer-backend.scrollsdk"
+COORDINATOR_API_HOST = "coordinator-api.scrollsdk"
+RPC_GATEWAY_HOST = "l2-rpc.scrollsdk"
+BLOCKSCOUT_HOST = "blockscout.scrollsdk"
+ADMIN_SYSTEM_DASHBOARD_HOST = "admin-system-dashboard.scrollsdk"
+L1_DEVNET_HOST = "l1-devnet.scrollsdk"
+L1_EXPLORER_HOST = "l1-explorer.scrollsdk"
+? Select the L1 network: Anvil (Local)
+? Enter the L1 Chain Name: l1-devnet
+? Enter the L1 Chain ID: 31337
+Using l1-devnet network:
+L1 Chain Name = "l1-devnet"
+L1 Chain ID = "31337"
+Updated [general] L1_RPC_ENDPOINT = "http://l1-devnet:8545"
+Updated [general] L1_RPC_ENDPOINT_WEBSOCKET = "ws://l1-devnet:8546"
+? Do you want all external URLs to share a URL ending? yes
+? Enter the shared URL ending: scrollsdk.yourdomain
+? Choose the protocol for the shared URLs: HTTPS
+? Do you want the frontends to be hosted at the root domain? (No will use a "frontends" subdomain) no
+
+New domain configurations:
+EXTERNAL_RPC_URI_L2 = "https://l2-rpc.scrollsdk.yourdomain.xyz"
+BRIDGE_API_URI = "https://bridge-history-api.scrollsdk.yourdomain/api"
+ROLLUPSCAN_API_URI = "https://rollup-explorer-backend.scrollsdk.yourdomain/api"
+EXTERNAL_EXPLORER_URI_L2 = "https://blockscout.scrollsdk.yourdomain"
+ADMIN_SYSTEM_DASHBOARD_URI = "https://admin-system-dashboard.scrollsdk.yourdomain"
+EXTERNAL_RPC_URI_L1 = "https://l1-devnet.scrollsdk.yourdomain"
+EXTERNAL_EXPLORER_URI_L1 = "https://l1-explorer.scrollsdk.yourdomain"
+
+New ingress configurations:
+FRONTEND_HOST = "frontends.scrollsdk.yourdomain"
+BRIDGE_HISTORY_API_HOST = "bridge-history-api.scrollsdk.yourdomain"
+ROLLUP_EXPLORER_API_HOST = "rollup-explorer-backend.scrollsdk.yourdomain"
+COORDINATOR_API_HOST = "coordinator-api.scrollsdk.yourdomain"
+RPC_GATEWAY_HOST = "l2-rpc.scrollsdk.yourdomain"
+BLOCKSCOUT_HOST = "blockscout.scrollsdk.yourdomain"
+ADMIN_SYSTEM_DASHBOARD_HOST = "admin-system-dashboard.scrollsdk.yourdomain"
+L1_DEVNET_HOST = "l1-devnet.scrollsdk.yourdomain"
+L1_EXPLORER_HOST = "l1-explorer.scrollsdk.yourdomain"
+
+New general configurations:
+CHAIN_NAME_L1 = "l1-devnet"
+CHAIN_ID_L1 = "31337"
+L1_RPC_ENDPOINT = "http://l1-devnet:8545"
+L1_RPC_ENDPOINT_WEBSOCKET = "ws://l1-devnet:8546"
+? Do you want to update the config.toml file with these new configurations? yes
+```
+
+
+
+### Initializing our Databases and Database Users
+
+We need to temporarily allow public access to our RDS instance to run the initial setup. After setup, we'll revert these changes to maintain security.
+
+1. Update the security group to allow inbound traffic from any IP:
+
+```bash
+aws ec2 authorize-security-group-ingress \
+--group-id $SECURITY_GROUP_ID \
+--protocol tcp \
+--port 5432 \
+--cidr 0.0.0.0/0 \
+--region us-west-2
+```
+
+2. Get the public endpoint of your RDS instance:
+
+```bash
+aws rds describe-db-instances \
+--db-instance-identifier scroll-sdk-db \
+--query 'DBInstances[0].Endpoint.Address' \
+--output text \
+--region us-west-2
+```
+
+3. Run the database initialization:
+
+```bash
+scrollsdk setup db-init
+```
+
+Next, follow the prompts to create the new database users and passwords.
+
+When prompted “Do you want to connect to a different database cluster for this database?”, type “no”.
+
+Lastly, when asked “Do you want to update the config.toml file with the new DSNs?” select “yes” to update your config.
+
+4. Reverting RDS to Private Access:
+
+After initializing your databases, it's important to revert the RDS instance to private access for improved security. Follow these steps:
+
+```bash
+# 1. Remove the public accessibility:
+aws rds modify-db-instance \
+ --db-instance-identifier scroll-sdk-db \
+ --no-publicly-accessible \
+ --apply-immediately \
+ --region us-west-2
+
+# 2. Remove the temporary security group rule that allowed public access:
+aws ec2 revoke-security-group-ingress \
+ --group-id $SECURITY_GROUP_ID \
+ --protocol tcp \
+ --port 5432 \
+ --cidr 0.0.0.0/0 \
+ --region us-west-2
+
+# 3. Wait for the changes to take effect:
+aws rds wait db-instance-available --db-instance-identifier scroll-sdk-db --region us-west-2
+```
+
+These steps ensure that your RDS instance is only accessible from within your VPC, enhancing the security of your deployment.
+
+### Generate Keystore Files
+
+Next, we need to generate new private keys for the sequencer signer and the SDK accounts used for on-chain activity. The prompt will also ask if you want to set up backup sequencers. These will be standby full nodes ready to take over the sequencer role if needed for recovery or key rotation. This step will also allow you to set up pre-defined bootnodes.
+
+This step also updates `L2_GETH_STATIC_PEERS` to point to all sequencers.
+
+Follow the prompts by running `scrollsdk setup gen-keystore`. Use the Owner wallet address of the multi-sig generated in the first step, or, for testing, allow the script to generate a wallet for you. Be sure to save the private key, as it should not be stored in `config.toml`!
+
+### Generate Configuration Files
+
+Now, we'll do the last steps for generating each service's configuration files based on our values in `config.toml`.
+
+Run `scrollsdk setup configs`.
+
+You'll see prompts to update a few remaining values, like the L1 block height at contract deployment and the "deployment salt", which should be unique for each new deployment from a given deployer address.
+
+Now, we'll simulate contract deployment to get contract addresses and build the config files and secrets files for all SDK services. Secrets will be written to `./secrets` and config files to `./values`. If you want the config files written to a different directory, pass the `--configs-dir` flag.
+
+### Pull Charts and Move Config Files
+
+Now, we need to prepare the Helm charts. We will check access to charts, review the Makefile, and check the values files for any missing values.
+
+To do this, run `scrollsdk setup prep-charts`
+
+
+
+After pulling the charts, the CLI tool will try to auto-fill each chart's `production.yaml` file.
+
+You will be prompted with each update and even flagged for empty values. Be sure to sanity check these values to make sure you didn't set up something incorrectly earlier. If you did, re-run any earlier steps, being sure to rerun the `setup configs` command before running `setup prep-charts`.
+
+### Push Secrets
+
+Lastly, we need to take the configuration values that are sensitive and publish them to wherever we're deploying "secrets."
+
+We'll use AWS Secrets Manager to store our secrets. First, let's set up the necessary permissions and create a ServiceAccount:
+
+1. Create an IAM policy for accessing Secrets Manager:
+
+```bash
+cat <<EOF > secretsmanager-policy.json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "secretsmanager:GetSecretValue",
+ "secretsmanager:DescribeSecret",
+ "secretsmanager:PutSecretValue",
+ "secretsmanager:CreateSecret",
+ "secretsmanager:DeleteSecret",
+ "secretsmanager:TagResource"
+ ],
+ "Resource": "*"
+ }
+ ]
+}
+EOF
+
+aws iam create-policy --policy-name ExternalSecretsPolicy --policy-document file://secretsmanager-policy.json
+```
+
+2. Associate an IAM OIDC provider with your cluster:
+
+```bash
+eksctl utils associate-iam-oidc-provider --region=us-west-2 --cluster=scroll-sdk-cluster --approve
+```
+
+3. Create a ServiceAccount and associate it with the IAM role:
+
+```bash
+eksctl create iamserviceaccount \
+ --name external-secrets \
+ --namespace default \
+ --cluster scroll-sdk-cluster \
+ --attach-policy-arn arn:aws:iam::YOUR_AWS_ACCOUNT_ID:policy/ExternalSecretsPolicy \
+ --approve \
+ --region us-west-2
+```
+
+Remember to replace `YOUR_AWS_ACCOUNT_ID` with your actual AWS account ID.
+
+4. Create a SecretStore by saving the following as `secretstore.yaml`:
+
+```yaml
+apiVersion: external-secrets.io/v1beta1
+kind: SecretStore
+metadata:
+  name: aws-secretsmanager
+spec:
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-west-2
+      auth:
+        jwt:
+          serviceAccountRef:
+            name: external-secrets
+```
+
+Apply this configuration:
+
+```bash
+kubectl apply -f secretstore.yaml
+```
+
+Now, push your secrets:
+
+```bash
+scrollsdk setup push-secrets
+```
+
+Select "AWS" when prompted.
+
+You'll be asked to update your `production.yaml` files, and we'll be ready to deploy!
+
+### Enable TLS & HTTPS
+
+Next, we'll want to enable HTTPS access. You should have already enabled Cert Manager in the previous step.
+
+Now, save the following file as `cluster-issuer.yaml` (being sure to use your own email address) then run `kubectl apply -f cluster-issuer.yaml`
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-prod
+spec:
+  acme:
+    server: https://acme-v02.api.letsencrypt.org/directory
+    email: replace.this@email.com
+    privateKeySecretRef:
+      name: letsencrypt-prod
+    solvers:
+      - http01:
+          ingress:
+            class: nginx
+```
+
+Next, run `scrollsdk setup tls` and walk through the instructions to update each of your `production.yaml` files that define an external hostname.
+
+## Deployment
+
+### Fund the Deployer
+
+We need to send some Anvil Devnet ETH to the Deployer — 2 ETH should do it. Run the following if you want to use your mobile wallet or have forgotten the account.
+
+```bash
+scrollsdk helper fund-accounts -i -f 2
+```
+
+`-i` is used for funding the deployer. Later we'll run this with different params to fund other SDK accounts.
+
+Now, let's fund the other L1 accounts.
+
+```bash
+scrollsdk helper fund-accounts -f 0.2 -l 1
+```
+
+Here, we pass `-l 1` to only fund the L1 accounts. Any L2 funding will fail at this point since we haven't launched the chain yet!
+
+### Installing the Helm Charts
+
+Run `make install` to install (or later, to upgrade) all the SDK charts needed. It may help to run the commands one-by-one the first time and check the deployment status.
+
+`k9s` is a useful tool for this. The sample Makefile also doesn't include Blockscout, but feel free to add it as well.
+{/* TODO: Fix this once blockscout is official and added to Makefile */}
+
+### Fund L2 accounts
+
+Let's fund our L1 Gas Oracle Sender (an account on L2 😅) with some funds.
+
+```bash
+scrollsdk helper fund-accounts -f 0.2 -l 2
+```
+
+This will fund it with 0.2 of our gas token. Select "Directly fund L2 Wallet" for now, since our Deployer starts with 1 token on L2. But now we have a working chain, so we can start bridging funds!
+
+## Testing
+
+`scroll-sdk-cli` has a number of tools built in for testing your new network. These commands should be run from the same directory as your `config.toml` and `config-contracts.toml` files.
+
+### Ingress
+
+Run `scrollsdk test ingress` to check all ingresses and that they match the expected value. If you're not using the default namespace, add `-n [namespace]`.
+
+### Contracts
+
+Run `scrollsdk test contracts` to check all contract deployments, initialization, and owner updates.
+
+### e2e Test
+
+Run `scrollsdk test e2e` to try end-to-end testing. Without any flags, the test will create and fund a new wallet, but depending on Sepolia gas costs, you may need to manually fund the generated account with additional ETH. If the tests stop at any time, just run `scrollsdk test e2e -r` to resume from the saved file.
+
+We recommend opening up another terminal and running `scrollsdk helper activity -i 1` to generate traffic and produce more blocks — otherwise, finalization will be stopped.
+
+### Frontends
+
+Go visit the frontends, connect your wallet, and try to bridge some funds!
+
+## Next Steps
+
+### Optimize EKS Node Groups for Specific Services
+
+As you work with your network, you might want to be more selective about the node groups you provide to services.
+
+For example, the `l2-sequencer` may benefit from additional CPU resources, while the `coordinator-api` has RAM requirements far greater than other services.
+
+If you'd like to give this a try, create a new node group in your EKS cluster -- perhaps selecting instances with higher CPU and RAM.
+
+Now, in your `values/l2-sequencer-production-0.yaml` file, add the following section:
+
+```yaml
+affinity:
+  nodeAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      nodeSelectorTerms:
+        - matchExpressions:
+            - key: eks.amazonaws.com/nodegroup
+              operator: In
+              values:
+                - high-performance-nodegroup
+resources:
+  requests:
+    memory: "450Mi"
+    cpu: "80m"
+  limits:
+    memory: "14Gi"
+    cpu: "7.5"
+```
+
+Here, we're asking it to only select nodes that are in the "high-performance-nodegroup" and increasing the resources of the pod to allow up to 7.5 CPU cores.
+
+To apply this, run:
+
+```bash
+helm upgrade -i l2-sequencer-0 oci://ghcr.io/scroll-tech/scroll-sdk/helm/l2-sequencer \
+ --version=0.0.11 \
+ --values values/l2-sequencer-production-0.yaml
+```
+
+Replace `0.0.11` with the version value from your Makefile. Add `-n [namespace]` if you're not using the default namespace.
+
+For more info, see the Kubernetes page on [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/).
+
+### Add Redundancy with Replicas
+
+Soon, we'll add more information about quickly adding Replicas.
+
+For some components (like `l2-rpc` and all `-api` services), this is as easy as adding or modifying the following value in your production.yaml file:
+
+```yaml
+controller:
+ replicas: 2
+```
+
+Some services do not support this without additional configuration (for example, `l2-sequencer` and `l2-bootnode`). We are working on additional info on how to properly run multiple instances of these services, either for load balancing between them or for keeping redundant backups available.
+
+{/* TODO: Fix this comment once we add this documentation */}
+
+
+### Monitoring
+You can monitor the cluster's running status through Grafana.
+Additionally, you can send alerts via email and Slack using Alertmanager.
+If you have configured a domain for Grafana in the previous steps, you can access it by opening http://grafana.yourdomain, where you will see two sets of dashboards. The default password for the `admin` user is `scroll-sdk`.
+
+
+#### Sending Alerts to Slack
+1. **Create a Slack App**
+
+ Open [https://api.slack.com/apps](https://api.slack.com/apps) and click **`Create New App`** if you don't have one already. Select **`From scratch`**, enter a name, and select the workspace.
+
+2. **Activate Incoming Webhooks**
+
+ Click the **`Incoming Webhooks`** label on the right side of the page, then turn on **`Activate Incoming Webhooks`**.
+
+
+ Click the **`Add New Webhook to Workspace`** button.
+
+
+ Select the channel you want to send alerts to, then copy the Webhook URL.
+
+
+
+3. **Edit the Config File**
+
+ Edit `./values/alert-manager.yaml`, adding your Slack webhook URL and channel name:
+ ```yaml
+ kube-prometheus-stack:
+   alertmanager:
+     config:
+       global:
+         resolve_timeout: 5m
+         slack_api_url: 'https://hooks.slack.com/services/xxxxxxxxxxx/xxxxxxxxxxx/xxxxxxxxxxxxxxxxxxxxxxxx' # your webhook url
+       receivers:
+         - name: 'slack-alerts'
+           slack_configs:
+             - channel: '#scroll-webhook' # your channel name
+               send_resolved: true
+               text: '{{ .CommonAnnotations.description }}'
+               title: '{{ .CommonAnnotations.summary }}'
+       route:
+         group_by: ['alertname']
+         receiver: 'slack-alerts'
+         routes:
+           - matchers: []
+             receiver: 'slack-alerts'
+ ```
+ This configuration file will send all alerts to your Slack channel. If you need more complex rules, refer to the [Prometheus Alerting Configuration Documentation](https://prometheus.io/docs/alerting/latest/configuration/).
+
+4. **Update Alertmanager**
+
+ Use the following command to update Alertmanager:
+ ```bash
+ helm upgrade --reuse-values -i scroll-monitor oci://ghcr.io/scroll-tech/scroll-sdk/helm/scroll-monitor -n $(NAMESPACE) \
+ --values ./values/alert-manager.yaml
+ ```
diff --git a/src/content/docs/en/sdk/guides/customizing-sdk-components.mdx b/src/content/docs/en/sdk/guides/customizing-sdk-components.mdx
new file mode 100644
index 000000000..667d00cac
--- /dev/null
+++ b/src/content/docs/en/sdk/guides/customizing-sdk-components.mdx
@@ -0,0 +1,123 @@
+---
+section: sdk
+date: Last Modified
+title: "Customizing SDK Components"
+lang: "en"
+permalink: "sdk/guides/customizing-sdk-components"
+excerpt: "Learn to make custom changes to Scroll SDK services"
+---
+
+import Aside from "../../../../../components/Aside.astro"
+import ToggleElement from "../../../../../components/ToggleElement.astro"
+import Steps from '../../../../../components/Steps/Steps.astro';
+import ClickToZoom from "../../../../../components/ClickToZoom.astro"
+
+## Overview
+
+This guide documents how to run customized components in your own Scroll SDK deployment. We'll see how to modify a service, build a custom Docker image, and deploy your changes to an existing Scroll SDK deployment.
+
+## Prerequisites
+
+
+1. Clone the [scroll-sdk repo](https://github.com/scroll-tech/scroll-sdk) to your local machine
+2. Install dependencies:
+ - [Docker](https://docs.docker.com/desktop/install/linux/)
+ - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
+ - [minikube](https://minikube.sigs.k8s.io/docs/start/)
+ - [Helm](https://helm.sh/docs/intro/install/)
+3. Verify installations by running:
+ - `docker -v`
+ - `kubectl version`
+ - `minikube status`
+ - `helm version`
+
+
+
+
+## Modifying a Service
+
+
+1. Clone the repository of the service you want to modify
+2. Make your desired code modifications
+3. Test your changes locally
+
+
+#### Available Services
+
+| Service | Repository |
+|---------|------------|
+| bridge-history-api | [scroll-tech/scroll/bridge-history-api](https://github.com/scroll-tech/scroll/tree/develop/bridge-history-api/cmd/api) |
+| bridge-history-fetcher | [scroll-tech/scroll/bridge-history-fetcher](https://github.com/scroll-tech/scroll/tree/develop/bridge-history-api/cmd/fetcher) |
+| chain-monitor | [scroll-tech/chain-monitor](https://github.com/scroll-tech/chain-monitor) |
+| contracts | [scroll-tech/scroll-contracts](https://github.com/scroll-tech/scroll-contracts/tree/feat-deterministic-deployment) |
+| coordinator-api | [scroll-tech/scroll/coordinator-api](https://github.com/scroll-tech/scroll/tree/develop/coordinator/cmd/api) |
+| coordinator-cron | [scroll-tech/scroll/coordinator-cron](https://github.com/scroll-tech/scroll/tree/develop/coordinator/cmd/cron) |
+| frontends | [scroll-tech/frontends](https://github.com/scroll-tech/frontends) |
+| gas-oracle | [scroll-tech/scroll/gas-oracle](https://github.com/scroll-tech/scroll/tree/develop/rollup/cmd/gas_oracle) |
+| l1-devnet | [scroll-tech/scroll-sdk/l1-devnet](https://github.com/scroll-tech/scroll-sdk/blob/develop/custom-images/l1-devnet/Dockerfile) |
+| l2-bootnode | [scroll-tech/go-ethereum](https://github.com/scroll-tech/go-ethereum) |
+| l2-rpc | [scroll-tech/go-ethereum](https://github.com/scroll-tech/go-ethereum) |
+| l2-sequencer | [scroll-tech/go-ethereum](https://github.com/scroll-tech/go-ethereum) |
+| rollup-explorer-backend | [scroll-tech/rollup-explorer-backend](https://github.com/scroll-tech/rollup-explorer-backend) |
+| rollup-node | [scroll-tech/scroll/rollup-node](https://github.com/scroll-tech/scroll/tree/develop/rollup/cmd/rollup_relayer) |
+
+## Building a Custom Docker Image
+
+
+1. Locate the Dockerfile for your service
+2. Build the image:
+ ```bash
+ docker build -f <path-to-Dockerfile> -t <image-name>:<tag> .
+ ```
+3. Choose one of two options for making the image available:
+
+ **Option 1: Publish to Docker Hub**
+ ```bash
+ docker login
+ docker push <dockerhub-username>/<image-name>:<tag>
+ ```
+
+ **Option 2: Load Directly to Minikube**
+ ```bash
+ minikube image load <image-name>:<tag>
+ ```
+
+
+
+
+## Updating the Service Configuration
+
+
+1. Locate the `values.yaml` file for your service in `devnet/scroll-sdk/charts/<service-name>/values.yaml`
+
+2. Update the `image` field based on your chosen deployment method:
+
+ **For published Docker Hub images:**
+ ```yaml
+ image:
+   repository: <dockerhub-username>/<image-name>
+   pullPolicy: Always
+   tag: <tag>
+ ```
+
+ **For local Minikube images:**
+ ```yaml
+ image:
+   repository: <image-name>
+   tag: <tag>
+ ```
+
+3. Apply your changes:
+ ```bash
+ make install
+ ```
+
+4. Verify the deployment:
+ ```bash
+ kubectl get pods
+ ```
+
\ No newline at end of file
diff --git a/src/content/docs/en/sdk/guides/devnet-deployment.mdx b/src/content/docs/en/sdk/guides/devnet-deployment.mdx
new file mode 100644
index 000000000..9099b0b40
--- /dev/null
+++ b/src/content/docs/en/sdk/guides/devnet-deployment.mdx
@@ -0,0 +1,546 @@
+---
+section: sdk
+date: Last Modified
+title: "Running Scroll SDK Devnet"
+lang: "en"
+permalink: "sdk/guides/devnet-deploymnet"
+excerpt: "Run the Scroll SDK devnet locally to get started."
+---
+
+import Aside from "../../../../../components/Aside.astro"
+import ToggleElement from "../../../../../components/ToggleElement.astro"
+import Steps from '../../../../../components/Steps/Steps.astro';
+import { Tabs, TabsContent } from "../../../../../components/Tabs"
+
+## Overview
+
+This guide should get you started with running a local Scroll SDK devnet. Additionally, it has specific asides for running on macOS and Ubuntu.
+
+We've written this guide because local deployments of Kubernetes clusters can be finicky. Even the prerequisites can be tricky without help.
+
+By the end of the guide, you should have a Scroll SDK running with a block explorer, RPC, webUI, monitoring and metrics, and a local L1. These are all accessible to wallets, browsers, and applications running on your local machine.
+
+
+
+
+
+
+
+Updates to the Guide
+
+ - October 30, 2024
+ - Updated for Scroll SDK v0.1.0
+
+
+
+## Prerequisites
+
+
+macOS
+Ubuntu
+
+
+1. Install dependencies (using `brew` is strongly recommended):
+ - [Brew](https://brew.sh/) (optional)
+ - `/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"`
+ - Docker ([Docker Desktop](https://docs.docker.com/desktop/install/mac-install/))
+ - `brew install --cask docker`
+ - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/)
+ - `brew install kubectl`
+ - [minikube](https://minikube.sigs.k8s.io/docs/start/?arch=%2Fmacos%2Farm64%2Fstable%2Fhomebrew)
+ - `brew install minikube`
+ - [Helm](https://helm.sh/docs/intro/install/#from-homebrew-macos)
+ - `brew install helm`
+ - [docker-mac-net-connect](https://github.com/chipmk/docker-mac-net-connect) (macOS ARM64 only)
+ - `brew install chipmk/tap/docker-mac-net-connect`
+
+2. *Optional:* Install dependencies for the supporting CLI tool:
+ - [node >=18](https://nodejs.org/en/download/package-manager)
+ - `brew install nvm`
+ - `nvm install node`
+ - scroll-sdk-cli *(Experimental, APIs may change)*
+ - `npm install -g scroll-sdk-cli`
+3. You should now be able to open a terminal and run the following:
+ - `docker -v`
+ - `kubectl version`
+ - `minikube status`
+ - `helm version`
+ - `node -v`
+ - `scrollsdk`
+ - Or, in one step: `scrollsdk test dependencies --dev`
+
+{/* TODO: Update the cli command to use the new `scrollsdk` install command */}
+
+
+
+
+
+1. Update system packages:
+ ```bash
+ sudo apt update
+ sudo apt-get install make
+ ```
+
+2. Install [Docker](https://docs.docker.com/engine/install/ubuntu/):
+ ```bash
+ # Add Docker's official GPG key
+ sudo apt-get install ca-certificates curl
+ sudo install -m 0755 -d /etc/apt/keyrings
+ sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+ sudo chmod a+r /etc/apt/keyrings/docker.asc
+
+ # Add repository to Apt sources
+ echo \
+ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+ $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+
+ sudo apt-get update
+ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+ ```
+
+3. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/):
+ ```bash
+ curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+ sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
+ ```
+
+4. Install [minikube](https://minikube.sigs.k8s.io/docs/start/#linux):
+ ```bash
+ curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
+ sudo install minikube-linux-amd64 /usr/local/bin/minikube
+ rm minikube-linux-amd64
+ ```
+
+5. Install [Helm](https://helm.sh/docs/intro/install/#from-script):
+ ```bash
+ curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
+ ```
+
+6. Install [Node](https://github.com/nvm-sh/nvm#installing-and-updating) and [scroll-sdk-cli](https://github.com/scroll-tech/scroll-sdk-cli):
+ ```bash
+ curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash
+ # Re-login or source your shell configuration
+ nvm install 20
+ npm install -g scroll-sdk-cli
+ ```
+ {/* TODO: Update the cli command to use the new `scrollsdk` install command */}
+
+7. You should now be able to open a terminal and run the following:
+ - `docker -v`
+ - `kubectl version`
+ - `minikube status`
+ - `helm version`
+ - `node -v`
+ - `scrollsdk`
+ - Or, in one step: `scrollsdk test dependencies --dev`
+
+
+
+
+### Starting minikube
+
+First, we need to allow minikube sufficient resources to run the devnet.
+
+```bash
+minikube config set cpus 8
+minikube config set memory 8192
+```
+
+Next, we start minikube.
+
+
+macOS
+Ubuntu
+
+
+macOS requires extra commands as a workaround for `ingress-dns` not working correctly on arm64 macOS. Hopefully, the issue will be resolved in later versions of minikube.
+
+```bash
+minikube start --driver=docker
+minikube addons enable ingress
+minikube addons enable ingress-dns
+
+# Additional steps for MacOS DNS resolution
+minikube ssh "sudo apt-get update && sudo apt-get -y install qemu-user-static"
+sudo brew services start chipmk/tap/docker-mac-net-connect
+```
+
+
+
+
+
+Add your user to the docker group.
+
+```bash
+sudo usermod -aG docker $USER && newgrp docker
+```
+
+Now you are ready to start minikube.
+
+```bash
+minikube start --driver=docker
+minikube addons enable ingress
+minikube addons enable ingress-dns
+```
+
+
+
+## Installing Scroll SDK
+
+{/* */}
+
+
+
+1. **Clone the `scroll-sdk` repo and find `devnet` directory**
+
+ ```bash
+ git clone git@github.com:scroll-tech/scroll-sdk.git
+ cd scroll-sdk/devnet
+ ```
+
+1. **Bootstrap**
+
+ Now we'll install Helm chart dependencies, generate additional config files for our services, and create our `genesis.json` file.
+
+ ```bash
+ make bootstrap
+ ```
+
+ {/* */}
+
+1. Optional: **Configure `values.yaml` and `config.toml`**
+
+ This is the time to adjust what services run on your stack and their configuration. We suggest not altering these on your first installation, but see `charts/scroll-sdk/values.yaml` ([view on Github](https://github.com/scroll-tech/scroll-sdk/blob/develop/charts/scroll-sdk/values.yaml)) and `config.toml` ([view on Github](https://github.com/scroll-tech/scroll-sdk/blob/develop/charts/scroll-sdk/config.toml)).
+
+ If you do make changes, you’ll need to run `make config` afterward to regenerate the additional configuration files. No services directly read from the `config.toml` file.
+
+
+1. **Launch the chain!**
+
+ ```bash
+ make install
+ ```
+
+ Your chain is now starting! Run `kubectl get pods` to check in on their progress. In the next section we'll expose the chain to your local machine so you can interact with the stack.
+
+ The install process for the various containers can take several minutes as new docker images are downloaded and services wait on others to be online to start.
+
+
+
+
+
+
+## After Launching the Stack
+
+### Configuring DNS
+
+Running `kubectl get ingress` should show all the domains setup within the cluster, like the following:
+
+```
+➜ scroll-sdk git:(develop) ✗ kubectl get ingress
+NAME CLASS HOSTS ADDRESS PORTS AGE
+admin-system-dashboard nginx admin-system-dashboard.scrollsdk 192.168.49.2 80 5h3m
+blockscout-backend-ingress nginx blockscout-backend.scrollsdk 192.168.49.2 80 5h3m
+blockscout-frontend-ingress nginx blockscout.scrollsdk 192.168.49.2 80 5h3m
+bridge-history-api nginx bridge-history-api.scrollsdk 192.168.49.2 80 5h3m
+frontends nginx frontends.scrollsdk 192.168.49.2 80 5h3m
+grafana nginx grafana.scrollsdk 192.168.49.2 80 5h3m
+l1-devnet nginx l1-devnet.scrollsdk 192.168.49.2 80 5h3m
+l1-explorer-blockscout-ingress nginx l1-explorer-backend.scrollsdk 192.168.49.2 80 5h3m
+l1-explorer-frontend-ingress nginx l1-explorer.scrollsdk 192.168.49.2 80 5h3m
+l2-rpc nginx l2-rpc.scrollsdk 192.168.49.2 80 5h3m
+l2-rpc-websocket nginx l2-rpc-ws.scrollsdk 192.168.49.2 80 5h3m
+rollup-explorer-backend nginx rollup-explorer-backend.scrollsdk 192.168.49.2 80 5h3m
+```
+
+
+macOS
+Ubuntu
+
+
+
+Now, we'll follow the instructions from [the minikube docs](https://minikube.sigs.k8s.io/docs/handbook/addons/ingress-dns/#Mac) for setting up our local machine to use our cluster to resolve all `.scrollsdk` domain requests.
+
+Take note of the `ADDRESS` in your output (it should match the result of running `minikube ip`).
+
+
+1. Create the following file at `/etc/resolver/minikube-scrollsdk` (will require `sudo` access).
+
+ ```
+ domain scrollsdk
+ nameserver <minikube-ip>
+ search_order 1
+ timeout 5
+ ```
+
+ Alternatively, this can be done in one command that creates the directory and file and then outputs the required info:
+
+ ```bash
+ sudo mkdir -p /etc/resolver && sudo touch /etc/resolver/minikube-scrollsdk && sudo sh -c "cat >>/etc/resolver/minikube-scrollsdk" <<-EOF
+
+ domain scrollsdk
+ nameserver $(minikube ip)
+ search_order 1
+ timeout 5
+ EOF
+ ```
+
+2. Flush your DNS:
+
+ ```bash
+ sudo dscacheutil -flushcache; sudo killall -HUP mDNSResponder
+ ```
+
+
+
+
+
+
+To resolve the ingress domains, we need to add the minikube IP and our service hostnames to the hosts file.
+
+
+1. Get your minikube IP address:
+
+ ```bash
+ minikube ip
+ ```
+
+ This should return something like `192.168.49.2`.
+
+2. Add this IP and our service hostnames to the hosts file:
+
+ ```bash
+ # Open the hosts file in nano
+ sudo nano /etc/hosts
+ ```
+
+3. Add the following entries to the file, replacing `192.168.49.2` with your minikube IP if different:
+
+ ```
+ 192.168.49.2 admin-system-dashboard.scrollsdk
+ 192.168.49.2 blockscout-backend.scrollsdk
+ 192.168.49.2 blockscout.scrollsdk
+ 192.168.49.2 bridge-history-api.scrollsdk
+ 192.168.49.2 frontends.scrollsdk
+ 192.168.49.2 grafana.scrollsdk
+ 192.168.49.2 l1-devnet.scrollsdk
+ 192.168.49.2 l1-explorer-backend.scrollsdk
+ 192.168.49.2 l1-explorer.scrollsdk
+ 192.168.49.2 l2-rpc.scrollsdk
+ 192.168.49.2 l2-rpc-ws.scrollsdk
+ 192.168.49.2 rollup-explorer-backend.scrollsdk
+ ```
+
+4. Save the file by pressing `Ctrl+X`, then `Y`, then `Enter`.
+
+
+
+
+
+#### Testing Ingress DNS
+
+Now, test ingress and your access to the services by running `scrollsdk test ingress`.
+
+If you aren't using the CLI tool, you can manually test the ingress DNS by running:
+
+1. `nslookup frontends.scrollsdk $(minikube ip)`
+2. `curl frontends.scrollsdk`
+3. If those work, visit [`http://frontends.scrollsdk`](http://frontends.scrollsdk) in your browser.
+
+4. If those do not work:
+ - **For MacOS:** Try restarting your machine and running through the DNS setup instructions again. It may help to run `minikube stop` and `minikube start` again.
+ - **For Ubuntu:** Double check that your minikube IP matches what's in your `/etc/hosts` file by running `minikube ip`.
+
+
+### Funding SDK Addresses
+
+You will need to provide funds to the following accounts:
+
+ - `DEPLOYER_ADDR` *(only needs funded on L1)*
+ - In the default configuration, this account is pre-funded by Anvil. If changed, you'll need to relaunch the contracts pod after funding to deploy the contracts.
+ - `L1_COMMIT_SENDER_ADDR`
+ - `L1_FINALIZE_SENDER_ADDR`
+ - `L1_GAS_ORACLE_SENDER_ADDR`
+ - `L2_GAS_ORACLE_SENDER_ADDR` *(funded after L2 chain deployment)*
+
+If you installed the necessary prerequisites, you can run the CLI helper command to fund the accounts.
+
+```bash
+cd scroll-sdk
+scrollsdk helper fund-accounts --dev
+```
+
+
+
+### Other Useful Commands
+
+`kubectl get pods` will show all the active pods and their status.
+
+`kubectl get ingress` will show all the exposed services and URIs.
+
+`make delete` will stop all services. You will also need to run `kubectl delete pvc data-l2-rpc-0` to delete the remaining PVC.
+
+`minikube dashboard` launches a web UI for exploring the Kubernetes cluster and the various pods, volumes, and ingresses without learning all the CLI commands. `k9s` is also a great CLI tool for exploring running pods and quickly looking at their logs.
+
+If you need to update a specific service's config file (not the original `config.toml`):
+ 1. Make any necessary changes to the config files or helm charts
+ 2. Run `make install`
+ 3. Delete the running pod by running `kubectl delete pod [pod-name]`. Kubernetes will restart the pod with the updated config.
+
+
+## Testing & Interacting with the Chain
+
+### scroll-sdk-cli
+
+The scroll-sdk-cli tool has many helper commands. Run it in the same directory as your `config.toml` file in the `./devnet/scroll-sdk` folder.
+
+- `scrollsdk test ingress` will test if your ingress is configured correctly and host URLs are accessible from your machine.
+- `scrollsdk test contracts` will check that essential contracts are deployed, initialized and have the correct owner.
+- `scrollsdk helper fund-accounts --dev -a [account-address]` will fund any account.
+- `scrollsdk helper activity -o -t` will generate activity on both layer one and layer two to help produce blocks and batches.
+
+#### End-To-End Test
+
+The `scrollsdk test e2e` command will walk you through the following tests on your chain:
+
+- Generating a new wallet
+- Funding it from the Deployer address on L1
+- Depositing funds to the L2
+- Deploying an ERC20 and depositing tokens to the L2
+- Deploying an ERC20 on the L2
+- Bridging ETH and the ERC20 from L2 back to L1
+- Executing the withdrawal claim on L1
+
+If any step fails, you can restart where you left off by adding the `--resume` flag.
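+
+For example:
+
+```bash
+scrollsdk test e2e            # start the end-to-end walkthrough
+scrollsdk test e2e --resume   # pick up a previously interrupted run
+```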
+
+
+
+
+### Web UIs
+
+You should now be able to explore the stack on your local machine using your web browser. All links below assume the default configuration and working Ingress DNS.
+
+- Block Explorers (Blockscout)
+ - [L2 Explorer](http://blockscout.scrollsdk/)
+ - [L1 Explorer](http://l1-devnet-explorer.scrollsdk/) (this is scanning Anvil and can be a bit buggy)
+- [Bridge](http://frontends.scrollsdk/bridge)
+ - Until some activity on the network has started, gas errors may occur.
+- [Rollup Explorer](http://frontends.scrollsdk/rollupscan?page=1&per_page=10)
+ - Shows committed batches and finalized batches
+- [Grafana Dashboards](http://grafana.scrollsdk/)
+ - Login
+ - User: `admin`
+ - Pass: `scroll-sdk`
+ - See “Scroll” dashboards on [this page](http://grafana.scrollsdk/dashboards).
+
+### Connecting to the RPC using a Wallet
+
+To connect directly to an RPC or using MetaMask, use:
+
+| Network | Scroll SDK Chain | Scroll SDK L1 |
+|-------------|-------------|-------------|
+| RPC URL | `http://l2-rpc.scrollsdk` | `http://l1-devnet.scrollsdk` |
+| Chain ID | `221122` | `111111` |
+| Currency Symbol | `ETH` | `ETH` |
+| Block Explorer URL | [`http://blockscout.scrollsdk`](http://blockscout.scrollsdk/) | [`http://l1-devnet-explorer.scrollsdk`](http://l1-devnet-explorer.scrollsdk/) |
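+
+As a quick sanity check (assuming you have Foundry's `cast` installed), you can confirm each RPC answers with the expected chain ID:
+
+```bash
+cast chain-id --rpc-url http://l2-rpc.scrollsdk      # expect 221122
+cast chain-id --rpc-url http://l1-devnet.scrollsdk   # expect 111111
+```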
+
+
+
+## What's Next?
+
+If you're a developer, you might want to try [customizing specific SDK components](/sdk/customization).
+
+If you're looking to learn more about running a public testnet, try out a production deployment with our [Digital Ocean guide](/sdk/guides/production-deployment-digital-ocean) or our [AWS guide](/sdk/guides/production-deployment-aws).
+
+### Helpful Commands
+
+Anvil has a lot of [useful methods](https://book.getfoundry.sh/reference/anvil/#custom-methods) that can manipulate the L1. Proper documentation for using them is available in the [Hardhat docs](https://hardhat.org/hardhat-network/docs/reference#hardhat-network-methods) (replacing `hardhat_` with `anvil_`).
+
+#### Set L1 Token Balance of an Account
+
+In `params`, set the first item to the target wallet address and the second to the balance in wei (hex-encoded). The example below sets the balance to 1000 ETH.
+
+See [docs](https://hardhat.org/hardhat-network/docs/reference#hardhat_setbalance) for details.
+
+```bash
+curl --location 'http://l1-devnet.scrollsdk/' \
+--header 'Content-Type: application/json' \
+--data '{
+ "jsonrpc":"2.0",
+ "method":"anvil_setBalance",
+ "params":["0x98110937b5D6C5FCB0BA99480e585D2364e9809C","0x3635C9ADC5DEA00000"],
+ "id":0
+}'
+```
+
+#### Mine some L1 Blocks
+
+See [docs](https://hardhat.org/hardhat-network/docs/reference#hardhat_mine) for details.
+
+```bash
+curl --location 'http://l1-devnet.scrollsdk/' \
+--header 'Content-Type: application/json' \
+--data '{
+ "jsonrpc":"2.0",
+ "method":"anvil_mine",
+ "params":["0x10000000", "0xc"],
+ "id":0
+}'
+```
+
+## Troubleshooting
+
+### `ingress-dns` issues
+
+Getting `ingress-dns` working can be tricky across different machines, operating systems and network configurations.
+
+**Directly editing `/etc/hosts` has worked for some users, adding all ingress hosts pointing to the minikube IP address.**
+
+Follow the template below, being sure to create one entry for every ingress in your configuration. Also, be sure to try the IP of your minikube deployment.
+
+For VPN users, we've seen the following work:
+
+
+1. Execute command `sudo minikube tunnel`
+
+1. Open file `/etc/hosts` and add:
+ ```
+ 127.0.0.1 l1-devnet.scrollsdk
+ 127.0.0.1 bridge-history.scrollsdk
+ 127.0.0.1 frontends.scrollsdk
+ 127.0.0.1 grafana.scrollsdk
+ 127.0.0.1 l1-devnet-explorer.scrollsdk
+ 127.0.0.1 l2-rpc.scrollsdk
+ 127.0.0.1 blockscout.scrollsdk
+ 127.0.0.1 bridge-history-api.scrollsdk
+ ```
+
+1. Turn off your VPN when visiting the services in your browser.
+
\ No newline at end of file
diff --git a/src/content/docs/en/sdk/guides/digital-ocean-alt-gas-token.mdx b/src/content/docs/en/sdk/guides/digital-ocean-alt-gas-token.mdx
new file mode 100644
index 000000000..210c9802a
--- /dev/null
+++ b/src/content/docs/en/sdk/guides/digital-ocean-alt-gas-token.mdx
@@ -0,0 +1,790 @@
+---
+section: sdk
+date: Last Modified
+title: "Digital Ocean Deployment using an ERC20 Gas Token"
+lang: "en"
+permalink: "sdk/guides/digital-ocean-alt-gas-token"
+excerpt: "Get accustomed to the process of running an SDK deployment."
+---
+
+import Aside from "../../../../../components/Aside.astro"
+import ToggleElement from "../../../../../components/ToggleElement.astro"
+import Steps from '../../../../../components/Steps/Steps.astro';
+import ClickToZoom from "../../../../../components/ClickToZoom.astro"
+
+import DOKubernetesCluster from "./_images/do-kubernetes-cluster.png"
+import DOKubernetesCluster2 from "./_images/do-kubernetes-cluster-2.png"
+import DOMarketplaceAddons from "./_images/do-marketplace-addons.png"
+import DODatabaseSetup from "./_images/do-database-setup.png"
+import DODatabaseRestrictions from "./_images/do-database-restrictions.png"
+import DODatabaseRestrictionsIP from "./_images/do-database-restrictions-ip.png"
+import DOLoadBalancer from "./_images/do-load-balancer.png"
+import CloudflareDNS from "./_images/cloudflare-dns.png"
+import CloudflareDNSFrontends from "./_images/cloudflare-dns-frontends.png"
+import DOKubernetesConnect from "./_images/do-cluster-connection.png"
+import DOConnectionPools from "./_images/do-connection-pools.png"
+import DOConnectionInfo from "./_images/do-connection-info.png"
+import DOVPCNetwork from "./_images/do-vpc-network.png"
+import CreateSlackApp from "./_images/CreateSlackApp.png"
+import AddNewWebhookToWorkspace from "./_images/AddNewWebhookToWorkspace.png"
+import CopyWebhookURL from "./_images/CopyWebhookURL.png"
+import GrafanaDashboard from "./_images/grafana.png"
+
+This guide documents getting a Scroll SDK deployment working on Digital Ocean's Kubernetes and managed databases, using Cloudflare for DNS.
+
+Though this isn't the most "sophisticated" cloud setup, it is a step more complicated than a local devnet.
+
+This guide is intended for chain owners and those who aren't DevOps professionals, to show what's involved and the additional considerations that need to be made.
+
+Because using an alternative gas token introduces another element of complexity, we will use it here as well.
+
+
+
+
+
+## Getting your machine ready
+
+### Installing Prerequisites
+
+- kubectl
+- helm
+- docker
+- node ≥ 18
+- [scroll-sdk-cli](https://www.npmjs.com/package/scroll-sdk-cli) *(see below)*
+- doctl *(optional)*
+- k9s *(optional)*
+
+{/* TODO: Update URL to use new package name and command below */}
+
+To install the scroll-sdk-cli, run `npm install -g scroll-sdk-cli`.
+
+Then, run `scrollsdk test dependencies` to test that the tool works and to check the required dependencies listed above.
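+
+In other words:
+
+```bash
+npm install -g scroll-sdk-cli   # install the CLI globally
+scrollsdk test dependencies     # verify the other prerequisites are available
+```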
+
+### Setting up your Owner Wallet
+
+The chain owner has the ability to upgrade essential contracts. We recommend setting up a Safe with at least 2 independently controlled signers required for managing upgrades. You may also consider adding as signers any external party that will help you execute upgrades, as they'll be able to propose new transactions.
+
+For this demo, I'll use a temporary development account created in MetaMask.
+
+## Setting up your infrastructure
+
+### Creating a Kubernetes Cluster
+
+First, we need to setup the cluster of machines that our services will be deployed on.
+
+Here, we will opt for simplicity — you may want to have specific machines better catered to specific services. But, we've chosen 10Gbps to handle the heavy testing load and 32GB RAM machines to accommodate the Coordinator, which requires > 20GB.
+
+
+
+
+### Installing Marketplace Add-Ons
+
+We know we'll want an Ingress Controller for handling in-bound requests, a monitoring stack and a certificate manager to support HTTPS connections.
+
+All of these can be installed and configured manually, but that falls outside the scope of this guide. Be sure to wait for the new cluster to be fully deployed before trying to install add-ons.
+
+We'll choose:
+
+- NGINX Ingress Controller
+- Kubernetes Monitoring Stack
+- Cert-Manager
+
+
+
+### Creating a Managed Database
+
+`scroll-sdk` uses 3 distinct databases, with 2 additional databases used for L1 and L2 Blockscout instances if needed. We'll set up a single database cluster and host each database and user inside it (more on the setup later).
+
+Although Digital Ocean supports a Database Service as part of its Kubernetes clusters, we'll instead just deploy a managed database.
+
+We'll select the same datacenter as our cluster, the latest PostgreSQL version (v16), and start with a Basic - Shared CPU plan. We can always upgrade this configuration easily later on. The default 10GB of storage is okay for now as well.
+
+
+
+To better secure your database, restrict inbound connections. First, select your new Kubernetes cluster:
+
+
+
+Next, select your current IP so you can run the setup scripts locally later in this guide.
+
+
+
+
+### Creating a Load Balancer
+
+DigitalOcean's NGINX Ingress Controller automatically deploys and configures a load balancer and gives it an IP address, viewable in the "Networking" area of your dashboard. We'll use this IP for setting up DNS.
+
+
+
+### Configuring your DNS
+
+We've already [added a domain](https://developers.cloudflare.com/learning-paths/get-started-free/onboarding/add-and-activate/#_top) to be managed by Cloudflare.
+
+Here, we use a subdomain and wildcard to point to our load balancer. Because we don't have SSL certificates set up and Cloudflare requires additional configuration for wildcards and nested subdomains, we'll bypass the Proxy. The proxy also has limited support for traffic other than HTTP & HTTPS.
+
+
+
+Here, we'll also make one without the wildcard so that our frontends URL can live at that domain:
+
+
+
+### Connecting to your Cluster
+
+Next, we need to connect our local machine to the cluster in order to manage it.
+
+DigitalOcean provides two ways of doing this — we'll use the `doctl` based version.
+
+Run `doctl kubernetes cluster kubeconfig save [credential]`
+
+```
+Notice: Adding cluster credentials to kubeconfig file found in "/home/user/.kube/config"
+Notice: Setting current-context to do-nyc1-scroll-sdk-alt-gas-demo
+```
+
+
+
+### Adding External Secrets Operator
+
+Scroll SDK uses [External Secrets](https://external-secrets.io/latest/introduction/getting-started/) to manage sensitive information. Once you have `kubectl` working with your cluster, please run the following:
+
+```bash
+helm repo add external-secrets https://charts.external-secrets.io
+helm repo update
+helm install external-secrets external-secrets/external-secrets -n external-secrets --create-namespace
+```
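+
+You can confirm the operator came up before moving on:
+
+```bash
+kubectl get pods -n external-secrets
+```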
+
+## Setting up your local repo
+
+Let's now create a directory to host our local files and set up our git repo.
+
+```bash
+mkdir do-alt-gas-demo && cd do-alt-gas-demo && git init
+```
+
+## Configuration
+
+### Grabbing Files from `scroll-sdk`
+
+We'll want two files from the `scroll-sdk` repo. You can either copy-paste the contents from GitHub, or copy the files from another location you've cloned.
+
+Here, I'll copy them from a local repo copy.
+
+```bash
+cp ../scroll-sdk/examples/config.toml.example ./config.toml
+```
+
+```bash
+cp ../scroll-sdk/examples/Makefile.example ./Makefile
+```
+
+```bash
+cp -r ../scroll-sdk/examples/values values
+```
+
+`config.toml` will be used to set up the base configuration of our chain, from which each service's independent config files will be generated. `Makefile` will allow us to directly run `helm` commands in an automated way. The `values` directory contains the starter values for each service's `production.yaml` file, where we'll customize the behavior of each chart.
+
+### Preparing our Config.toml file
+
+Although these values can be set manually, we have a number of "setup" methods in the `scroll-sdk-cli` to help you quickly configure your stack.
+
+### Setting Domains
+
+We want to set up our ingress hosts and the URLs used by our frontend sites. These will often be the same, but are defined separately to allow flexibility in architecture and usage of HTTP while in development.
+
+If you're using a public testnet like Scroll Sepolia, please have a private L1 RPC URL ready for HTTPS and WSS. Public RPC endpoints are too unreliable for supporting the SDK backend services.
+
+First, run `scrollsdk setup domains` to begin an interactive session for setting the values.
+
+```
+Current domain configurations:
+EXTERNAL_RPC_URI_L1 = "http://l1-devnet.scrollsdk"
+EXTERNAL_RPC_URI_L2 = "http://l2-rpc.scrollsdk"
+BRIDGE_API_URI = "http://bridge-history-api.scrollsdk/api"
+ROLLUPSCAN_API_URI = "http://rollup-explorer-backend.scrollsdk/api"
+EXTERNAL_EXPLORER_URI_L1 = "http://l1-explorer.scrollsdk"
+EXTERNAL_EXPLORER_URI_L2 = "http://blockscout.scrollsdk"
+? Are you using a public L1 network? yes
+? Select the L1 network: Ethereum Sepolia Testnet
+Using sepolia network:
+L1 Explorer URL: https://sepolia.etherscan.io
+L1 RPC URL: https://rpc.ankr.com/eth_sepolia
+? Do you want to set custom L1 RPC endpoints for the SDK backend? yes
+? Enter the L1 RPC HTTP endpoint for SDK backend:
+https://xxxx.quiknode.pro/xxxxx/
+? Enter the L1 RPC WebSocket endpoint for SDK backend:
+wss://xxxx.quiknode.pro/xxxxx/
+? Do you want all L2 external URLs to share a URL ending? yes
+? Enter the shared URL ending: do-alt-gas-demo.scroll.xyz
+? Choose the protocol for the shared URLs: HTTPS
+? Do you want the frontends to be hosted at the root domain? (No will use a "frontends" subdomain) yes
+
+New domain configurations:
+EXTERNAL_EXPLORER_URI_L1 = "https://sepolia.etherscan.io"
+EXTERNAL_RPC_URI_L1 = "https://rpc.ankr.com/eth_sepolia"
+EXTERNAL_RPC_URI_L2 = "https://l2-rpc.do-alt-gas-demo.scroll.xyz"
+BRIDGE_API_URI = "https://bridge-history-api.do-alt-gas-demo.scroll.xyz/api"
+ROLLUPSCAN_API_URI = "https://rollup-explorer-backend.do-alt-gas-demo.scroll.xyz/api"
+EXTERNAL_EXPLORER_URI_L2 = "https://l2-explorer.do-alt-gas-demo.scroll.xyz"
+
+New ingress configurations:
+FRONTEND_HOST = "do-alt-gas-demo.scroll.xyz"
+BRIDGE_HISTORY_API_HOST = "bridge-history-api.do-alt-gas-demo.scroll.xyz"
+ROLLUP_EXPLORER_API_HOST = "rollup-explorer-backend.do-alt-gas-demo.scroll.xyz"
+COORDINATOR_API_HOST = "coordinator-api.do-alt-gas-demo.scroll.xyz"
+RPC_GATEWAY_HOST = "l2-rpc.do-alt-gas-demo.scroll.xyz"
+BLOCKSCOUT_HOST = "blockscout.do-alt-gas-demo.scroll.xyz"
+? Do you want to update the config.toml file with these new configurations? yes
+config.toml has been updated with the new domain configurations.
+```
+
+
+
+### Initializing our Databases and Database Users
+
+Return to DigitalOcean and get your admin connection information:
+
+
+
+Run `scrollsdk setup db-init`, select "yes" for the Block Explorer and "no" for the L1 Explorer, then enter the public network host value from DigitalOcean, followed by the port, admin username and password, and default database.
+
+Now, input the information from the VPC Network panel, which is how pods will connect to the database from inside the virtual private network.
+
+
+
+Next, follow the prompts to create the new database users and passwords.
+
+When prompted "Do you want to connect to a different database cluster for this database?", type "no".
+
+Lastly, when asked "Do you want to update the config.toml file with the new DSNs?" select "yes" to update your config.
+
+### Dealing with Connections & Connection Pools
+
+DigitalOcean limits the number of incoming connections, and we'll quickly reach this limit with the basic cluster we've deployed. We'll set up "Connection Pools" so that services can leave their connections open but share the available number of active database transactions.
+
+In the DigitalOcean "Connection Pool" tab, create the following pools and choose their corresponding database, using "Transaction" Pool Mode. Pool size can be 4 for each.
+
+In the end, your Connection Pools page should look like this:
+
+
+
+Notice that these pools use a different port for connections, so we need to modify `config.toml` to use this port number (for me, `25061`). Also, if you altered a pool name, use it instead of the database name in the DSN string.
+
+You can do this manually or run `scrollsdk setup db-init --update-port 25061`
+
+### Generate Keystore Files
+
+Next, we need to generate new private keys for the sequencer signer and the SDK accounts used for on-chain activity. The prompt will also ask if you want to set up backup sequencers. These will be standby fullnodes ready to take over the sequencer role if needed for recovery or key rotation. This step will also allow you to set up pre-defined bootnodes.
+
+This step will also update `L2_GETH_STATIC_PEERS` to point to all sequencers.
+
+Follow the prompt by running `scrollsdk setup gen-keystore`. Use the Owner wallet address of the multi-sig generated in the first step, or for testing, allow the script to generate a wallet for you. But, be sure to save the private key, as it should not be stored in config.toml!
+
+### Define Gas Token Details
+
+If no token address is specified, a new ERC20 will be deployed on L1. The example decimal value is only used for this deployed contract.
+
+The configuration below uses Aave DAI; see their faucet [here](https://staging.aave.com/faucet/).
+
+```toml
+[gas-token]
+ALTERNATIVE_GAS_TOKEN_ENABLED = true
+L1_GAS_TOKEN = "0xFF34B3d4Aee8ddCd6F9AFFFB6Fe49bD371b8a357"
+```
+
+You can follow the prompt by running `scrollsdk setup gas-token`.
+
+
+
+{/* TODO: Document using CLI for advanced configuration here. */}
+
+### Generate Configuration Files
+
+Now, we'll do the last steps for generating each service's configuration files based on our values in `config.toml`.
+
+Run `scrollsdk setup configs`.
+
+You'll see a few prompts to update a few remaining values, like the L1 block height at contract deployment and the "deployment salt", which should be unique for each new deployment from a given deployer address.
+
+Now, we'll simulate contract deployment to get contract addresses and build the config files and secrets files for all SDK services. Secrets will be written to `./secrets` and config files to `./values`. If you want the config files written to a different directory, pass the `--configs-dir` flag.
+
+### Prep Charts Values
+
+Now, we need to prepare the Helm charts. We will check access to charts, review the Makefile and check the values files for any missing values.
+
+To do this, run `scrollsdk setup prep-charts` and the CLI tool will try to auto-fill each chart's `production.yaml` file.
+
+You will be prompted with each update, and even flagged about empty values. Be sure to sanity-check these values to make sure you didn't set something up incorrectly earlier. If you did, re-run any earlier steps, being sure to rerun the `setup configs` command before running `setup prep-charts`.
+
+### Push Secrets
+
+Lastly, we need to take the configuration values that are sensitive and publish them to wherever we're deploying "secrets."
+
+Since DigitalOcean doesn't natively host a Secret Management tool like AWS or Azure, we'll use Hashicorp Vault to store our sensitive data like database connection strings and private keys.
+
+For the purposes of this demo, we will not host an independent version of Hashicorp Vault, but will instead launch a service in our cluster and push values to this vault. Then, items from the store are made available to pods using [External Secrets](https://external-secrets.io/latest/).
+
+First, let's set up a local development instance of Hashicorp Vault.
+
+```bash
+helm repo add hashicorp https://helm.releases.hashicorp.com
+helm repo update
+helm install vault hashicorp/vault --set "server.dev.enabled=true"
+```
+
+Next, we'll create a secret to store our access token and a SecretStore — copy-paste the following text into `vault-secret-store.yaml`:
+
+```yaml
+apiVersion: external-secrets.io/v1beta1
+kind: SecretStore
+metadata:
+ name: vault-backend
+spec:
+ provider:
+ vault:
+ server: "http://vault.default.svc.cluster.local:8200"
+ path: "scroll"
+ version: "v2"
+ auth:
+ tokenSecretRef:
+ name: vault-token
+ key: token
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: vault-token
+type: Opaque
+stringData:
+ token: "root" # This is the default token in dev mode. Don't use in production!
+```
+
+Now, run `kubectl apply -f ./vault-secret-store.yaml`
+
+Next, run `scrollsdk setup push-secrets` and select "Hashicorp Vault - Dev". If you need guidance with another Secret Provider, please reach out.
+
+You'll be asked to update your `production.yaml` files, and we'll be ready to deploy!
+
+### Enable TLS & HTTPS
+
+Next, we'll want to enable HTTPS access. You should have already enabled Cert Manager through the DigitalOcean backend.
+
+Now, save the following file as `cluster-issuer.yaml` (being sure to use your own email address) then run `kubectl apply -f cluster-issuer.yaml`
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: letsencrypt-prod
+spec:
+ acme:
+ server: https://acme-v02.api.letsencrypt.org/directory
+ email: replace.this@email.com
+ privateKeySecretRef:
+ name: letsencrypt-prod
+ solvers:
+ - http01:
+ ingress:
+ class: nginx
+```
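+
+Once applied, a quick check that the issuer registered and is ready:
+
+```bash
+kubectl get clusterissuer letsencrypt-prod   # READY should report True once the ACME account is set up
+```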
+
+Next, run `scrollsdk setup tls` and walk through the instructions to update each of your `production.yaml` files that define an external hostname.
+
+## Deploying
+
+### Fund the Deployer
+
+We need to send some Sepolia ETH to the Deployer — 2 ETH should do it. Run the following if you want to use your mobile wallet or have forgotten the account.
+
+`scrollsdk helper fund-accounts -i -f 2`
+
+`-i` is used for funding the deployer. Later we'll run this with different params to fund other SDK accounts.
+
+
+
+Now, let's fund the other L1 accounts.
+
+`scrollsdk helper fund-accounts -f 0.2 -l 1`
+
+Here, we pass `-l 1` to only fund the L1 accounts. Any L2 funding will fail at this point since we haven't launched the chain yet!
+
+### Installing the Helm Charts
+
+Run `make install` to install (or later, to upgrade) all the SDK charts needed. It may help to run the commands one-by-one the first time, and check the deployment status. `k9s` is a useful tool for this. The sample Makefile also doesn't include Blockscout, but feel free to add it as well.
+
+### Fund L2 accounts
+
+Let's fund our L1 Gas Oracle Sender (an account on L2 😅) with some funds.
+
+`scrollsdk helper fund-accounts -f 0.2 -l 2` will fund it with 0.2 of our gas token. Select "Directly fund L2 Wallet" for now, since our Deployer starts with 1 token on L2. But now we have a working chain, so we can start bridging funds!
+
+## Testing
+
+`scroll-sdk-cli` has a number of tools built in for testing your new network. These commands should be run from the same directory as your `config.toml` and `config-contracts.toml` files.
+
+### Ingress
+
+Run `scrollsdk test ingress` to check all ingresses and that they match the expected value. If you're not using the default namespace, add `-n [namespace]`.
+
+### Contracts
+
+Run `scrollsdk test contracts` to check all contract deployments, initialization and owner updates.
+
+### e2e Test
+
+Run `scrollsdk test e2e` to try end-to-end testing. Without any flags, the test will create and fund a new wallet, but depending on Sepolia gas costs, you may need to manually fund the generated account with additional ETH. If the tests stop at any time, just run `scrollsdk test e2e -r` to resume from the saved file.
+
+We recommend opening up another terminal and running `scrollsdk helper activity -i 1` to generate traffic and produce more blocks — otherwise, finalization will be stopped.
+
+### Frontends
+
+Go visit the frontends, connect your wallet and try to bridge some funds!
+
+## Next Steps
+
+### Disable L1 Data Fee
+
+On Scroll, transactions on L2 have two components -- the gas cost for execution and an L1 data fee. When gas on your network is paid in a token that has no standard relationship to the currency used to pay for data fees on the L1, you will need tooling that can set the gas calculation "scalar" values.
+
+At the moment, we have not built any automated tooling for this, and instead of viewing the ERC20 value as 1:1 with Sepolia Ether, we suggest setting the scalars to 0 to eliminate these overheads.
+
+To do so, you can run the following commands using your L2 RPC URL and Owner account private key:
+
+```bash
+cast send --rpc-url http://l2-rpc.scrollsdk 0x5300000000000000000000000000000000000002 "setCommitScalar(uint256)" 0 --private-key [private-key]
+cast send --rpc-url http://l2-rpc.scrollsdk 0x5300000000000000000000000000000000000002 "setBlobScalar(uint256)" 0 --private-key [private-key]
+```
+
+Or, if your Owner is just a test account, you can use its private key to call this method:
+
+```bash
+scrollsdk helper set-scalars -k [private-key]
+```
+
+### Deploy Blockscout
+
+
+
+As long as you set up the databases in the `scrollsdk setup db-init` step, you can download the chart, extract it, and run `helm upgrade -i blockscout blockscout --values blockscout/values/production.yaml`
+
+{/* Todo: return with instructions that add this directly as a Makefile command. */}
+
+### Optimize Machine Configuration with Node Affinity
+
+As you work with your network, you might want to be more selective about the pools you provide to services.
+
+One example is that the `l2-sequencer` may want additional CPU-resources and the `coordinator-api` has RAM requirements far greater than other services.
+
+If you'd like to give this a try, create a new "Node Pool" in DigitalOcean -- perhaps selecting "CPU Intensive - 8vCPU 16 GB RAM" and naming it "pool-sequencer".
+
+Now, in your `values/l2-sequencer-production-0.yaml` file, add the following section:
+
+```yaml
+affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: doks.digitalocean.com/node-pool
+ operator: In
+ values:
+ - pool-sequencer
+resources:
+ requests:
+ memory: "450Mi"
+ cpu: "80m"
+ limits:
+ memory: "14Gi"
+ cpu: "7.5"
+```
+
+Here, we're asking it to only select nodes that are in the "pool-sequencer" pool and increasing the resources of the pod to allow up to 7.5 CPU cores.
+
+To apply this, run:
+
+```bash
+helm upgrade -i l2-sequencer-0 oci://ghcr.io/scroll-tech/scroll-sdk/helm/l2-sequencer \
+ --version=0.0.11 \
+ --values values/l2-sequencer-production-0.yaml
+```
+
+Replace the version with the value from your Makefile. Add `-n [namespace]` if you're not using the default namespace.
+
+For more info, see the Kubernetes page on [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/).
+
+### Add Redundancy with Replicas
+
+Soon, we'll add more information about quickly adding Replicas.
+
+For some components (like `l2-rpc` and all `-api` services), this is as easy as adding or modifying the following value in your production.yaml file:
+
+```yaml
+controller:
+ replicas: 2
+```
+
+Some services do not support this without additional configuration (for example, `l2-sequencer` and `l2-bootnode`). We are working on additional info on how to properly run multiple instances of these services, either for load balancing between them or for keeping redundant backups available.
+
+### Enable Proof Generation using External Providers
+
+The Scroll team has been collaborating closely with teams specializing in proof generation to enable plug-and-play proof generation for SDK networks.
+
+In this example, we'll use a sample chart from the `scroll-proving-sdk` repo to generate proofs with [Sindri](https://sindri.app/docs/introduction/). In the future, teams will publish their own charts for chains to easily enable one or more external providers.
+
+Because this feature is not directly built into the Scroll SDK, there will be quite a bit of copy-pasting.
+
+#### Creating Values Files for each type of Provider
+
+In Scroll, we have 3 types of provers: chunk (type-1), batch (type-2), and bundle (type-3). We'll deploy 3 sets of the chart, each with a different type of prover.
+
+Create the following files in your `values` directory:
+
+`prover-chunk-production.yaml`:
+```yaml
+global:
+ nameOverride: &app_name prover-chunk
+ fullnameOverride: *app_name
+
+persistence:
+ config:
+ enabled: true
+ type: configMap
+ mountPath: /sdk_prover/
+ name: prover-chunk-config
+
+scrollConfig: |
+ {
+ "prover_name_prefix": "sindri_chunk_",
+ "keys_dir": "keys",
+ "coordinator": {
+ "base_url": "http://coordinator-api:80",
+ "retry_count": 3,
+ "retry_wait_time_sec": 5,
+ "connection_timeout_sec": 60
+ },
+ "l2geth": {
+ "endpoint": "http://l2-rpc:8545"
+ },
+ "prover": {
+ "circuit_type": 1,
+ "circuit_version": "v0.13.1",
+ "n_workers": 1,
+ "cloud": {
+ "base_url": "https://sindri.app/api/v1/",
+ "api_key": "",
+ "retry_count": 3,
+ "retry_wait_time_sec": 5,
+ "connection_timeout_sec": 60
+ }
+ }
+ }
+```
+
+`prover-batch-production.yaml`:
+```yaml
+global:
+ nameOverride: &app_name prover-batch
+ fullnameOverride: *app_name
+
+persistence:
+ config:
+ enabled: true
+ type: configMap
+ mountPath: /sdk_prover/
+ name: prover-batch-config
+
+scrollConfig: |
+ {
+ "prover_name_prefix": "sindri_batch_",
+ "keys_dir": "keys",
+ "coordinator": {
+ "base_url": "http://coordinator-api:80",
+ "retry_count": 3,
+ "retry_wait_time_sec": 5,
+ "connection_timeout_sec": 60
+ },
+ "l2geth": {
+ "endpoint": "http://l2-rpc:8545"
+ },
+ "prover": {
+ "circuit_type": 2,
+ "circuit_version": "v0.13.1",
+ "n_workers": 1,
+ "cloud": {
+ "base_url": "https://sindri.app/api/v1/",
+ "api_key": "",
+ "retry_count": 3,
+ "retry_wait_time_sec": 5,
+ "connection_timeout_sec": 60
+ }
+ }
+ }
+
+```
+
+`prover-bundle-production.yaml`:
+```yaml
+global:
+ nameOverride: &app_name prover-bundle
+ fullnameOverride: *app_name
+
+persistence:
+ config:
+ enabled: true
+ type: configMap
+ mountPath: /sdk_prover/
+ name: prover-bundle-config
+
+scrollConfig: |
+ {
+ "prover_name_prefix": "sindri_bundle_",
+ "keys_dir": "keys",
+ "coordinator": {
+ "base_url": "http://coordinator-api:80",
+ "retry_count": 3,
+ "retry_wait_time_sec": 5,
+ "connection_timeout_sec": 60
+ },
+ "l2geth": {
+ "endpoint": "http://l2-rpc:8545"
+ },
+ "prover": {
+ "circuit_type": 3,
+ "circuit_version": "v0.13.1",
+ "n_workers": 1,
+ "cloud": {
+ "base_url": "https://sindri.app/api/v1/",
+ "api_key": "",
+ "retry_count": 3,
+ "retry_wait_time_sec": 5,
+ "connection_timeout_sec": 60
+ }
+ }
+ }
+
+```
+
+Be sure to set `prover.cloud.api_key` to the value created in Sindri's user dashboard.
+
+Notice that each file is similar, with only the `prover.circuit_type` and a few name values changing.
+
+Lastly, set `prover.n_workers` to the number of provers you'd like to dedicate to proof generation. We recommend starting at 1 for each during testing, but scaling up as needed.
+
+#### Adding Provers to your Makefile
+
+Now, let's add the prover services to the bottom of your `Makefile`.
+
+```
+install-provers:
+ helm upgrade -i prover-chunk oci://ghcr.io/scroll-tech/scroll-sdk/helm/scroll-proving-sdk -n $(NAMESPACE) \
+ --version=0.0.5 \
+ --values values/prover-chunk-production.yaml
+
+ helm upgrade -i prover-batch oci://ghcr.io/scroll-tech/scroll-sdk/helm/scroll-proving-sdk -n $(NAMESPACE) \
+ --version=0.0.5 \
+ --values values/prover-batch-production.yaml
+
+ helm upgrade -i prover-bundle oci://ghcr.io/scroll-tech/scroll-sdk/helm/scroll-proving-sdk -n $(NAMESPACE) \
+ --version=0.0.5 \
+ --values values/prover-bundle-production.yaml
+
+delete-provers:
+ helm delete -n $(NAMESPACE) prover-chunk
+ helm delete -n $(NAMESPACE) prover-batch
+ helm delete -n $(NAMESPACE) prover-bundle
+```
+
+Now, simply run `make install-provers` to deploy the provers.
+
+
+
+{/* TODO: Update to point at actual charts once available. */}
+
+
+{/* ### TODO: Add Graphana charts for Monitoring
+
+To quickly get started with Grafana, run the following command:
+
+
+Now, visit the localhost URL in [your browser](http://localhost:3000/). The default password for the `admin` user is `prom-operator`.
+
+Adding an ingress URL, changing the default password or adding LDAP login are all suggested if you use this stack in production. */}
+
+
+### Monitoring
+
+You can monitor the cluster's running status through Grafana. Additionally, you can send alerts via email and Slack using Alertmanager.
+
+If you have configured a domain for Grafana in the previous steps, you can access it by opening `http://grafana.yourdomain`, where you will see two sets of dashboards. The default password for the `admin` user is `scroll-sdk`.
+
+
+#### Sending Alerts to Slack
+1. **Create a Slack App**
+
+ Open [https://api.slack.com/apps](https://api.slack.com/apps) and click **`Create New App`** if you don't have one already. Select **`From scratch`**, enter a name, and select the workspace.
+
+2. **Activate Incoming Webhooks**
+
+ Click the **`Incoming Webhooks`** label on the right side of the page, then turn on **`Activate Incoming Webhooks`**.
+
+
+ Click the **`Add New Webhook to Workspace`** button.
+
+
+ Select the channel you want to send alerts to, then copy the Webhook URL.
+
+
+
+3. **Edit the Config File**
+
+   Edit `./values/alert-manager.yaml`, replacing the webhook URL and Slack channel name with your own.
+ ```
+ kube-prometheus-stack:
+ alertmanager:
+ config:
+ global:
+ resolve_timeout: 5m
+ slack_api_url: 'https://hooks.slack.com/services/xxxxxxxxxxx/xxxxxxxxxxx/xxxxxxxxxxxxxxxxxxxxxxxx' # your webhook url
+ receivers:
+ - name: 'slack-alerts'
+ slack_configs:
+ - channel: '#scroll-webhook' #your channel name
+ send_resolved: true
+ text: '{{ .CommonAnnotations.description }}'
+ title: '{{ .CommonAnnotations.summary }}'
+ route:
+ group_by: ['alertname']
+ receiver: 'slack-alerts'
+ routes:
+ - matchers: []
+ receiver: 'slack-alerts'
+ ```
+ This configuration file will send all alerts to your Slack channel. If you need more complex rules, refer to the [Prometheus Alerting Configuration Documentation](https://prometheus.io/docs/alerting/latest/configuration/).
+
+4. **Update Alertmanager**
+
+ Use the following command to update Alertmanager:
+ ```
+ helm upgrade --reuse-values -i scroll-monitor oci://ghcr.io/scroll-tech/scroll-sdk/helm/scroll-monitor -n $(NAMESPACE) \
+ --values ./values/alert-manager.yaml
+ ```
+
+
+{/* TODO: Add guide for disabling testnet finalization without proofs. */}
\ No newline at end of file
diff --git a/src/content/docs/en/sdk/guides/production-deployment.mdx b/src/content/docs/en/sdk/guides/production-deployment.mdx
new file mode 100644
index 000000000..ac74562e5
--- /dev/null
+++ b/src/content/docs/en/sdk/guides/production-deployment.mdx
@@ -0,0 +1,62 @@
+---
+section: sdk
+date: Last Modified
+title: "Running Scroll SDK in Production"
+lang: "en"
+permalink: "sdk/guides/production-deploymnet"
+excerpt: "Run the Scroll SDK in production."
+---
+
+import Aside from "../../../../../components/Aside.astro"
+import ToggleElement from "../../../../../components/ToggleElement.astro"
+import Steps from '../../../../../components/Steps/Steps.astro';
+
+
+
+## Overview
+
+This guide will support DevOps teams in running Scroll SDK in a production environment. This requires many considerations beyond a local [Devnet deployment](/en/sdk/guides/devnet-deployment).
+
+For a more hands-on guide looking at specifics, see our [Digital Ocean guide](/en/sdk/guides/digital-ocean-alt-gas-token), which walks through a full Kubernetes deployment using an easy-to-understand interface. We also have an [AWS guide](/en/sdk/guides/aws-deployment) available.
+
+### Monitoring
+
+See the [Monitoring](/en/sdk/operation/monitoring) section for more information.
+
+### Ingress
+
+We're using the NGINX Ingress Controller and Cert-Manager. More info to come.
+
+### Secrets
+
+We use the External Secrets Operator to manage secrets. This is a Kubernetes-native solution that lets you keep secrets in an external secret store rather than in the Scroll SDK repository, which is more secure, but you will still need to bring your own secret management tool. This could be Hashicorp Vault, AWS Secret Manager, or similar.
+
+Our CLI tool currently supports a development-mode Hashicorp Vault and AWS Secret Manager. The [Digital Ocean guide](/en/sdk/guides/digital-ocean-alt-gas-token) uses Hashicorp Vault, while the [AWS guide](/en/sdk/guides/aws-deployment) uses AWS Secret Manager.
+
+### Machine Resources
+
+In addition to 3 databases (and an optional database for Blockscout), we'll be providing guidance on the resources needed for each Scroll service.
+
+#### Sepolia Configuration
+
+For Scroll's Sepolia environment, we use the following resources:
+
+|Service | Quantity (sepolia) | vCPU (sepolia) | Mem in Mi (sepolia) |
+|----------|------------------|------------------|------------------|
+| balance-checker| 1 | 0.1 |500|
+| bridge-history-api | 2 | 0.2 |200|
+| bridge-history-fetcher | 1| 0.2 |200|
+| coordinator-api| 2 | 0.2 |20000|
+| coordinator-cron| 1 | 0.1 |200|
+| chain-monitor| 1 | 0.2 |200|
+| frontends | 1 | 0.1 |500|
+| gas-oracle| 1| 0.1 |200|
+| l2-bootnode| 3 | 2 |16000|
+| l2-rpc| 4 | 0.5 |4000|
+| l2-sequencer| 1 | 0.1 |1500|
+| rollup-explorer-backend | 2 | 3 |6000|
+| rollup-node| 1 | 0.1 |200|
+| rpc-gateway| 1 | 0.1 |100|
+| Total | 22| 15.9 |120000|
\ No newline at end of file
diff --git a/src/content/docs/en/sdk/index.mdx b/src/content/docs/en/sdk/index.mdx
new file mode 100644
index 000000000..f6d4bbf72
--- /dev/null
+++ b/src/content/docs/en/sdk/index.mdx
@@ -0,0 +1,63 @@
+---
+section: sdk
+title: "Scroll SDK"
+lang: "en"
+permalink: "sdk/"
+excerpt: "Learn more about how to depoly your own rollup using Scroll's zkEVM"
+---
+
+import NavCard from "../../../../components/NavCard.astro"
+import StartSvg from "~/assets/svgs/home/home-start.svg?raw"
+import TechnologySvg from "../../../../assets/svgs/home/home-technology.svg?raw"
+import LearnSvg from "../../../../assets/svgs/home/home-learn.svg?raw"
+import DevelopSvg from "../../../../assets/svgs/home/home-develop.svg?raw"
+import Aside from "../../../../components/Aside.astro"
+
+## Introduction
+
+Scroll SDK allows anyone to quickly deploy an instance of the Scroll zkEVM and its rollup architecture as their own L2 on Ethereum.
+
+We're working with a number of clients, technology partners, and service providers to build the most robust ZK stack for Ethereum.
+
+If you want to dive deeper and try launching your own L2, keep reading and check out the additional resources.
+
+## Current Feature Set
+
+- Fully functional local devnet or Sepolia testnet deployment of Scroll's current zkEVM and protocol
+- Configurable deployment using Docker, Kubernetes, and Helm
+- Choose between Ether or any ERC20 token as the native gas token
+- Plug-and-play proof generation using various service providers, allowing for failover and elastic capacity
+- Adaptable finality time, allowing custom trade-offs between finality time and on-chain costs
+- Tools for interacting and exploring your chain, including a CLI tool, Blockscout, Grafana dashboards, our Rollup Explorer, and a demo Bridge UI
+
+## Planned Feature Set
+
+- Rollup or Validium Mode (Modular DA using 4844, callData, or Alt-DA Layers)
+- Base layer flexibility (Ethereum mainnet, Scroll, or any EVM-compatible environment)
+- Customization of the sequencer node to add additional features to the EVM
+- Out-of-the-box, contract auto-deployments for various commonly used protocols
+
+
+
+
+
+
+
+
diff --git a/src/content/docs/en/sdk/operation/_images/admin-system-dashboard-batch-view.png b/src/content/docs/en/sdk/operation/_images/admin-system-dashboard-batch-view.png
new file mode 100644
index 000000000..5341421a5
Binary files /dev/null and b/src/content/docs/en/sdk/operation/_images/admin-system-dashboard-batch-view.png differ
diff --git a/src/content/docs/en/sdk/operation/_images/alertmanager.png b/src/content/docs/en/sdk/operation/_images/alertmanager.png
new file mode 100644
index 000000000..e4d67861f
Binary files /dev/null and b/src/content/docs/en/sdk/operation/_images/alertmanager.png differ
diff --git a/src/content/docs/en/sdk/operation/_images/grafana.png b/src/content/docs/en/sdk/operation/_images/grafana.png
new file mode 100644
index 000000000..20f594734
Binary files /dev/null and b/src/content/docs/en/sdk/operation/_images/grafana.png differ
diff --git a/src/content/docs/en/sdk/operation/gas-and-fees.mdx b/src/content/docs/en/sdk/operation/gas-and-fees.mdx
new file mode 100644
index 000000000..ad8eb2eea
--- /dev/null
+++ b/src/content/docs/en/sdk/operation/gas-and-fees.mdx
@@ -0,0 +1,237 @@
+---
+section: sdk
+title: "Gas & Fee Management in Scroll SDK"
+lang: "en"
+permalink: "sdk/operation/gas-and-fees"
+excerpt: "Learn more about gas and fee management in Scroll SDK"
+---
+
+import Aside from "../../../../../components/Aside.astro"
+
+Scroll SDK provides a comprehensive gas and fee management system to ensure the efficient operation of the network. This section provides an overview of the gas and fee management tools and best practices for using them.
+
+## Transaction Fees on an SDK Chain
+
+Transaction fees for users on Scroll are split between an L2 Fee and an L1 Fee. For more information on how transaction fees work, see [Transaction Fees on Scroll](/en/developers/transaction-fees-on-scroll).
+
+Paid network fees are collected in the `L2FeeVault` contract. Any user can trigger the movement of funds to L1, where they can be claimed by the `OWNER` role.
+
+### Configuring L2 Execution Fees
+
+L2 transaction fees are set as a minimum "floor" for the execution component of the fee, beyond which normal mechanisms for EIP1559 adjustment apply.
+
+This is set with `--miner.gasprice` on the sequencer. You can modify this value and `--gpo.ignoreprice` in the chart by overriding the `L2GETH_MIN_GAS_PRICE` environment variable [here](https://github.com/scroll-tech/scroll-sdk/blob/main/charts/l2-sequencer/values.yaml#L106).
+
+Additionally, you could modify the `--gpo.percentile` and `--gpo.blocks` arguments, but you will need to manually modify the `l2-sequencer` chart.
+
+Lastly, RPC nodes (or any node that accepts transactions) should set `--gpo.congestionthreshold`, which we default to 500. This configuration allows nodes to provide more accurate fee estimates. The value is the number of pending transactions above which the network is considered congested; if there are fewer pending transactions in the txpool, the node suggests a minimum tip cap.[^congestion-threshold]
+
+[^congestion-threshold]: See these [`l2geth` code comments](https://github.com/scroll-tech/go-ethereum/blob/develop/eth/gasprice/gasprice.go#L197) for more info.
+
+For additional information, see the [geth documentation](https://geth.ethereum.org/docs/fundamentals/command-line-options).
+
+### Configuring L1 Fees
+
+The `L1GasOracle` pre-deployed contract holds the values used to calculate the L1 fee for transactions.
+
+The following fields are set by the Gas Oracle service, specifically by transactions submitted by the `L2GasOracleSender`:
+- `l1BaseFee`: the base fee for the L1 transaction
+- `l1BlobBaseFee`: the base fee for the L1 blob data
+
+The following fields are set by the Owner using setter functions in the `L1GasOracle` contract:
+- `commitScalar`
+- `blobScalar`
+- `overhead`
+- `scalar`
+
+{/* TODO: Just suggest sensible defaults for these values. */}
+
+To see these on Scroll's mainnet deployment, view the [L1GasOracle contract](https://scrollscan.com/address/0x5300000000000000000000000000000000000002#writeContract).
+
+For more information on how gas fees on Scroll are calculated, see the [Calculating the L1 Data Fee with Gas Oracle](/en/developers/transaction-fees-on-scroll/#calculating-the-l1-data-fee-with-gas-oracle).
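+
+If you want to inspect the current values on your own chain, these fields are exposed as public getters (assuming the same interface as Scroll's own deployment), so they can be read with Foundry's `cast`:
+
+```bash
+cast call --rpc-url [l2-rpc-url] 0x5300000000000000000000000000000000000002 "l1BaseFee()(uint256)"
+cast call --rpc-url [l2-rpc-url] 0x5300000000000000000000000000000000000002 "commitScalar()(uint256)"
+cast call --rpc-url [l2-rpc-url] 0x5300000000000000000000000000000000000002 "blobScalar()(uint256)"
+```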
+
+#### Calculating and Setting Gas Oracle Fields
+
+`L1GasPriceOracle` has two variables, `commitScalar` and `blobScalar`, which are used to calculate the L1 data fee for an L2 transaction.
+
+To calculate the scalars, you can use the following formula:
+
+```
+// fixed values
+compression_ratio = 2.887
+blob_util_ratio = 87.1%
+
+// formula to calculate scalars
+// tx_per_block: average transaction number per L2 block
+// tx_per_batch: average transaction number per batch
+// fluctuation_multiplier: multiplier used to prevent loss from transaction number and L1 gas price fluctuation
+commitScalar = (1382.58 / tx_per_block + 71621.32 / tx_per_batch) * fluctuation_multiplier * 1e9
+blobScalar = fluctuation_multiplier / compression_ratio / blob_util_ratio * 1e9
+```
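+
+For instance, plugging in some illustrative numbers (assumed values, not recommendations):
+
+```
+// example: tx_per_block = 10, tx_per_batch = 500, fluctuation_multiplier = 1.2
+commitScalar = (1382.58 / 10 + 71621.32 / 500) * 1.2 * 1e9 ≈ 3.38e11
+blobScalar   = 1.2 / 2.887 / 0.871 * 1e9 ≈ 4.77e8
+```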
+
+To set the scalars on `L1GasPriceOracle`, you can run this command with `cast`:
+
+```bash
+cast send --rpc-url [l2-rpc-url] 0x5300000000000000000000000000000000000002 "setCommitScalar(uint256)" [value] --private-key [owner-private-key]
+cast send --rpc-url [l2-rpc-url] 0x5300000000000000000000000000000000000002 "setBlobScalar(uint256)" [value] --private-key [owner-private-key]
+```
+
+
+
+### Claiming Fees from the Vault
+
+As L2 Fees accumulate in the L2FeeVault, any user can call the `withdraw` method to "claim" these fees. This will bridge the funds to the `L1_FEE_VAULT_ADDR` address (configured in `config.toml`) on the L1 chain.
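+
+For example, with Foundry's `cast` (the `L2FeeVault` pre-deploy address and `withdraw(uint256)` signature below are assumptions based on Scroll's own deployment -- check them against your contract configuration, and note the vault enforces a minimum withdrawal amount):
+
+```bash
+cast send --rpc-url [l2-rpc-url] \
+  0x5300000000000000000000000000000000000005 \
+  "withdraw(uint256)" 1000000000000000000 \
+  --private-key [any-funded-private-key]
+```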
+
+After the funds are bridged, the bridged funds will still need to be claimed on L1 with a proof that can be easily obtained using the `bridge-history-api`. To see this process on Scroll, see [Finalizing Transactions on L1](/en/developers/l1-and-l2-bridging/the-scroll-messenger/#finalizing-transactions-on-l1)
+
+
+
+## Alternative Gas Token
+
+Beyond using Ether as the native gas token, Scroll SDK also supports alternative gas tokens. This customization allows users to use their preferred gas token for transactions.
+
+Because transaction fees include not just an L2 execution fee but also an L1 data fee, a conversion is needed between the L1's native token and the SDK's gas token. This requires additional configuration and logic in the gas oracle.
+
+#### Gas Oracle Fields for Alternative Gas Tokens
+
+{/* TODO: What's the latest here? */}
+
+Instead of introducing another variable to the `L1GasPriceOracle` contract which requires manual updates from the owner, operators should modify the Gas Oracle to include the ETH/ERC20 conversion rate.
+
+### Configuring Gas Oracle for Alternative Gas Tokens
+
+Basic configuration for the Gas Oracle can be made in the `config.toml` file before generating the service's config values.
+
+#### config.toml [gas-token] values
+
+- `GAS_ORACLE_INCORPORATE_TOKEN_EXCHANGE_RATE_ENANBLED`
+  - If `true`, incorporates the L2/L1 exchange rate into the gas price. *Should only be set to `true` when an alternative gas token is enabled.*
+ - Default: `false`
+- `EXCHANGE_RATE_UPDATE_MODE`
+ - The mode used to set L2/L1 gas token exchange rate. Options supported are `Fixed` and `BinanceApi`.
+ - Default: `Fixed`
+- `FIXED_EXCHANGE_RATE`
+ - When using "Fixed" exchange rate mode, the number of native token on L1 required to exchange for 1 native token on L2
+ - Devnet Default: `0.01`
+- `TOKEN_SYMBOL_PAIR`
+ - When using "BinanceApi" exchange rate mode, the pair should be L2 gas token symbol + L1 native token symbol. For instance, if using UNI token as the gas token on L2, the pair should be “UNIETH”. Token pair should be supported by Binance and included in their [ticker list API](https://api.binance.com/api/v3/ticker/price)
+ - **NOTE:** This API is not accessible in some regions, including the US. Confirm access eligibility before using.
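+
+Before enabling `BinanceApi` mode, it's worth confirming the pair is actually listed (using the `UNIETH` example above):
+
+```bash
+curl -s "https://api.binance.com/api/v3/ticker/price?symbol=UNIETH"
+```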
+
+#### `gas-oracle` config values
+
+For more complicated configurations, you'll want to make manual adjustments to your Gas Oracle config values, specifically the `alternative_gas_token_config` sections.
+
+```json
+// L1 gas oracle config
+"gas_oracle_config": {
+ "min_gas_price": 0,
+ "gas_price_diff": 50000,
+ "l1_base_fee_weight": 0.132,
+ "l1_blob_base_fee_weight": 0.145,
+ "check_committed_batches_window_minutes": 5,
+ "l1_base_fee_default": 15000000000,
+ "l1_blob_base_fee_default": 1,
+ "alternative_gas_token_config": {
+ "enabled": false,
+ "mode": "BinanceAp",
+ "fixed_exchange_rate": 0.001,
+ "token_symbol_pair": "UNIETH"
+ }
+}
+
+// L2 gas oracle config
+"gas_oracle_config": {
+ "min_gas_price": 0,
+ "gas_price_diff": 50000,
+ "alternative_gas_token_config": {
+ "enabled": false,
+ "mode": "BinanceAp",
+ "fixed_exchange_rate": 0.001,
+ "token_symbol_pair": "UNIETH"
+ }
+},
+```
+
+**L1 gas oracle config**
+
+- `min_gas_price`
+ - The minimal gas price set to contract `L1GasPriceOracle` *(for both baseFee and blobBaseFee)*
+- `gas_price_diff`
+ - The minimum percentage of gas price difference to update gas oracle *(for both baseFee and blobBaseFee)*
+- `l1_blob_base_fee_weight`
+ - The weight for L1 blob base fee *(deprecated after curie upgrade)*
+- `check_committed_batches_window_minutes`
+  - The time frame, in minutes, to check whether batches have been committed when deciding whether to update the gas oracle. If we are not committing batches due to high fees, we shouldn't update the fees (to prevent users from paying a high l1_data_fee) and should instead fall back to the default values below.
+  - Should be set to the same or a slightly larger value than `batch_timeout_sec`, remembering to convert seconds to minutes.
+- `l1_base_fee_default`
+ - The default base cost value set when a batch is not committed for longer than `check_committed_batches_window_minutes`.
+- `l1_blob_base_fee_default`
+ - The default blob base cost value set when a batch is not committed for longer than `check_committed_batches_window_minutes`.
+- `alternative_gas_token_config`:
+ - `enabled`
+    - If enabled, incorporates the L2/L1 gas token exchange rate into the gas price. *(Should only be set to `true` when an alternative gas token is enabled)*
+ - `mode`
+ - The mode to retrieve L2/L1 gas token exchange rate. (`Fixed` || `BinanceApi`)
+ - `fixed_exchange_rate`
+ - When using "Fixed" exchange rate mode, the number of native tokens on L1 required to exchange for 1 native token on L2
+ - `token_symbol_pair`
+ - When using "BinanceApi" exchange rate mode, the pair should be L2 gas token symbol + L1 native token symbol. For instance, if using UNI token as the gas token on L2, the pair should be “UNIETH”. Token pair should be supported by Binance and included in their [ticker list API](https://api.binance.com/api/v3/ticker/price)
+ - **NOTE:** This API is not accessible in some regions, including the US. Confirm access eligibility before using.
+
+**L2 gas oracle config**
+
+- `min_gas_price`
+ - The minimal gas price set to contract L1GasPriceOracle.
+- `gas_price_diff`
+ - The minimum percentage of gas price difference to update gas oracle.
+- `alternative_gas_token_config`
+ - Refer to `alternative_gas_token_config` on L1 gas oracle config above.
+
+## Security Considerations for Alternative Gas Tokens
+
+When implementing alternative gas tokens, operators should be aware of several important security considerations to prevent potential loss of funds and user errors.
+
+The contract and gas oracle changes for Scroll SDK are not used on Scroll mainnet, but have been audited by Trail of Bits.
+
+{/* TODO: Add link to audit report */}
+
+### L2 to L1 Message Queue Restrictions
+
+
+
+The L2 Message Queue system has strict requirements for handling native token transfers:
+
+- Messages with non-zero value **must** be sent to the `L1GasTokenGateway` address
+- Messages sent to any other address with non-zero value will fail to relay on L1, resulting in burned tokens
+- Ideally, custom gateway implementations should validate destination addresses on L2 before allowing transfers
+
+### Token Decimal Scaling Issues
+
+When bridging ERC-20 tokens to be used as native L2 tokens:
+
+- Tokens are automatically scaled to 18 decimals on L2
+- Example: If using USDT (6 decimals) as gas token:
+  - 1 USDT on L1 = 10^12 native tokens on L2
+ - This maintains UI display values but can affect base unit calculations
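+
+Concretely, for a 6-decimal token:
+
+```
+1 USDT on L1            = 1 * 10^6  base units (6 decimals)
+same value held on L2   = 1 * 10^18 base units (18 decimals)
+scaling factor          = 10^(18 - 6) = 10^12
+```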
+
+
+
+### Contract Interface Naming
+
+
+
+Operators should be aware of potentially confusing contract interfaces:
+
+- `L1GasTokenGateway.depositETH()` - Actually deposits ERC-20 gas tokens, not ETH
+- L2-side functions reference "ETH" even when using alternative tokens
+- `L1WrappedTokenGateway` handles wrapped ETH, not wrapped gas tokens
diff --git a/src/content/docs/en/sdk/operation/index.mdx b/src/content/docs/en/sdk/operation/index.mdx
new file mode 100644
index 000000000..93db5d1ba
--- /dev/null
+++ b/src/content/docs/en/sdk/operation/index.mdx
@@ -0,0 +1,35 @@
+---
+section: sdk
+title: "Operating Scroll SDK"
+lang: "en"
+permalink: "sdk/operation"
+excerpt: "Learn more about running and operating Scroll SDK"
+---
+
+import NavCard from "../../../../../components/NavCard.astro"
+import TechnologySvg from "../../../../../assets/svgs/home/home-technology.svg?raw"
+import LearnSvg from "../../../../../assets/svgs/home/home-learn.svg?raw"
+import DevelopSvg from "../../../../../assets/svgs/home/home-develop.svg?raw"
+
+After familiarizing yourself with the technical stack, you can proceed to the following sections to learn more about running and operating Scroll SDK, along with some best practices and reference information.
+
+{/*
+
+
+
+
*/}
diff --git a/src/content/docs/en/sdk/operation/monitoring.mdx b/src/content/docs/en/sdk/operation/monitoring.mdx
new file mode 100644
index 000000000..04de712dc
--- /dev/null
+++ b/src/content/docs/en/sdk/operation/monitoring.mdx
@@ -0,0 +1,451 @@
+---
+section: sdk
+title: "Monitoring Scroll SDK"
+lang: "en"
+permalink: "sdk/operation/monitoring"
+excerpt: "Learn more about monitoring Scroll SDK"
+---
+
+import Aside from "../../../../../components/Aside.astro"
+import ClickToZoom from "../../../../../components/ClickToZoom.astro";
+import AdminSystemDashboard from "./_images/admin-system-dashboard-batch-view.png"
+import Alertmanager from "./_images/alertmanager.png"
+import Grafana from "./_images/grafana.png"
+
+Scroll SDK provides a comprehensive monitoring system to ensure the health and performance of the network. This section provides an overview of the monitoring tools and best practices for using them.
+
+## `scroll-monitor` Service
+
+The Scroll Monitor service provides a starting point for adding observability and monitoring to your Scroll SDK deployment. It brings together Grafana, Loki, Prometheus, and AlertManager to provide a comprehensive monitoring solution, or can serve as a template for implementing Scroll SDK into your existing monitoring stack.
+
+### Service Dashboards
+
+We've built and made available a few Grafana dashboards for Scroll SDK chains. These include views for the following services:
+- `bridge-history-api`
+- `bridge-history-fetcher`
+- `chain-monitor`
+- `coordinator-api`
+- `coordinator-cron`
+- `gas-oracle`
+- `l2geth` instances (including `l2-sequencer`, `l2-bootnode` and `l2-rpc`)
+- `rollup-node`
+
+You can access these from the `scroll-sdk` repo [here](https://github.com/scroll-tech/scroll-sdk/tree/develop/charts/scroll-monitor/grafana/scroll-dashboards).
+
+
+{/* */}
+
+{/* */}
+
+{/* TODO: List important views from various dashboards. */}
+
+### Notifications with Alertmanager
+
+The `scroll-monitor` service's configuration file also supports Slack webhooks for easy integration with your existing Slack workspace.
+
+
+
+
+{/* TODO: Document what we have alerts setup for. */}
+
+For a detailed guide on configuring and setting up AlertManager, you can refer to the [official documentation](https://prometheus.io/docs/alerting/latest/configuration/).
+
+## Scroll Admin System
+
+The Scroll Admin System Dashboard provides a web interface for managing and monitoring proof production in Scroll SDK deployments. It includes features for viewing the status of chunks, batches and bundles, along with all registered provers and assigned tasks.
+
+
+
+{/* TODO: Consider describing what you can do in each view */}
+
+{/* ## Balance Checker */}
+
+{/* TODO: Return with simple overview of balance-checker, how to set it up, and what it does. */}
+
+## Chain Monitor
+
+Chain Monitor can be configured to send Slack notifications if:
+- deposit and withdraw messages from L1 and L2 don't match
+- asset value on escrow contracts doesn't match the deposit or withdraw messages
+- WithdrawRoots don't match
+
+To set this up, set a Webhook URL in the `slack_webhook_config` section of the `chain-monitor` config.
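+
+As a sketch, this section of the generated `chain-monitor-config.json` might look like the following; the `webhook_url` field name here is illustrative, so check the config file produced for your deployment for the exact schema:
+
+```json
+"slack_webhook_config": {
+  "webhook_url": "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"
+}
+```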
+
+## Prometheus Metrics
+
+Below are tables of Prometheus metrics for each service in the Scroll SDK:
+
+### chain-monitor
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| chain_monitor_request_body_total | The server received request body size, unit byte | Counter |
+| chain_monitor_request_duration_bucket | Cumulative counters for the observation buckets (the time server took to handle the request.) | Counter |
+| chain_monitor_request_duration_count | Count of events that have been observed for the histogram metric (the time server took to handle the request.) | Counter (Histogram) |
+| chain_monitor_request_duration_sum | Total sum of all observed values for the histogram metric (the time server took to handle the request.) | Counter (Histogram) |
+| chain_monitor_request_total | All the server received request num. | Counter |
+| chain_monitor_request_uv_total | All the server received ip num. | Counter |
+| chain_monitor_response_body_total | The server send response body size, unit byte | Counter |
+| chain_monitor_uri_request_total | All the server received request num with every uri. | Counter |
+| contract_controller_block_number | The block number of controller running. | Gauge |
+| contract_controller_running_total | The total number of controllers running. | Counter |
+| cross_chain_check_controller_running_total | The total number of cross chain controllers running. | Counter |
+| gateway_batch_finalized_failed_total | The total number of gateway batch finalized failed. | Counter |
+| messenger_batch_finalized_failed_total | The total number of messenger batch finalized failed. | Counter |
+| slack_alert_cross_chain_eth_event_balance_not_match_total | The total number of alert cross chain eth event balance not match total. | Counter |
+| slack_alert_cross_chain_eth_event_not_match_total | The total number of alert cross chain eth event not match total. | Counter |
+| slack_alert_cross_chain_gateway_event_not_match_total | The total number of alert cross chain gateway event not match total. | Counter |
+| slack_alert_gateway_event_duplicated_total | The total number of alert gateway event duplicated. | Counter |
+| slack_alert_gateway_transfer_not_match_total | The total number of alert gateway transfer event not match total. | Counter |
+| slack_alert_messenger_event_duplicated_total | The total number of alert messenger event duplicated. | Counter |
+| slack_alert_withdraw_root_not_match_total | The total number of alert withdraw root not match total. | Counter |
+
+### rollup-node
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| rollup_l2_block_l1_commit_calldata_size | The l1 commitBatch calldata size of the l2 block | Gauge |
+| rollup_l2_watcher_blocks_fetched_gap | The gap of l2 fetch | Gauge |
+| rollup_l2_watcher_fetch_running_missing_blocks_height | The total number of l2 watcher fetch running missing blocks height | Gauge |
+| rollup_l2_watcher_fetch_running_missing_blocks_total | The total number of l2 watcher fetch running missing blocks | Counter |
+| rollup_layer2_bundles_finalized_confirmed_failed_total | Total number of failed confirmations for finalized bundles on layer2. | Counter |
+| rollup_layer2_bundles_finalized_confirmed_total | Total number of finalized bundles confirmed on layer2. | Counter |
+| rollup_layer2_chain_monitor_latest_failed_batch_call | The total number of failed call chain_monitor api | Counter |
+| rollup_layer2_chain_monitor_latest_failed_batch_status | The total number of failed batch status get from chain_monitor | Counter |
+| rollup_layer2_gas_price_latest_gas_price | The latest gas price of rollup relayer l2 | Gauge |
+| rollup_layer2_gas_price_oracler_total | The total number of layer2 gas price oracler run total | Counter |
+| rollup_layer2_process_committed_batches_confirmed_failed_total | The total number of layer2 process committed batches confirmed failed total | Counter |
+| rollup_layer2_process_committed_batches_confirmed_total | The total number of layer2 process committed batches confirmed total | Counter |
+| rollup_layer2_process_committed_batches_finalized_success_total | The total number of layer2 process committed batches finalized success total | Counter |
+| rollup_layer2_process_committed_batches_finalized_total | The total number of layer2 process committed batches finalized total | Counter |
+| rollup_layer2_process_committed_batches_total | The total number of layer2 process committed batches run total | Counter |
+| rollup_layer2_process_finalized_batches_confirmed_failed_total | The total number of layer2 process finalized batches confirmed failed total | Counter |
+| rollup_layer2_process_finalized_batches_confirmed_total | The total number of layer2 process finalized batches confirmed total | Counter |
+| rollup_layer2_process_pending_batch_err_too_many_pending_blob_txs_total | The total number of layer2 process pending batch failed on too many pending blob txs | Counter |
+| rollup_layer2_process_pending_batch_success_total | The total number of layer2 process pending success batch | Counter |
+| rollup_layer2_process_pending_batch_total | The total number of layer2 process pending batch | Counter |
+| rollup_layer2_relayer_process_pending_bundles_finalized_success_total | Total number of times the layer2 relayer has successful finalized proven bundle processes. | Counter |
+| rollup_layer2_relayer_process_pending_bundles_finalized_total | Total number of times the layer2 relayer has finalized proven bundle processes. | Counter |
+| rollup_layer2_relayer_process_pending_bundles_total | Total number of times the layer2 relayer has processed pending bundles. | Counter |
+| rollup_layer2_update_layer1_gas_oracle_confirmed_failed_total | The total number of updating layer2 gas oracle confirmed failed | Counter |
+| rollup_layer2_update_layer1_gas_oracle_confirmed_total | The total number of updating layer2 gas oracle confirmed | Counter |
+| rollup_propose_batch_chunks_number | The number of chunks in the batch | Gauge |
+| rollup_propose_batch_chunks_propose_not_enough_total | Total number of batch chunk propose not enough | Counter |
+| rollup_propose_batch_circle_total | Total number of propose batch total. | Counter |
+| rollup_propose_batch_due_to_compressed_data_compatibility_breach_total | Total number of propose batch due to compressed data compatibility breach. | Counter |
+| rollup_propose_batch_estimate_blob_size_time | Time taken to estimate blob size for the chunk. | Gauge |
+| rollup_propose_batch_estimate_calldata_size_time | Time taken to estimate calldata size for the chunk. | Gauge |
+| rollup_propose_batch_estimate_gas_time | Time taken to estimate gas for the chunk. | Gauge |
+| rollup_propose_batch_failure_circle_total | Total number of propose batch total. | Counter |
+| rollup_propose_batch_first_block_timeout_reached_total | Total times of batch's first block timeout reached | Counter |
+| rollup_propose_batch_total_l1_call_data_size | The total l1 call data size | Gauge |
+| rollup_propose_batch_total_l1_commit_blob_size | The total l1 commit blob size | Gauge |
+| rollup_propose_batch_total_l1_commit_gas | The total l1 commit gas | Gauge |
+| rollup_propose_batch_update_info_failure_total | Total number of propose batch update info failure total. | Counter |
+| rollup_propose_batch_update_info_total | Total number of propose batch update info total. | Counter |
+| rollup_propose_bundle_batches_number | The number of batches in the current bundle. | Gauge |
+| rollup_propose_bundle_batches_propose_not_enough_total | Total number of times there were not enough batches to propose a bundle. | Counter |
+| rollup_propose_bundle_circle_total | Total number of propose bundle attempts. | Counter |
+| rollup_propose_bundle_failure_total | Total number of propose bundle failures. | Counter |
+| rollup_propose_bundle_first_block_timeout_reached_total | Total times the first block in a bundle reached the timeout. | Counter |
+| rollup_propose_bundle_update_info_failure_total | Total number of propose bundle update info failures. | Counter |
+| rollup_propose_bundle_update_info_total | Total number of propose bundle update info attempts. | Counter |
+| rollup_propose_chunk_blocks_propose_not_enough_total | Total number of chunk block propose not enough | Counter |
+| rollup_propose_chunk_chunk_block_number | The number of blocks in the chunk | Gauge |
+| rollup_propose_chunk_circle_total | Total number of propose chunk total. | Counter |
+| rollup_propose_chunk_due_to_compressed_data_compatibility_breach_total | Total number of propose chunk due to compressed data compatibility breach. | Counter |
+| rollup_propose_chunk_estimate_blob_size_time | Time taken to estimate blob size for the chunk. | Gauge |
+| rollup_propose_chunk_estimate_calldata_size_time | Time taken to estimate calldata size for the chunk. | Gauge |
+| rollup_propose_chunk_estimate_gas_time | Time taken to estimate gas for the chunk. | Gauge |
+| rollup_propose_chunk_estimate_l1_commit_gas | The chunk estimate l1 commit gas | Gauge |
+| rollup_propose_chunk_failure_circle_total | Total number of propose chunk failure total. | Counter |
+| rollup_propose_chunk_first_block_timeout_reached_total | Total times of chunk's first block timeout reached | Counter |
+| rollup_propose_chunk_max_tx_consumption | The max tx consumption | Gauge |
+| rollup_propose_chunk_total_l1_commit_blob_size | The total l1 commit blob size | Gauge |
+| rollup_propose_chunk_total_l1_commit_call_data_size | The total l1 commit call data size | Gauge |
+| rollup_propose_chunk_tx_num | The chunk tx num | Gauge |
+| rollup_propose_chunk_update_info_failure_total | Total number of propose chunk update info failure total. | Counter |
+| rollup_propose_chunk_update_info_total | Total number of propose chunk update info total. | Counter |
+| rollup_sender_blob_gas_fee_cap | The blob gas fee cap of current transaction. | Gauge |
+| rollup_sender_check_pending_transaction_total | The total number of check pending transaction. | Counter |
+| rollup_sender_gas_fee_cap | The gas fee cap of current transaction. | Gauge |
+| rollup_sender_gas_limit | The gas limit of current transaction. | Gauge |
+| rollup_sender_gas_tip_cap | The gas tip cap of current transaction. | Gauge |
+| rollup_sender_send_transaction_send_tx_failure_total | The total number of sending transactions failure for sending tx. | Counter |
+| rollup_sender_send_transaction_total | The total number of sending transactions. | Counter |
+
+### bridge-history-api
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| bridge_history_api_cache_hits_total | The total number of cache hits | Counter |
+| bridge_history_api_cache_misses_total | The total number of cache misses | Counter |
+| bridge_history_api_request_body_total | The server received request body size, unit byte | Counter |
+| bridge_history_api_request_duration_bucket | Cumulative counters for the observation buckets (the time server took to handle the request.) | Counter |
+| bridge_history_api_request_duration_count | Count of events that have been observed for the histogram metric (the time server took to handle the request.) | Counter (Histogram) |
+| bridge_history_api_request_duration_sum | Total sum of all observed values for the histogram metric (the time server took to handle the request.) | Counter (Histogram) |
+| bridge_history_api_request_total | All the server received request num. | Counter |
+| bridge_history_api_request_uv_total | All the server received ip num. | Counter |
+| bridge_history_api_response_body_total | The server send response body size, unit byte | Counter |
+| bridge_history_api_uri_request_total | All the server received request num with every uri. | Counter |
+
+### bridge-history-fetcher
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| L1_fetcher_logic_fetched_total | The total number of events or failed txs fetched in L1 fetcher logic. | Counter |
+| L1_message_fetcher_reorg_total | Total count of blockchain reorgs encountered by the L1 message fetcher. | Counter |
+| L1_message_fetcher_running_total | Current count of running L1 message fetcher instances. | Counter |
+| L1_message_fetcher_sync_height | Latest blockchain height the L1 message fetcher has synced with. | Gauge |
+| L2_fetcher_logic_fetched_total | The total number of events or failed txs fetched in L2 fetcher logic. | Counter |
+| L2_message_fetcher_reorg_total | Total count of blockchain reorgs encountered by the L2 message fetcher. | Counter |
+| L2_message_fetcher_running_total | Current count of running L2 message fetcher instances. | Counter |
+| L2_message_fetcher_sync_height | Latest blockchain height the L2 message fetcher has synced with. | Gauge |
+| event_update_logic_L1_finalize_batch_event_L2_block_update_height | L2 block height of the latest L1 batch event that has been finalized and updated in the message_table. | Gauge |
+| event_update_logic_L2_message_nonce_update_height | L2 message nonce height in the latest L1 batch event that has been finalized and updated in the message_table. | Gauge |
+
+### coordinator-api
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| coordinator_submit_proof_failure_total | Total number of submit proof failure. | Counter |
+| coordinator_submit_proof_total | Total number of submit proof. | Counter |
+| coordinator_task_prove_duration_seconds_bucket | Cumulative counters for the observation buckets (Time spend by prover prove task.) | Counter |
+| coordinator_task_prove_duration_seconds_count | Count of events that have been observed for the histogram metric (Time spend by prover prove task.) | Counter (Histogram) |
+| coordinator_task_prove_duration_seconds_sum | Total sum of all observed values for the histogram metric (Time spend by prover prove task.) | Counter (Histogram) |
+| coordinator_validate_failure_submit_have_been_verifier | Total number of submit proof validate failure proof have been verifier. | Counter |
+| coordinator_validate_failure_submit_status_not_ok | Total number of submit proof validate failure proof status not ok. | Counter |
+| coordinator_validate_failure_submit_timeout | Total number of submit proof validate failure timeout. | Counter |
+| coordinator_validate_failure_submit_twice_total | Total number of submit proof validate failure submit twice. | Counter |
+| coordinator_validate_failure_total | Total number of submit proof validate failure. | Counter |
+
+### coordinator-cron
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| coordinator_batch_prover_task_timeout_total | Total number of batch timeout prover task. | Counter |
+| coordinator_batch_timeout_checker_run_total | Total number of batch timeout checker run. | Counter |
+| coordinator_bundle_prover_task_timeout_total | Total number of bundle timeout prover task. | Counter |
+| coordinator_bundle_timeout_checker_run_total | Total number of bundle timeout checker run. | Counter |
+| coordinator_chunk_prover_task_timeout_total | Total number of chunk timeout prover task. | Counter |
+| coordinator_chunk_timeout_checker_run_total | Total number of chunk timeout checker run. | Counter |
+
+### gas-oracle
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| rollup_l1_watcher_fetch_block_header_processed_block_height | The current processed block height of l1 watcher fetch block header | Gauge |
+| rollup_l1_watcher_fetch_block_header_total | The total number of l1 watcher fetch block header total | Counter |
+| rollup_layer1_gas_price_oracler_total | The total number of layer1 gas price oracler run total | Counter |
+| rollup_layer1_latest_base_fee | The latest base fee of l1 rollup relayer | Gauge |
+| rollup_layer1_latest_blob_base_fee | The latest blob base fee of l1 rollup relayer | Gauge |
+| rollup_layer1_update_gas_oracle_confirmed_failed_total | The total number of updating layer1 gas oracle confirmed failed | Counter |
+| rollup_layer1_update_gas_oracle_confirmed_total | The total number of updating layer1 gas oracle confirmed | Counter |
+| rollup_layer2_bundles_finalized_confirmed_failed_total | Total number of failed confirmations for finalized bundles on layer2. | Counter |
+| rollup_layer2_bundles_finalized_confirmed_total | Total number of finalized bundles confirmed on layer2. | Counter |
+| rollup_layer2_chain_monitor_latest_failed_batch_call | The total number of failed call chain_monitor api | Counter |
+| rollup_layer2_chain_monitor_latest_failed_batch_status | The total number of failed batch status get from chain_monitor | Counter |
+| rollup_layer2_gas_price_latest_gas_price | The latest gas price of rollup relayer l2 | Gauge |
+| rollup_layer2_gas_price_oracler_total | The total number of layer2 gas price oracler run total | Counter |
+| rollup_layer2_process_committed_batches_confirmed_failed_total | The total number of layer2 process committed batches confirmed failed total | Counter |
+| rollup_layer2_process_committed_batches_confirmed_total | The total number of layer2 process committed batches confirmed total | Counter |
+| rollup_layer2_process_committed_batches_finalized_success_total | The total number of layer2 process committed batches finalized success total | Counter |
+| rollup_layer2_process_committed_batches_finalized_total | The total number of layer2 process committed batches finalized total | Counter |
+| rollup_layer2_process_committed_batches_total | The total number of layer2 process committed batches run total | Counter |
+| rollup_layer2_process_finalized_batches_confirmed_failed_total | The total number of layer2 process finalized batches confirmed failed total | Counter |
+| rollup_layer2_process_finalized_batches_confirmed_total | The total number of layer2 process finalized batches confirmed total | Counter |
+| rollup_layer2_process_pending_batch_err_too_many_pending_blob_txs_total | The total number of layer2 process pending batch failed on too many pending blob txs | Counter |
+| rollup_layer2_process_pending_batch_success_total | The total number of layer2 process pending success batch | Counter |
+| rollup_layer2_process_pending_batch_total | The total number of layer2 process pending batch | Counter |
+| rollup_layer2_relayer_process_pending_bundles_finalized_success_total | Total number of times the layer2 relayer has successful finalized proven bundle processes. | Counter |
+| rollup_layer2_relayer_process_pending_bundles_finalized_total | Total number of times the layer2 relayer has finalized proven bundle processes. | Counter |
+| rollup_layer2_relayer_process_pending_bundles_total | Total number of times the layer2 relayer has processed pending bundles. | Counter |
+| rollup_layer2_update_layer1_gas_oracle_confirmed_failed_total | The total number of updating layer2 gas oracle confirmed failed | Counter |
+| rollup_layer2_update_layer1_gas_oracle_confirmed_total | The total number of updating layer2 gas oracle confirmed | Counter |
+| rollup_sender_check_pending_transaction_total | The total number of check pending transaction. | Counter |
+| rollup_sender_gas_fee_cap | The gas fee cap of current transaction. | Gauge |
+| rollup_sender_gas_limit | The gas limit of current transaction. | Gauge |
+| rollup_sender_gas_tip_cap | The gas tip cap of current transaction. | Gauge |
+| rollup_sender_send_transaction_get_fee_failure_total | The total number of sending transactions failure for getting fee. | Counter |
+| rollup_sender_send_transaction_total | The total number of sending transactions. | Counter |
+
+### l2geth
+
+L2Geth has an extensive list of metrics. Below are tables grouped by metric name prefixes, highlighting important metrics and those not inherited from geth. For a complete list, please refer to the Prometheus metrics explorer.
+
+#### eth_db_chaindata
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| eth_db_chaindata_disk_size | Size of the chaindata on disk | Gauge |
+| eth_db_chaindata_ancient_size | Size of the ancient chaindata on disk | Gauge |
+
+#### miner
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| miner_commit_gas | Gas used in the last commit | Gauge |
+| miner_ccc_stall | | Summary |
+| miner_ccc_stall_count | Count of events that have been observed for the base metric | Counter |
+| miner_ccc_stall_total_count | | Counter |
+| miner_collect_l1_msgs | | Summary |
+| miner_collect_l1_msgs_count | Count of events that have been observed for the base metric | Counter |
+| miner_collect_l1_msgs_total_count | | Counter |
+| miner_collect_l2_txns | | Summary |
+| miner_collect_l2_txns_count | Count of events that have been observed for the base metric | Counter |
+| miner_collect_l2_txns_total_count | | Counter |
+| miner_commit | | Summary |
+| miner_commit_count | Count of events that have been observed for the base metric | Counter |
+| miner_commit_reason_ccc | | Gauge |
+| miner_commit_reason_deadline | | Gauge |
+| miner_commit_total_count | | Counter |
+| miner_idle | | Summary |
+| miner_idle_count | Count of events that have been observed for the base metric | Counter |
+| miner_idle_total_count | | Counter |
+| miner_prepare | | Summary |
+| miner_prepare_count | Count of events that have been observed for the base metric | Counter |
+| miner_prepare_total_count | | Counter |
+| miner_skipped_txs_l1 | | Gauge |
+| miner_skipped_txs_l2 | | Gauge |
+
+#### p2p
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| p2p_peers | Number of connected peers | Gauge |
+
+#### txpool
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| txpool_pending | Number of pending transactions in the pool | Gauge |
+| txpool_queued | Number of queued transactions in the pool | Gauge |
+
+#### ccc
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| ccc_async_active_workers | | Gauge |
+| ccc_async_check | | Summary |
+| ccc_async_check_count | Count of events that have been observed for the base metric | Counter |
+| ccc_async_check_total_count | | Counter |
+| ccc_async_fail | | Gauge |
+| ccc_encode | | Summary |
+| ccc_encode_count | Count of events that have been observed for the base metric | Counter |
+| ccc_encode_total_count | | Counter |
+
+#### chain
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| chain_account_commits | | Summary |
+| chain_account_commits_count | Count of events that have been observed for the base metric | Counter |
+| chain_account_commits_total_count | | Counter |
+| chain_account_hashes | | Summary |
+| chain_account_hashes_count | Count of events that have been observed for the base metric | Counter |
+| chain_account_hashes_total_count | | Counter |
+| chain_account_reads | | Summary |
+| chain_account_reads_count | Count of events that have been observed for the base metric | Counter |
+| chain_account_reads_total_count | | Counter |
+| chain_account_updates | | Summary |
+| chain_account_updates_count | Count of events that have been observed for the base metric | Counter |
+| chain_account_updates_total_count | | Counter |
+| chain_execution | | Summary |
+| chain_execution_count | Count of events that have been observed for the base metric | Counter |
+| chain_execution_total_count | | Counter |
+| chain_fees_l2basefee | | Gauge |
+| chain_head_block | Current head block number | Gauge |
+| chain_head_header | Current head header number | Gauge |
+| chain_head_receipt | Current head receipt number | Gauge |
+| chain_head_timegap | Time gap between current time and the head block timestamp | Gauge |
+| chain_inserts | | Summary |
+| chain_inserts_count | Count of events that have been observed for the base metric | Counter |
+| chain_inserts_total_count | | Counter |
+| chain_prefetch_executes | | Summary |
+| chain_prefetch_executes_count | Count of events that have been observed for the base metric | Counter |
+| chain_prefetch_executes_total_count | | Counter |
+| chain_prefetch_interrupts | | Gauge |
+| chain_reorg_add | | Gauge |
+| chain_reorg_drop | | Gauge |
+| chain_reorg_executes | | Gauge |
+| chain_reorg_invalidTx | | Gauge |
+| chain_snapshot_account_reads | | Summary |
+| chain_snapshot_account_reads_count | Count of events that have been observed for the base metric | Counter |
+| chain_snapshot_account_reads_total_count | | Counter |
+| chain_snapshot_commits | | Summary |
+| chain_snapshot_commits_count | Count of events that have been observed for the base metric | Counter |
+| chain_snapshot_commits_total_count | | Counter |
+| chain_snapshot_storage_reads | | Summary |
+| chain_snapshot_storage_reads_count | Count of events that have been observed for the base metric | Counter |
+| chain_snapshot_storage_reads_total_count | | Counter |
+| chain_storage_commits | | Summary |
+| chain_storage_commits_count | Count of events that have been observed for the base metric | Counter |
+| chain_storage_commits_total_count | | Counter |
+| chain_storage_hashes | | Summary |
+| chain_storage_hashes_count | Count of events that have been observed for the base metric | Counter |
+| chain_storage_hashes_total_count | | Counter |
+| chain_storage_reads | | Summary |
+| chain_storage_reads_count | Count of events that have been observed for the base metric | Counter |
+| chain_storage_reads_total_count | | Counter |
+| chain_storage_updates | | Summary |
+| chain_storage_updates_count | Count of events that have been observed for the base metric | Counter |
+| chain_storage_updates_total_count | | Counter |
+| chain_validation | | Summary |
+| chain_validation_count | Count of events that have been observed for the base metric | Counter |
+| chain_validation_total_count | | Counter |
+| chain_write | | Summary |
+| chain_write_count | Count of events that have been observed for the base metric | Counter |
+| chain_write_total_count | | Counter |
+
+#### rawdb
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| rawdb_l1_message_iterator_inner_next_called | | Gauge |
+| rawdb_l1_message_iterator_length_mismatch | | Gauge |
+| rawdb_l1_message_iterator_next_called | | Gauge |
+| rawdb_l1_message_iterator_next_time | | Summary |
+| rawdb_l1_message_iterator_next_time_count | Count of events that have been observed for the base metric | Counter |
+| rawdb_l1_message_iterator_next_time_total_count | | Counter |
+| rawdb_l1_message_size | | Gauge |
+
+#### rollup
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| rollup_l1_message | | Gauge |
+| rollup_tracing_feed_tx_to_tracer | | Summary |
+| rollup_tracing_feed_tx_to_tracer_count | Count of events that have been observed for the base metric | Counter |
+| rollup_tracing_feed_tx_to_tracer_total_count | | Counter |
+| rollup_tracing_fill_block_trace | | Summary |
+| rollup_tracing_fill_block_trace_count | Count of events that have been observed for the base metric | Counter |
+| rollup_tracing_fill_block_trace_total_count | | Counter |
+| rollup_tracing_get_tx_result | | Summary |
+| rollup_tracing_get_tx_result_apply_message | | Summary |
+| rollup_tracing_get_tx_result_apply_message_count | Count of events that have been observed for the base metric | Counter |
+| rollup_tracing_get_tx_result_apply_message_total_count | | Counter |
+| rollup_tracing_get_tx_result_count | Count of events that have been observed for the base metric | Counter |
+| rollup_tracing_get_tx_result_total_count | | Counter |
+| rollup_tracing_get_tx_result_tracer_result | | Summary |
+| rollup_tracing_get_tx_result_tracer_result_count | Count of events that have been observed for the base metric | Counter |
+| rollup_tracing_get_tx_result_tracer_result_total_count | | Counter |
+| rollup_tracing_get_tx_result_zk_trie_build | | Summary |
+| rollup_tracing_get_tx_result_zk_trie_build_count | Count of events that have been observed for the base metric | Counter |
+| rollup_tracing_get_tx_result_zk_trie_build_total_count | | Counter |
+
+#### validator
+
+| METRIC | DESCRIPTION | TYPE |
+|--------|-------------|------|
+| validator_async | | Summary |
+| validator_async_count | Count of events that have been observed for the base metric | Counter |
+| validator_async_total_count | | Counter |
+| validator_l1msg | | Summary |
+| validator_l1msg_count | Count of events that have been observed for the base metric | Counter |
+| validator_l1msg_total_count | | Counter |
+
+Note: This is not an exhaustive list. There are many more metrics available for eth_downloader, eth_fetcher, les_client, les_server, miner, p2p, processor, rpc, state, system, trie, txpool, and other components.
\ No newline at end of file
diff --git a/src/content/docs/en/sdk/operation/security-and-recovery.mdx b/src/content/docs/en/sdk/operation/security-and-recovery.mdx
new file mode 100644
index 000000000..54ebac622
--- /dev/null
+++ b/src/content/docs/en/sdk/operation/security-and-recovery.mdx
@@ -0,0 +1,302 @@
+---
+section: sdk
+title: "Security & Recovery in Scroll SDK"
+lang: "en"
+permalink: "sdk/operation/security-and-recovery"
+excerpt: "Learn more about security and recovery in Scroll SDK"
+---
+
+import Aside from "../../../../../components/Aside.astro"
+import ToggleElement from "../../../../../components/ToggleElement.astro"
+import Steps from "../../../../../components/Steps/Steps.astro"
+
+
+
+## Protocol Security & Risks
+
+For a comprehensive overview of the security of the protocol, L2Beat's [overview of Scroll](https://l2beat.com/scaling/projects/scroll#risk-summary) is a great place to understand the risks, centralization points, and permissioned operators on Scroll chain. Because Scroll is operated by a single entity (which also built the tech), these risk factors may increase as you coordinate with external parties (i.e. RaaS providers).
+
+
+
+### Audits
+
+For a list of independent audits of the Scroll protocol, see [Audits & Bug Bounty](/en/technology/security/audits-and-bug-bounty#independent-audits).
+
+Additionally, Scroll SDK has undergone the following audits:
+
+- Alternative Gas Token Contracts and Gas Oracle
+ - Trail of Bits *(Report to be released)*
+
+{/* TODO: Add audit report URL */}
+
+
+
+## Owner Role & Safe Management
+
+Because the Owner Role has the ability to upgrade smart contracts, a compromised Owner account can put the bridge and user funds at risk. This account should be a multi-sig wallet, and we encourage you to review the best practices for [creating a Security Council](https://medium.com/l2beat/stages-update-security-council-requirements-4c79cea8ef52).
+
+If a RaaS provider is used, create a plan for multi-sig upgrades where the provider cannot arbitrarily upgrade the contracts.
+
+## Privileged Smart Contract Roles
+
+The following accounts are given roles that have special permissions and should be managed with extra care:
+
+- `DEPLOYER`
+ - Used to deploy initial contracts and has permissions to set the initial `OWNER`
+ - Private key held in `contracts` service
+- `OWNER`
+ - Can upgrade contracts, set important parameters, whitelist accounts to grant them roles.
+ - Should be a multi-sig wallet, with the RaaS provider having no more signing authority than the other signers.
+- `L1_GAS_ORACLE_SENDER`
+ - Permissioned to report L2 gas prices to L1 `L1_SCROLL_MESSENGER` contract
+ - Private key held in `gas-oracle` service (unless using Web3Signer)
+- `L2_GAS_ORACLE_SENDER`
+ - Permissioned to report L1 gas prices to L2 `L1_GAS_PRICE_ORACLE` contract
+ - Private key held in `gas-oracle` service (unless using Web3Signer)
+- `L1_COMMIT_SENDER_ADDR`
+ - Permissioned to submit batches to the L1 `ScrollChain` contract
+ - Private key held in `rollup-node` service (unless using Web3Signer)
+- `L1_FINALIZE_SENDER`
+ - Permissioned to submit proofs and finalize batches on L1 `ScrollChain` contract
+ - Private key held in `rollup-node` service (unless using Web3Signer)
+
+
+For additional assessments on protocol permissions and to see how Scroll manages multisigs and timelocks, see [L2Beat's Scroll permissions](https://l2beat.com/scaling/projects/scroll#permissions).
+
+## Handling Private Keys and Secrets
+
+By default, Scroll SDK's production deployments are configured to store "hot" private keys in the service and a secret manager service. We use ExternalSecrets to support a variety of secret manager services, but by default, the CLI tool only automates AWS Secrets Manager and an insecure, development-only deployment of HashiCorp Vault.
+
+We intend to add support for Web3Signer in the future as well, allowing more restricted access to apply to a single service.
+
+For more information on implementing access control to specific parts of your cluster, see [Kubernetes: Using RBAC Authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).
+
+## Pausing the Bridge
+
+In extreme security situations, you may need to pause the bridge. The quickest way for the infrastructure operator to do this is to take the rollup node offline. This way, even if blocks continue to be produced, finalization (and thus new withdrawals) will not be processed until the `rollup-node` is back online.
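+
+For example, if the rollup node is deployed with the default Helm charts, scaling its workload to zero replicas takes it offline until you're ready to resume finalization. A sketch, assuming a Deployment named `rollup-node` in the `default` namespace (confirm the actual resource name and kind in your cluster first):
+
+```bash
+# Take the rollup node offline to pause batch commits and finalization.
+kubectl scale deployment/rollup-node --replicas=0 -n default
+
+# Bring it back once the incident is resolved.
+kubectl scale deployment/rollup-node --replicas=1 -n default
+```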
+
+## Key Rotation for Rollup Accounts
+
+Rotating the keys for the `gas-oracle` and `rollup-node` accounts is a manual process requiring involvement from the `OWNER` role.
+
+At a high level, you simply need to add the new key to the whitelist, restart your services, and then remove the old key from the whitelist.
+
+{/* TODO: Provide cast commands for doing this process. */}
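+
+The exact calls depend on your deployed contracts, but as a hypothetical sketch using Foundry's `cast`: the gas oracle senders are whitelisted through a `Whitelist` contract, and the rollup-node's commit and finalize senders are registered on `ScrollChain`. The function names below (`updateWhitelistStatus`, `addSequencer`/`removeSequencer`, `addProver`/`removeProver`) and all addresses are assumptions to verify against your deployment; in production the `OWNER` is a multi-sig, so these calls would be proposed as multi-sig transactions rather than sent directly.
+
+```bash
+# Whitelist the new gas-oracle sender (repeat on the other layer's whitelist as needed).
+cast send $WHITELIST_ADDR "updateWhitelistStatus(address[],bool)" "[$NEW_GAS_ORACLE_SENDER]" true \
+  --rpc-url $RPC_URL --private-key $OWNER_KEY
+
+# Register the new rollup-node accounts on the L1 ScrollChain contract.
+cast send $SCROLL_CHAIN_ADDR "addSequencer(address)" $NEW_COMMIT_SENDER --rpc-url $L1_RPC_URL --private-key $OWNER_KEY
+cast send $SCROLL_CHAIN_ADDR "addProver(address)" $NEW_FINALIZE_SENDER --rpc-url $L1_RPC_URL --private-key $OWNER_KEY
+
+# Update the service secrets with the new keys, restart gas-oracle and rollup-node,
+# then remove the old accounts once the new ones are confirmed working.
+cast send $WHITELIST_ADDR "updateWhitelistStatus(address[],bool)" "[$OLD_GAS_ORACLE_SENDER]" false \
+  --rpc-url $RPC_URL --private-key $OWNER_KEY
+cast send $SCROLL_CHAIN_ADDR "removeSequencer(address)" $OLD_COMMIT_SENDER --rpc-url $L1_RPC_URL --private-key $OWNER_KEY
+cast send $SCROLL_CHAIN_ADDR "removeProver(address)" $OLD_FINALIZE_SENDER --rpc-url $L1_RPC_URL --private-key $OWNER_KEY
+```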
+
+## Rotating Sequencer Keys
+
+Rotating sequencer keys requires careful coordination to ensure continuous block production. The process involves temporarily running two sequencer nodes: the active sequencer and a new sequencer with the new keys.
+
+#### Prerequisites
+
+
+1. Update your L2 Geth nodes to the latest version
+2. Prepare a second value file for the new sequencer with:
+ - New keystore and password
+ - New nodekey
+ - Updated peer list
+3. Ensure all L2 Geth services have both sequencers in their `L2GETH_PEER_LIST`
+
+
+#### Rotation Process
+
+
+1. Deploy the new sequencer node with the new keys
+2. Verify the new sequencer is fully synced and connected to peers
+3. On the active sequencer, connect to the Geth console:
+ ```bash
+ geth attach /l2geth/data/geth.ipc
+ ```
+
+ or, if using `kubectl`:
+ ```bash
+ kubectl exec -it l2-sequencer-0 -- geth attach /l2geth/data/geth.ipc
+ ```
+
+4. Check current active signer:
+ ```bash
+ clique.getSigners()
+ ```
+
+5. Propose the new signer (replace with your new signer address):
+ ```bash
+ clique.propose("0xNEW_SIGNER_ADDRESS", true)
+ ```
+
+6. Wait for one block to be generated, then verify both signers are active:
+ ```bash
+ clique.getSigners() // Should show both addresses
+ ```
+
+7. Remove the old signer from both nodes:
+ - On the old sequencer:
+ ```bash
+ clique.propose("0xOLD_SIGNER_ADDRESS", false)
+ ```
+ - On the new sequencer:
+ ```bash
+ clique.propose("0xOLD_SIGNER_ADDRESS", false)
+ ```
+
+8. After two blocks are generated, verify only the new signer remains:
+ ```bash
+ clique.getSigners() // Should show only new signer
+ ```
+
+
+#### Post-Rotation Verification
+
+
+1. Monitor block production on the new sequencer
+2. Verify blocks are being properly signed with the new key
+3. Monitor network health metrics
+4. Once confirmed working, decommission the old sequencer
+
+
+
+
+## Recovering from an Infrastructure Failure
+
+Recovering from an infrastructure failure will depend on which components are affected.
+
+#### Database Failure
+
+For managed databases, we recommend maintaining backups, ideally in an alternate region. If you operate your own database, be sure to take snapshots, and consider backups to alternate cloud providers. We plan to provide further guidance for database recovery in the future.
+
+#### Sequencer Failure
+
+**If your sequencer host goes down:**
+
+We recommend having at least one hot standby sequencer to take its place. This sequencer can be configured with different keys than the original sequencer (and be fully synced in case you need to [rotate the sequencer keys](#rotating-sequencer-keys)), but a simple configuration change will allow it to reboot using the original sequencer's keys to immediately resume block production.
+
+**If all of your sequencer machines are lost:**
+
+You will need to either:
+ - Sync a new full node from genesis (assuming there are full nodes remaining somewhere in your p2p network), or
+ - Repurpose a synced RPC node, "converting" it into the sequencer by creating a new sequencer chart that takes over the RPC node's Persistent Volume Claim.
+
+**If all full nodes in the network are lost:**
+
+If you cannot sync from other network nodes, you will need to sync from L1 data. As of version 0.1.0, this is unsupported, but we plan to add support for this in the near future.
+
+Please reach out to the Scroll team if you need assistance reviewing your recovery plan.
+
+## Planning for your Incident Response & Recovery
+
+It is important to plan for your incident response and recovery before an incident occurs. Here is a list of potential issues, their implications, and things to consider as a team.
+
+### Bug Categories and Response Plans
+
+#### 1. Liveness Issues
+- **Symptoms**: Delays in block production or finalization
+- **Impact**:
+ - Write operations may be temporarily unavailable
+ - Bridge withdrawals may be delayed
+ - Read operations remain functional
+- **Response**:
+ - Monitor block production metrics
+ - Engage backup systems if necessary
+ - Communicate status to users
+
+#### 2. Safety Issues
+
+##### Scenario A: Invalid Block Production
+- **Symptoms**: RPC nodes rejecting blocks
+- **Impact**: Chain appears offline for writes
+- **Response**:
+ - Investigate sequencer logs
+ - Prepare for potential rollback
+ - Maintain read-only access
+
+##### Scenario B: Unprovable Batch
+- **Symptoms**: Proof generation failures
+- **Response**:
+ - Coordinate with Scroll team
+ - Potential prover upgrade
+ - Possible L1 batch revocation
+ - Prepare for L2 reorg
+
+##### Scenario C: ZK System Bug
+- **Highest Risk Scenario**
+- **Required Actions**:
+ - Immediate escalation to Scroll team
+ - Potential emergency shutdown
+ - Review of all recent proofs
+ - External party verification
+
+#### 3. Gas Oracle Issues
+- **Monitoring**: Track gas price anomalies
+- **Impact Assessment**:
+ - Transaction cost implications
+ - Potential chain usability issues
+- **Resolution Steps**:
+ - Oracle parameter adjustment
+ - Emergency price override if necessary
+
+### Disaster Recovery
+
+#### Cross-Region Resilience
+1. **Backup Infrastructure**
+ - Maintain 1-2 fullnodes in alternate regions
+ - Regular database snapshots
+ - Off-site backup storage
+ - Cross-region K8s cluster capability
+
+2. **Recovery Procedures**
+ - Sequencer Role Recovery:
+ 1. Deploy new sequencer with original keys
+ 2. Verify chain sync status
+ 3. Resume block production
+ - Signer Change Process:
+ 1. Follow documented key rotation
+ 2. Update necessary configurations
+ 3. Verify new signer functionality
+
+#### Cloud Provider Failover
+
+1. **Temporary Outages**
+ - Maintain hot standby in alternate region
+ - Automated DNS failover configuration
+ - Regular failover testing
+ - Document recovery procedures
+
+2. **Permanent Migration**
+ - Platform-agnostic deployment readiness
+ - Alternative cloud provider prerequisites:
+ - Pre-configured K8s clusters
+ - Network configuration templates
+ - DNS management strategy
+ - Migration checklist:
+ - Sequencer deployment
+ - RPC node setup
+ - Database migration
+ - DNS updates
+ - Security configuration verification
+
+### Security Monitoring and Response Checklist
+
+#### Continuous Monitoring
+- Monitor all privileged key usage
+- Track gas oracle values for anomalies
+- Watch for unusual block proposal patterns
+- Monitor bridge activity for suspicious patterns
+- Track system resource utilization
+- Monitor network latency and availability
+
+#### Incident Response
+- Maintain an up-to-date incident response plan
+- Document escalation procedures
+- Keep backup RaaS provider details readily available
+- Regular testing of recovery procedures
+- Maintain communication templates for various scenarios
\ No newline at end of file
diff --git a/src/content/docs/en/sdk/operation/troubleshooting.mdx b/src/content/docs/en/sdk/operation/troubleshooting.mdx
new file mode 100644
index 000000000..226b89a6c
--- /dev/null
+++ b/src/content/docs/en/sdk/operation/troubleshooting.mdx
@@ -0,0 +1,162 @@
+---
+section: sdk
+title: "Troubleshooting a Scroll SDK Deployment"
+lang: "en"
+permalink: "sdk/operation/troubleshooting"
+excerpt: "Troubleshooting issues you may encounter when running Scroll SDK"
+---
+
+import Steps from '../../../../../components/Steps/Steps.astro';
+import Aside from '../../../../../components/Aside.astro';
+
+The Scroll SDK is a complex system with many interdependent services. This document covers common issues you may encounter when running the SDK and how to resolve them.
+
+### Rollup node isn't committing batches or finalizing
+
+
+
+1. Check if the accounts have funds
+ - Verify public addresses and send them funds on L1
+
+2. Look for logs about "check chain monitor"
+ - If present, check chain monitor logs. You may not have enough blocks *after* the batch to finalize.
+ - Generate more network activity to produce blocks, or change the `chain-monitor-config.json` value:
+
+ ```json
+ "l2_config": {
+ ...
+ "confirm": "0x80",
+ ...
+ }
+ ```
+
+3. If your logs include something like "replacement transaction underpriced: new tx blob gas fee cap 1000000000000 ≤ 574376045900 queued + 100% replacement penalty":
+ - Update these values in `rollup-config.json`:
+
+ ```json
+ "l2_config": {
+ ...
+ "max_gas_price": 5000000000000,
+ "max_blob_gas_price": 5000000000000,
+ ...
+ }
+ ```
+
+4. If you see "Failed to finalize timeout batch without proof":
+ ```
+ ERROR[08-29|18:40:37.366|scroll-tech/rollup/internal/controller/relayer/l2_relayer.go:465] Failed to finalize timeout batch without proof ││ index=6 hash=0x05bc419ecb59e9566554ddb716ee4b69fbe3b103a84e1c714656190c5af5028c err="failed to get batch status, errCode: 40001, errMsg: " ││ WARN [08-29|18:40:52.273|scroll-tech/rollup/internal/controller/relayer/l2_relayer.go:506] failed to get batch status, please check chain_ ││ monitor api server batch_index=6 err="failed to get batch status, errCode: 40001, errMsg: "
+ ```
+   - Check whether the chain monitor's Layer 2 "start block" is higher than the block in the batch. See the `chain-monitor-config.json` change above.
+
+
+
+### Unable to withdraw funds
+
+
+
+1. Check if the withdrawal block is finalized
+ - If not, wait for finalization
+
+2. Verify if bridge-history-fetcher is still syncing
+   - Check its "fetch and save L1 events" height
+ - You may need to wait for it to catch up before the bridge history API can create a withdrawal proof
+
+
+
+### Didn't receive funds on Layer 2 after deposit on Layer 1
+
+
+
+1. Check if the deposit transaction block is finalized on Layer 1
+ - If not, wait (or send a transaction on Layer 1 to force block generation if using anvil)
+
+2. Verify if there's a transaction on Layer 2
+ - If not, send a transaction on Layer 2 to force block generation
+
+
+
+### Rollup node failed to get batch status
+
+Error from rollup node pod:
+
+```
+ERROR[09-18|07:52:21.515|scroll-tech/rollup/internal/controller/relayer/l2_relayer.go:489] Failed to finalize timeout batch without proof index=1 hash=0x43b0e21561d60b052c14eeb53f04c4f797b6c1532fae207fcb03f7da3ea819dd err="failed to get batch status, errCode: 40001, errMsg: "
+```
+
+This occurs because there are not enough L2 blocks generated. Continue sending transactions on L2 to force block generation, which should resolve the issue.
+
+### Gas-oracle/rollup-relayer failed to get fee data
+
+Error: `max fee per gas less than block base fee`
+
+This is a bug in the l2-geth node. A workaround is to manually send a legacy transaction with a high gas price.
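+
+One way to do this is with Foundry's `cast`, sending a zero-value legacy (type-0) transaction with an explicit gas price well above the current base fee. A sketch with placeholder values:
+
+```bash
+# Send a 0-value legacy transaction to yourself with a generous gas price (10 gwei here).
+cast send $MY_ADDRESS --value 0 --legacy --gas-price 10000000000 \
+  --rpc-url $L2_RPC_URL --private-key $MY_PRIVATE_KEY
+```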
+
+### Rollup node failed to commit a batch, and batch status on rollup-explorer is unknown
+
+#### Issue context
+
+```
+ERROR[10-15|09:04:42.873|scroll-tech/rollup/internal/controller/relayer/l2_relayer.go:476]
+Failed to send commitBatch tx to layer1 index=191 hash=0x80c87236e31dd2b33cb03909905e7ccdf070ea879b8e7f5c91542fd1a1ad7d6f RollupContractAddress=0xE518eD8A0568c99Be066ecEDcf29e0C1315E4b77 err="failed to get fee data, err: execution reverted
+```
+
+#### Analysis
+
+**Issue 1:** The failed commit transaction (txhash: 0x1ca3fb4ed1688d7aa43d65f3d8ef0a98ff6afb03dc9cff69924f77fd1f06e432) reverted with error code 0x12137ab0, which stands for `"ErrorBatchIsAlreadyCommitted()"`. The rollup node is attempting to commit an already committed batch. This likely occurred because the rollup node stopped while sending a commit transaction to L1. L1 received the transaction, but the rollup node didn't save it in the pending_transaction table of the database.
+
+**Issue 2:** After fixing Issue 1, another issue arose where the rollup node failed to commit a later batch, reverting with the error `ErrorIncorrectBatchHash()`. This was caused by updating the batch rollup_status in the database without shutting down the rollup node, leading to the rollup fetching incorrect batch information.
+
+#### Solutions
+
+**Issue 1:**
+
+
+
+1. Debug the failed commit transaction using the `debug_traceTransaction` RPC method:
+
+ ```bash
+ curl l1_rpc_url -X POST -H "Content-Type: application/json" --data '{"method":"debug_traceTransaction","params":["0x1ca3fb4ed1688d7aa43d65f3d8ef0a98ff6afb03dc9cff69924f77fd1f06e432", {"tracer": "callTracer"}], "id":1,"jsonrpc":"2.0"}'
+ ```
+
+ If the output is `0x12137ab0` (`ErrorBatchIsAlreadyCommitted()`), it confirms Issue 1.
+
+2. Shut down the rollup-node and gas-oracle.
+
+3. In the PostgreSQL database, select the `batch` table and update the `rollup_status` of the corresponding failing commit batch to `3` (representing rollup committed); see the sketch after these steps.
+
+4. Restart the rollup-node and gas-oracle.
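+
+For step 3, a minimal `psql` sketch. It assumes the rollup database keys batches by an `index` column and that `3` maps to the committed rollup status as described above; verify the column names and status values against your schema before running it (the batch index `191` is taken from the example log):
+
+```bash
+# With rollup-node and gas-oracle shut down, mark the stuck batch as committed.
+psql "$ROLLUP_NODE_DB_CONNECTION_STRING" \
+  -c 'UPDATE batch SET rollup_status = 3 WHERE "index" = 191;'
+```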
+
+
+
+**Issue 2:**
+
+
+
+Issue 2 typically shouldn't occur unless the rollup-node wasn't shut down when solving Issue 1. If it happens anyway:
+
+
+
+1. Debug the failed commit transaction using the `debug_traceTransaction` RPC method:
+
+ ```bash
+ curl l1_rpc_url -X POST -H "Content-Type: application/json" --data '{"method":"debug_traceTransaction","params":["0x1ca3fb4ed1688d7aa43d65f3d8ef0a98ff6afb03dc9cff69924f77fd1f06e432", {"tracer": "callTracer"}], "id":1,"jsonrpc":"2.0"}'
+ ```
+
+ If the output is `0x2a1c1442` (`ErrorIncorrectBatchHash()`), it confirms Issue 2.
+
+2. Determine the first batch to revert (the batch before the currently failing commit batch):
+ `revert_batch_number = current_failing_batch_number - 1`
+
+3. Shut down the rollup-node and gas-oracle. In the PostgreSQL database, select the `batch` table and delete batches where `batch_index ≥ revert_batch_number`.
+
+4. Revert the batch on the ScrollChain contract:
+
+   ```bash
+   # Placeholder values; verify the revertBatch signature and arguments against your deployed ScrollChain version.
+   cast send --rpc-url $L1_RPC_URL $SCROLL_CHAIN_ADDRESS "revertBatch(bytes,bytes)" $ARG1 $ARG2 --private-key $OWNER_PRIVATE_KEY
+   ```
+
+5. Restart the rollup-node and gas-oracle.
+
+
diff --git a/src/content/docs/en/sdk/operation/upgrades.mdx b/src/content/docs/en/sdk/operation/upgrades.mdx
new file mode 100644
index 000000000..cf915a144
--- /dev/null
+++ b/src/content/docs/en/sdk/operation/upgrades.mdx
@@ -0,0 +1,15 @@
+---
+section: sdk
+title: "Upgrading Scroll SDK"
+lang: "en"
+permalink: "sdk/operation/upgrades"
+excerpt: "Learn more about upgrading Scroll SDK"
+---
+
+Scroll SDK will include a comprehensive upgrade system to ensure the smooth operation of a network during upgrades.
+
+With our current `scroll-sdk 0.1.x` release, we haven't had any breaking changes that would require manual intervention beyond upgrading a chart's version. In the future, some upgrades may require re-syncing a sequencer from genesis, upgrading smart contracts, or even running database migrations.
+
+When that time comes, we will provide thorough documentation on upgrade paths for bringing the latest features to your Scroll SDK chain.
+
+{/* TODO: Add upgrade information here */}
\ No newline at end of file
diff --git a/src/content/docs/en/sdk/sdk-faq.mdx b/src/content/docs/en/sdk/sdk-faq.mdx
new file mode 100644
index 000000000..e4262dbe5
--- /dev/null
+++ b/src/content/docs/en/sdk/sdk-faq.mdx
@@ -0,0 +1,45 @@
+---
+section: sdk
+date: Last Modified
+title: "Scroll SDK FAQ"
+lang: "en"
+permalink: "sdk/sdk-faq"
+excerpt: "Help us sculpt the Scroll SDK by trying it out and giving us early feedback!"
+# whatsnext: { "Scroll Rollup Process": "/en/technology/chain/rollup" }
+---
+
+## Troubleshooting / FAQ
+
+### How long is finality on Scroll chain?
+
+Finality depends on the parameterization of how often your chain wants to finalize to Ethereum. Roughly, a batch is created every minute (containing ~20 blocks or 200 txs), and takes about 50 minutes to finalize on L1.
+
+Our next upgrade will increase the variability in block speed, but also increase how many batches will fit in a proof. We may decide to lengthen finality in order to reduce on-chain costs and lower transaction fees. _(At 556k gas to finalize, each finalize tx costs ~$9.80 as of June 3, 2024)._
+
+If you want to explore more, check out [https://scroll.io/rollupscan](https://scroll.io/rollupscan).
+
+### Does Scroll SDK support a mock prover? How does finality work without running a prover?
+
+The Scroll SDK defaults to the behavior seen on Scroll Sepolia, which does not require proofs to finalize.
+
+In the default testnet configuration, the contracts deployed to L1 allow the `rollup-node` (the service that submits proofs to the verification contract on L1) to submit "empty" proofs, and the L1 contract will accept them.
+
+The rollup-node is configured to submit these after a "timeout" period if the service does not receive a valid proof. This mode doesn't require a literal "mock prover" service. In fact, even the `coordinator`, the service which typically assigns work to provers and verifies proofs before storing them for the rollup-node, is not required to run.
+
+The suggested testnet timeout for this finalization is 3600 seconds, to approximate mainnet finalization latency. To alter this behavior in Scroll SDK, set the following variables in `config.toml`:
+
+```toml
+[rollup]
+TEST_ENV_MOCK_FINALIZE_ENABLED = true
+TEST_ENV_MOCK_FINALIZE_TIMEOUT_SEC = 3600
+```
+
+These values affect the version of the contracts deployed and the config values for the `rollup-node` service.
+
+{/* TODO: The technical components above should be moved into the operating guide docs for provers, and we just answer the question above and link to specifics there. */}
+
+### Is Kubernetes a requirement? Do you support docker-compose, ansible, etc?
+
+We do not provide templates for deploying with tooling outside of Kubernetes and Helm. That said, every Helm chart points to a docker image, and we are happy to help teams that need support understanding service configuration. We may explore providing these by default if enough teams need this support, but it's not how we manage Scroll chain in its production environment.
+
+{/* TODO: Add a few more questions here */}
\ No newline at end of file
diff --git a/src/content/docs/en/sdk/technical-stack/configuration.mdx b/src/content/docs/en/sdk/technical-stack/configuration.mdx
new file mode 100644
index 000000000..476f22c72
--- /dev/null
+++ b/src/content/docs/en/sdk/technical-stack/configuration.mdx
@@ -0,0 +1,229 @@
+---
+section: sdk
+date: Last Modified
+title: "Scroll SDK Configuration"
+lang: "en"
+permalink: "sdk/technical-stack/configuration"
+excerpt: "Information on configuring and customizing a Scroll SDK deployment."
+---
+
+import Aside from "../../../../../components/Aside.astro"
+
+Initial chain configuration is made by modifying `config.toml`. All other config files are auto-generated from this file. For automating changes to your configuration for production deployments, see the [scroll-sdk-cli](/en/sdk/technical-stack/scroll-sdk-cli) tool.
+{/* TODO: Fix CLI URL to point to github repo */}
+
+For new production deployments, we recommend using the [example template](https://github.com/scroll-tech/scroll-sdk/blob/develop/examples/config.toml.example), which the `scroll-sdk-cli` tool is designed to work with. You can reference the default devnet configuration [here](https://github.com/scroll-tech/scroll-sdk/blob/develop/charts/scroll-sdk/config.toml).
+
+
+
+## `config.toml` Variables
+Local Devnet defaults shown.
+
+### General
+
+Contained in the `[general]` section.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| L1_RPC_ENDPOINT | Specifies the HTTP endpoint for the L1 RPC server. | `http://l1-devnet:8545` |
+| L1_RPC_ENDPOINT_WEBSOCKET | Specifies the WebSocket endpoint for the L1 RPC server. | `ws://l1-devnet:8546` |
+| L2_RPC_ENDPOINT | Specifies the HTTP endpoint for the L2 RPC server. | `http://l2-rpc:8545` |
+| CHAIN_NAME_L1 | Labels the chain name for the L1 network. | Ethereum |
+| CHAIN_NAME_L2 | Labels the chain name for the L2 network. | Scroll SDK |
+| CHAIN_ID_L1 | Defines the chain ID for the L1 network. | 111111 |
+| CHAIN_ID_L2 | Defines the chain ID for the L2 network. | 221122 |
+| L1_CONTRACT_DEPLOYMENT_BLOCK | Specifies the block number at which L1 contracts were deployed. | 0 |
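+
+As a reference, the devnet defaults above correspond to the following `[general]` block in `config.toml` (a sketch assembled from the table; see the example template linked above for the exact quoting and types):
+
+```toml
+[general]
+L1_RPC_ENDPOINT = "http://l1-devnet:8545"
+L1_RPC_ENDPOINT_WEBSOCKET = "ws://l1-devnet:8546"
+L2_RPC_ENDPOINT = "http://l2-rpc:8545"
+CHAIN_NAME_L1 = "Ethereum"
+CHAIN_NAME_L2 = "Scroll SDK"
+CHAIN_ID_L1 = 111111
+CHAIN_ID_L2 = 221122
+L1_CONTRACT_DEPLOYMENT_BLOCK = 0
+```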
+
+### Accounts
+
+Contained in the `[accounts]` section.
+
+{/* TODO: Add link to where these roles are documented. */}
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| DEPLOYER_PRIVATE_KEY | Private key for the deployer account. | `0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80` |
+| L1_COMMIT_SENDER_PRIVATE_KEY | Private key for the L1 commit sender account. | `0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d` |
+| L1_FINALIZE_SENDER_PRIVATE_KEY | Private key for the L1 finalize sender account. | `0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a` |
+| L1_GAS_ORACLE_SENDER_PRIVATE_KEY | Private key for the L1 gas oracle sender account. | `0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6` |
+| L2_GAS_ORACLE_SENDER_PRIVATE_KEY | Private key for the L2 gas oracle sender account. | `0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80` |
+| DEPLOYER_ADDR | Address of the deployer account. | `0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266` |
+| OWNER_ADDR | Address of the owner account. Should be a multi-sig wallet. | `0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266` |
+| L1_COMMIT_SENDER_ADDR | Address of the L1 commit sender account. | `0x70997970C51812dc3A010C7d01b50e0d17dc79C8` |
+| L1_FINALIZE_SENDER_ADDR | Address of the L1 finalize sender account. | `0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC` |
+| L1_GAS_ORACLE_SENDER_ADDR | Address of the L1 gas oracle sender account. | `0x90F79bf6EB2c4f870365E785982E1f101E93b906` |
+| L2_GAS_ORACLE_SENDER_ADDR | Address of the L2 gas oracle sender account. | `0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266` |
+
+### Database
+
+Contained in the `[db]` section.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| ADMIN_SYSTEM_DB_CONNECTION_STRING | Connection string for the Admin System database. | `""` |
+| BLOCKSCOUT_DB_CONNECTION_STRING | Connection string for the Blockscout database. | `postgresql://postgres:qwerty12345@postgresql:5432/blockscout` |
+| BRIDGE_HISTORY_DB_CONNECTION_STRING | Connection string for the Bridge History database. | `postgres://postgres:qwerty12345@postgresql:5432/scroll?sslmode=disable` |
+| CHAIN_MONITOR_DB_CONNECTION_STRING | Connection string for the Chain Monitor database. | `postgres://postgres:qwerty12345@postgresql:5432/scroll?sslmode=disable` |
+| GAS_ORACLE_DB_CONNECTION_STRING | Connection string for the Gas Oracle database. | `postgres://postgres:qwerty12345@postgresql:5432/scroll?sslmode=disable` |
+| COORDINATOR_DB_CONNECTION_STRING | Connection string for the Coordinator database. | `""` |
+| L1_EXPLORER_DB_CONNECTION_STRING | Connection string for the L1 Explorer database. | `postgresql://postgres:qwerty12345@postgresql:5432/l1-explorer` |
+| ROLLUP_NODE_DB_CONNECTION_STRING | Connection string for the Rollup Node database. | `postgres://postgres:qwerty12345@postgresql:5432/scroll?sslmode=disable` |
+| ROLLUP_EXPLORER_DB_CONNECTION_STRING | Connection string for the Rollup Explorer database. | `""` |
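+
+For a production deployment pointing these services at an external Postgres instance, the `[db]` section might look like the following. This is only a sketch: `db.example.internal`, the user, and the password are placeholders, not real defaults.
+
+```toml
+[db]
+# Shared database used by the core chain services (placeholder host and credentials)
+BRIDGE_HISTORY_DB_CONNECTION_STRING = "postgres://scroll:password@db.example.internal:5432/scroll?sslmode=require"
+CHAIN_MONITOR_DB_CONNECTION_STRING = "postgres://scroll:password@db.example.internal:5432/scroll?sslmode=require"
+GAS_ORACLE_DB_CONNECTION_STRING = "postgres://scroll:password@db.example.internal:5432/scroll?sslmode=require"
+ROLLUP_NODE_DB_CONNECTION_STRING = "postgres://scroll:password@db.example.internal:5432/scroll?sslmode=require"
+```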
+
+### Gas Token
+
+Contained in the `[gas-token]` section.
+
+{/* TODO: Add link to where these modes are documented. */}
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| ALTERNATIVE_GAS_TOKEN_ENABLED | Enables using an alternative gas token instead of ETH. | false |
+| EXAMPLE_GAS_TOKEN_DECIMAL | Decimal places for the example gas token. | 6 |
+| L1_GAS_TOKEN | Address of the L1 ERC20 gas token contract. | `0x68a041e7c20Afa4784b5d9C63246c89545Ac0E66` |
+| GAS_ORACLE_INCORPORATE_TOKEN_EXCHANGE_RATE_ENANBLED | Enables incorporating token exchange rates in gas oracle. | false |
+| EXCHANGE_RATE_UPDATE_MODE | Mode for updating exchange rates. | `Fixed` |
+| FIXED_EXCHANGE_RATE | Fixed exchange rate value when using Fixed mode. | `0.01` |
+| TOKEN_SYMBOL_PAIR | Symbol pair for the token exchange. | `UNIETH` |
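+
+As a sketch of how these values fit together, enabling an alternative gas token with a fixed exchange rate might look like this in `config.toml` (values taken from the defaults above; exact value types and quoting may differ in the template file):
+
+```toml
+[gas-token]
+ALTERNATIVE_GAS_TOKEN_ENABLED = true
+EXAMPLE_GAS_TOKEN_DECIMAL = 6
+L1_GAS_TOKEN = "0x68a041e7c20Afa4784b5d9C63246c89545Ac0E66"
+EXCHANGE_RATE_UPDATE_MODE = "Fixed"
+FIXED_EXCHANGE_RATE = 0.01
+TOKEN_SYMBOL_PAIR = "UNIETH"
+```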
+
+### Sequencer
+
+Contained in the `[sequencer]` section.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| L2_GETH_STATIC_PEERS | Static peers for L2 Geth nodes, as an array of sequencer enode URLs. | `[""]` |
+| L2GETH_SIGNER_ADDRESS | Address of the primary sequencer's L2 Geth signer account. | `""` |
+| L2GETH_KEYSTORE | Keystore file for the primary sequencer's L2 Geth signer account. | `""` |
+| L2GETH_PASSWORD | Password for the primary sequencer's L2 Geth keystore. | `""` |
+| L2GETH_NODEKEY | Node key for the primary sequencer's L2 Geth node. | `""` |
+
+### Additional Sequencer Instances
+
+Contained in the `[sequencer.sequencer-1]` section and incrementing for each additional sequencer instance.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| L2GETH_SIGNER_ADDRESS | Address of the L2 Geth signer account for this sequencer instance. | `0xE8fFE623460e54e546E54B1a0C93A968aF6295bb` |
+| L2GETH_KEYSTORE | Keystore file for this sequencer instance's signer account. | `{"address":"e8ffe623460e54e546e54b1a0c93a968af6295bb","id":"deef9b4a-a085-4f02-af36-afaa19da4132",...}` |
+| L2GETH_PASSWORD | Password for this sequencer instance's keystore. | `second` |
+| L2GETH_NODEKEY | Node key for this sequencer instance. | `bd347890c9d308957207379679e8ed548d015ef05588c228d13f92ea0288a35b` |
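+
+For example, a deployment with one backup sequencer might be sketched out as follows -- the enode URLs, keystore contents, and passwords are placeholders to replace with your own values:
+
+```toml
+[sequencer]
+L2GETH_SIGNER_ADDRESS = "<primary-signer-address>"
+L2_GETH_STATIC_PEERS = ["<primary-sequencer-enode-url>", "<sequencer-1-enode-url>"]
+
+[sequencer.sequencer-1]
+L2GETH_SIGNER_ADDRESS = "0xE8fFE623460e54e546E54B1a0C93A968aF6295bb"
+L2GETH_KEYSTORE = "<sequencer-1-keystore-json>"
+L2GETH_PASSWORD = "<sequencer-1-keystore-password>"
+L2GETH_NODEKEY = "<sequencer-1-nodekey>"
+
+# [sequencer.sequencer-2], [sequencer.sequencer-3], ... follow the same pattern
+```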
+
+### Bootnode Instances
+
+Contained in the `[bootnode.bootnode-0]` section and incrementing for each additional bootnode instance.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| L2GETH_NODEKEY | Node key for this bootnode instance. | `""` |
+
+
+### Rollup
+
+Contained in the `[rollup]` section.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| MAX_TX_IN_CHUNK | Sets the maximum number of transactions in a chunk. | 100 |
+| MAX_BLOCK_IN_CHUNK | Sets the maximum number of blocks in a chunk. | 100 |
+| MAX_CHUNK_IN_BATCH | Sets the maximum number of chunks in a batch. | 15 |
+| MAX_BATCH_IN_BUNDLE | Sets the maximum number of batches in a bundle. | 30 |
+| MAX_L1_MESSAGE_GAS_LIMIT | Defines the maximum gas limit for L1 messages. | 10000000 |
+| TEST_ENV_MOCK_FINALIZE_ENABLED | Enables mock finalization for testing environments. | true |
+| TEST_ENV_MOCK_FINALIZE_TIMEOUT_SEC | Sets the timeout for mock finalization in seconds. | 300 |
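+
+As a sketch, the batching limits above map onto `config.toml` like this (defaults shown; tune these limits for your own chain):
+
+```toml
+[rollup]
+MAX_TX_IN_CHUNK = 100
+MAX_BLOCK_IN_CHUNK = 100
+MAX_CHUNK_IN_BATCH = 15
+MAX_BATCH_IN_BUNDLE = 30
+MAX_L1_MESSAGE_GAS_LIMIT = 10000000
+```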
+
+### Frontend
+
+Contained in the `[frontend]` section.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| EXTERNAL_RPC_URI_L1 | External RPC URI for L1. | `http://l1-devnet.scrollsdk` |
+| EXTERNAL_RPC_URI_L2 | External RPC URI for L2. | `http://l2-rpc.scrollsdk` |
+| BRIDGE_API_URI | URI for the Bridge API. | `http://bridge-history-api.scrollsdk/api` |
+| ROLLUPSCAN_API_URI | URI for the Rollupscan API. | `http://rollup-explorer-backend.scrollsdk/api` |
+| EXTERNAL_EXPLORER_URI_L1 | External Explorer URI for L1. | `http://l1-explorer.scrollsdk` |
+| EXTERNAL_EXPLORER_URI_L2 | External Explorer URI for L2. | `http://blockscout.scrollsdk` |
+| ADMIN_SYSTEM_DASHBOARD_URI | URI for the Admin System Dashboard. | `http://admin-system-dashboard.scrollsdk` |
+| GRAFANA_URI | URI for Grafana. | `http://grafana.scrollsdk` |
+
+### Genesis
+
+Contained in the `[genesis]` section.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| L2_MAX_ETH_SUPPLY | Sets the maximum ETH supply for the L2 network. | `226156424291633194186662080095093570025917938800079226639565593765455331328` |
+| L2_DEPLOYER_INITIAL_BALANCE | Sets the initial balance for the L2 deployer account. | 1000000000000000000 |
+
+### Contracts
+
+Contained in the `[contracts]` section.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| DEPLOYMENT_SALT | Salt used for contract deployment. | `salt-000` |
+| L1_FEE_VAULT_ADDR | Address of the L1 fee vault contract. | `0x0000000000000000000000000000000000000001` |
+| L1_PLONK_VERIFIER_ADDR | Address of the L1 PLONK verifier contract. | `0x0000000000000000000000000000000000000001` |
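+
+The deployment salt determines the deterministic addresses of the deployed contracts, so each new deployment should use its own value -- for example (the salt string below is only an illustrative placeholder):
+
+```toml
+[contracts]
+DEPLOYMENT_SALT = "my-chain-salt-001"
+L1_FEE_VAULT_ADDR = "0x0000000000000000000000000000000000000001"
+L1_PLONK_VERIFIER_ADDR = "0x0000000000000000000000000000000000000001"
+```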
+
+### Contracts Overrides
+
+Contained in the `[contracts.overrides]` section.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| L2_MESSAGE_QUEUE | Override address for the L2 message queue contract. | `0x5300000000000000000000000000000000000000` |
+| L1_GAS_PRICE_ORACLE | Override address for the L1 gas price oracle contract. | `0x5300000000000000000000000000000000000002` |
+| L2_WHITELIST | Override address for the L2 whitelist contract. | `0x5300000000000000000000000000000000000003` |
+| L2_WETH | Override address for the L2 WETH contract. | `0x5300000000000000000000000000000000000004` |
+| L2_TX_FEE_VAULT | Override address for the L2 transaction fee vault contract. | `0x5300000000000000000000000000000000000005` |
+
+### Coordinator
+
+Contained in the `[coordinator]` section.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| CHUNK_COLLECTION_TIME_SEC | Time in seconds for chunk collection. | 3600 |
+| BATCH_COLLECTION_TIME_SEC | Time in seconds for batch collection. | 1800 |
+| BUNDLE_COLLECTION_TIME_SEC | Time in seconds for bundle collection. | 600 |
+| COORDINATOR_JWT_SECRET_KEY | Secret key used for JWT authentication in the coordinator. | `e788b62d39254928a821ac1c76b274a8c835aa1e20ecfb6f50eb10e87847de44` |
+
+
+### Ingress
+
+Contained in the `[ingress]` section.
+
+Ingress values are not used by the configuration generation scripts, but used by the `scroll-sdk-cli` to configure hosts and TLS settings in the values files for each chart.
+
+| Config Variable | Description | Default Value |
+|-----------------|-------------|---------------|
+| FRONTEND_HOST | Host for the frontend. | `frontends.scrollsdk` |
+| BRIDGE_HISTORY_API_HOST | Host for the Bridge History API. | `bridge-history-api.scrollsdk` |
+| ROLLUP_EXPLORER_API_HOST | Host for the Rollup Explorer API. | `rollup-explorer-backend.scrollsdk` |
+| COORDINATOR_API_HOST | Host for the Coordinator API. | `coordinator-api.scrollsdk` |
+| RPC_GATEWAY_HOST | Host for the RPC Gateway. | `l2-rpc.scrollsdk` |
+| BLOCKSCOUT_HOST | Host for Blockscout. | `blockscout.scrollsdk` |
+| BLOCKSCOUT_BACKEND_HOST | Host for Blockscout Backend. | `blockscout-backend.scrollsdk` |
+| ADMIN_SYSTEM_DASHBOARD_HOST | Host for the Admin System Dashboard. | `admin-system-dashboard.scrollsdk` |
+| L1_DEVNET_HOST | Host for the L1 Devnet. | `l1-devnet.scrollsdk` |
+| L1_EXPLORER_HOST | Host for the L1 Explorer. | `l1-explorer.scrollsdk` |
+| GRAFANA_HOST | Host for the Grafana frontend. | `grafana.scrollsdk` |
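+
+For a production deployment on your own domain, these hosts would typically be replaced -- for example (a sketch; `mychain.example.com` is a placeholder domain that you would point at your ingress controller or load balancer):
+
+```toml
+[ingress]
+FRONTEND_HOST = "frontends.mychain.example.com"
+BRIDGE_HISTORY_API_HOST = "bridge-history-api.mychain.example.com"
+ROLLUP_EXPLORER_API_HOST = "rollup-explorer-backend.mychain.example.com"
+COORDINATOR_API_HOST = "coordinator-api.mychain.example.com"
+RPC_GATEWAY_HOST = "rpc.mychain.example.com"
+BLOCKSCOUT_HOST = "blockscout.mychain.example.com"
+ADMIN_SYSTEM_DASHBOARD_HOST = "admin-system-dashboard.mychain.example.com"
+GRAFANA_HOST = "grafana.mychain.example.com"
+```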
+
+{/* TODO: Check Blockscout backend host after PR is merged. */}
+
+## Sepolia Deployment
+
+To use Sepolia as the basechain for a testnet deployment, you will need to generate new wallets for the accounts listed above and provide a Sepolia RPC endpoint with generous rate limits.
+
+The `scroll-sdk-cli` tool has a command for generating new accounts and setting the corresponding values for the various basechain networks.
+
+### Generating Accounts
+
+To generate new test accounts quickly without using the `scroll-sdk-cli`, run the following command on a machine with Foundry installed.
+
+```bash
+cast wallet new --number 6 --json
+```
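+
+The generated addresses and private keys can then be copied into the `[accounts]` section of `config.toml`, assigning one fresh wallet per role. The sketch below uses angle-bracket placeholders rather than real values; remember to fund the L1 sender accounts with Sepolia ETH.
+
+```toml
+[accounts]
+DEPLOYER_PRIVATE_KEY = "<wallet-1-private-key>"
+DEPLOYER_ADDR = "<wallet-1-address>"
+OWNER_ADDR = "<multisig-or-wallet-2-address>"
+L1_COMMIT_SENDER_PRIVATE_KEY = "<wallet-3-private-key>"
+L1_COMMIT_SENDER_ADDR = "<wallet-3-address>"
+# ...and likewise for the finalize sender and the L1/L2 gas oracle senders
+```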
diff --git a/src/content/docs/en/sdk/technical-stack/contracts.mdx b/src/content/docs/en/sdk/technical-stack/contracts.mdx
new file mode 100644
index 000000000..ac0d81e6f
--- /dev/null
+++ b/src/content/docs/en/sdk/technical-stack/contracts.mdx
@@ -0,0 +1,194 @@
+---
+section: sdk
+date: Last Modified
+title: "Scroll SDK Contracts"
+lang: "en"
+permalink: "sdk/technical-stack/contracts"
+excerpt: "Documents the contracts deployed to support the Scroll SDK."
+---
+
+import Aside from "../../../../../components/Aside.astro"
+
+{/* TODO: Review full page before launch */}
+
+## Overview
+
+Contracts deployed for a Scroll SDK chain include both contracts on the L1 (or basechain), and contracts deployed on the L2 (or SDK chain). Additionally, the L2 has "pre-deployed" contracts, matching those on [Scroll](/en/developers/scroll-contracts#l2-predeploys).
+
+## Primary Contracts
+
+Although there are many contracts deployed during a new chain deployment, the most important contracts to understand are below.
+
+### Rollup Contract
+
+- Deployed to L1 using a proxy and also called "Scroll Chain"
+- Accepts new batches and proofs posted by the `rollup-relayer` service by calling "Commit Batch" and "Finalize Batch" methods.
+- Keeps track of finalized State Roots and Withdraw Roots.
+- [View on Etherscan](https://etherscan.io/address/0xa13BAF47339d63B743e7Da8741db5456DAc1E556) | [View Source](https://github.com/scroll-tech/scroll-contracts/blob/main/src/L1/rollup/ScrollChain.sol)
+
+### Scroll Message Queues
+
+- Deployed to L1 with a proxy and pre-deployed to L2
+- On L1, every deposit to the bridge is added as a message to the L1 message queue.
+ - Messages are read by `l2geth` instances, including the sequencer, and brought into the Scroll chain via L1Message transaction types.
+- On L2, every withdrawal sent through the bridge is added as a message, and each block's resulting withdraw root is made available after finalization on the L1 Rollup Contract.
+  - At any time, a user can permissionlessly generate a proof to [finish relaying the message on L1](/en/developers/l1-and-l2-bridging/the-scroll-messenger#finalizing-transactions-on-l1).
+- Messages are added to the queues exclusively by the Messenger contracts on [L1](https://github.com/scroll-tech/scroll-contracts/blob/main/src/L1/L1ScrollMessenger.sol) and [L2](https://github.com/scroll-tech/scroll-contracts/blob/main/src/L2/L2ScrollMessenger.sol).
+- [View L1 Deployment on Etherscan](https://etherscan.io/address/0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B) | [View Source](https://github.com/scroll-tech/scroll-contracts/blob/main/src/L1/rollup/L1MessageQueue.sol)
+- [View L2 Deployment on Scrollscan](https://scrollscan.com/address/0x5300000000000000000000000000000000000000) | [View Source](https://github.com/scroll-tech/scroll-contracts/blob/main/src/L2/predeploys/L2MessageQueue.sol)
+
+### Gas Oracle Contracts
+
+- Deployed on L1 (as part of L1 Message Queue) and pre-deployed on L2
+- On L1, it tracks the gas fees on L2. This is needed since L1 transactions need to pay for their L2 gas upfront.
+ - Stored `l2BaseFee` can only be updated by whitelisted addresses, _TODO: is this done by `gas-oracle` on L1 and L2?_
+- On L2, the contract keeps track of the fees on L1, allowing other contracts to know the cost of sending a transaction back to L1.
+ - Stored `l1BaseFee` can only be updated by whitelisted addresses, _TODO: is this done by `gas-oracle` on L1 and L2?_
+- [View L1 Deployment on Etherscan](https://etherscan.io/address/0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B) | [View Source](https://github.com/scroll-tech/scroll-contracts/blob/main/src/L1/rollup/L2GasPriceOracle.sol)
+- [View L2 Deployment on Scrollscan](https://scrollscan.com/address/0x5300000000000000000000000000000000000002) | [View Source](https://github.com/scroll-tech/scroll-contracts/blob/main/src/L2/predeploys/L1GasPriceOracle.sol)
+
+## Deployment Process
+
+Contracts are deployed by the `contracts` chart. Deterministic addresses are used, with a salt determining each contract's address. A unique salt should be configured in `config.toml` for every new deployment.
+
+During the configuration generation step, a simulation is first done to determine what address a contract will deploy to. This step is done during the creation of the config files for each service's chart and when the `genesis.json` file is created. Contract addresses are then used to set each service's configuration (see [`gen-configs.sh`](https://github.com/scroll-tech/scroll-contracts/blob/feat-robust-deployment/docker/scripts/gen-configs.sh)).
+
+Then, before the `contracts` chart is installed, you will need to fund your SDK `DEPLOYER` account to deploy all contracts on L1 and L2 using actual transactions.
+
+The `contracts` pod will connect to the L2 RPC and deploy the necessary contracts from the `DEPLOYER` account.
+
+
+
+### Funding Deployment Accounts
+
+In production deployments, you will need to manually fund the following wallet addresses from `config.toml`:
+
+- `DEPLOYER_ADDR` *(only needs to be funded on L1)*
+ {/* - Suggested funds: `(L1 basefee * VARIABLE * 10e-9) ETH` */}
+- `L1_COMMIT_SENDER_ADDR`
+ {/* - Suggested funds: `(L1 basefee * VARIABLE * 10e-9) ETH` */}
+- `L1_FINALIZE_SENDER_ADDR`
+ {/* - Suggested funds: `(L1 basefee * VARIABLE * 10e-9) ETH` */}
+- `L1_GAS_ORACLE_SENDER_ADDR`
+ {/* - Suggested funds: `(L1 basefee * VARIABLE * 10e-9) ETH` */}
+- `L2_GAS_ORACLE_SENDER_ADDR` *(funded after L2 chain deployment)*
+ {/* - Suggested funds: `(L1 basefee * VARIABLE * 10e-9) ETH` */}
+
+{/* TODO: Consider recommending an initial funding amount. */}
+
+
+
+
+
+
+## Contracts Deployed
+
+In the table below, we document every contract deployed for Scroll, including a link to the deployment for Scroll's mainnet. Not all of these are used by default for Scroll SDK.
+
+{/* */}
+
+| Contract Name | Description |
+| -------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
+| [L1_WETH_ADDR](https://etherscan.io/address/0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2) | The WETH contract on L1. |
+| [L2_WETH_ADDR](https://scrollscan.com/address/0x5300000000000000000000000000000000000004) | The WETH contract on L2. |
+| [L1_PLONK_VERIFIER_V0_ADDR](https://etherscan.io/address/0x4B8Aa8A96078689384DAb49691E9bA51F9d2F9E1) | The PLONK verifier version 0 on L1. |
+| [L1_ZKEVM_VERIFIER_V0_ADDR](https://etherscan.io/address/0x585DfaD7bF4099E011D185E266907A8ab60DAD2D) | The zkEVM verifier version 0 on L1. |
+| [L1_PLONK_VERIFIER_V1_ADDR](https://etherscan.io/address/0x2293cd12e8564e8219d314b075867c2f66ac6941) | The PLONK verifier version 1 on L1. |
+| [L1_ZKEVM_VERIFIER_V1_ADDR](https://etherscan.io/address/0x4b289E4A5331bAFBc6cCb2F10C39B8EDceCDb247) | The zkEVM verifier version 1 on L1. |
+| [L1_PLONK_VERIFIER_V2_ADDR](https://etherscan.io/address/0x03a72B00D036C479105fF98A1953b15d9c510110) | The PLONK verifier version 2 on L1. |
+| [L1_ZKEVM_VERIFIER_V2_ADDR](https://etherscan.io/address/0x63FB51C55d9605a75F8872C80De260a00fACfaA2) | The zkEVM verifier version 2 on L1. |
+| [L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR](https://etherscan.io/address/0xf94AfBD9370E25Dd6Ca557d5D67634aeFDA2416B) | The multiple version rollup verifier on L1. |
+| [L1_PROXY_ADMIN_ADDR](https://etherscan.io/address/0xEB803eb3F501998126bf37bB823646Ed3D59d072) | The proxy admin contract on L1. |
+| [L1_PROXY_IMPLEMENTATION_PLACEHOLDER_ADDR](https://etherscan.io/address/0xFAf8f72e54d1089fa1882b6f597BfDFF59a8AFca) | The proxy implementation placeholder on L1. |
+| [L1_WHITELIST_ADDR](https://etherscan.io/address/0x259204DDd2bA29bD9b1B9A5c9B093f73d7EAcf37) | The whitelist contract on L1. |
+| [L1_MESSAGE_QUEUE_IMPLEMENTATION_ADDR](https://etherscan.io/address/0xeBaed7A81c298B24EE6d59c22698A951dc448E01) | The message queue implementation on L1. |
+| [L1_MESSAGE_QUEUE_PROXY_ADDR](https://etherscan.io/address/0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B) | The message queue proxy on L1. |
+| [L2_GAS_PRICE_ORACLE_IMPLEMENTATION_ADDR](https://etherscan.io/address/0xfDF1eE0098168eaa61BF87Db68C39c85151a4E9E) | The gas price oracle implementation on L1. |
+| [L2_GAS_PRICE_ORACLE_PROXY_ADDR](https://etherscan.io/address/0x987e300fDfb06093859358522a79098848C33852) | The gas price oracle proxy on L1. |
+| [L1_SCROLL_CHAIN_IMPLEMENTATION_ADDR](https://etherscan.io/address/0x4F250B05262240C787a1eE222687C6eC395C628A) | The Scroll chain implementation on L1. |
+| [L1_SCROLL_CHAIN_PROXY_ADDR](https://etherscan.io/address/0xa13BAF47339d63B743e7Da8741db5456DAc1E556) | The Scroll chain proxy on L1. |
+| [L1_ETH_GATEWAY_IMPLEMENTATION_ADDR](https://etherscan.io/address/0x546E0bF31FB6e7babD493452e4e6999191367B42) | The ETH gateway implementation on L1. |
+| [L1_ETH_GATEWAY_PROXY_ADDR](https://etherscan.io/address/0x7F2b8C31F88B6006c382775eea88297Ec1e3E905) | The ETH gateway proxy on L1. |
+| [L1_WETH_GATEWAY_IMPLEMENTATION_ADDR](https://etherscan.io/address/0xa4F400593DFfc0ae02F940ab58f6e3Cc6fb9FB49) | The WETH gateway implementation on L1. |
+| [L1_WETH_GATEWAY_PROXY_ADDR](https://etherscan.io/address/0x7AC440cAe8EB6328de4fA621163a792c1EA9D4fE) | The WETH gateway proxy on L1. |
+| [L1_STANDARD_ERC20_GATEWAY_IMPLEMENTATION_ADDR](https://etherscan.io/address/0x4015Fc868C06689ABEba4a9dC8FA43B804F6239c) | The standard ERC20 gateway implementation on L1. |
+| [L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR](https://etherscan.io/address/0xD8A791fE2bE73eb6E6cF1eb0cb3F36adC9B3F8f9) | The standard ERC20 gateway proxy on L1. |
+| [L1_GATEWAY_ROUTER_IMPLEMENTATION_ADDR](https://etherscan.io/address/0xb93Ac04010Bd61F45BF492022A5b49a902F798F3) | The gateway router implementation on L1. |
+| [L1_GATEWAY_ROUTER_PROXY_ADDR](https://etherscan.io/address/0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6) | The gateway router proxy on L1. |
+| [L1_SCROLL_MESSENGER_IMPLEMENTATION_ADDR](https://etherscan.io/address/0x72981fD00087fF4F60aBFdE9f353cB1912A37fb6) | The Scroll messenger implementation on L1. |
+| [L1_SCROLL_MESSENGER_PROXY_ADDR](https://etherscan.io/address/0x6774Bcbd5ceCeF1336b5300fb5186a12DDD8b367) | The Scroll messenger proxy on L1. |
+| [L1_ENFORCED_TX_GATEWAY_IMPLEMENTATION_ADDR](https://etherscan.io/address/0x642af405bF64660665B37977449C9C536B806318) | The enforced transaction gateway implementation on L1. |
+| [L1_ENFORCED_TX_GATEWAY_PROXY_ADDR](https://etherscan.io/address/0x72CAcBcfDe2d1e19122F8A36a4d6676cd39d7A5d) | The enforced transaction gateway proxy on L1. |
+| [L1_CUSTOM_ERC20_GATEWAY_IMPLEMENTATION_ADDR](https://etherscan.io/address/0x7F512E2E9dfC4552941D99A5b2405BBcF5781C2c) | The custom ERC20 gateway implementation on L1. |
+| [L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR](https://etherscan.io/address/0xb2b10a289A229415a124EFDeF310C10cb004B6ff) | The custom ERC20 gateway proxy on L1. |
+| [L1_ERC721_GATEWAY_IMPLEMENTATION_ADDR](https://etherscan.io/address/0xd1841c5756428812233eEA78afC17cb2D3e392bb) | The ERC721 gateway implementation on L1. |
+| [L1_ERC721_GATEWAY_PROXY_ADDR](https://etherscan.io/address/0x6260aF48e8948617b8FA17F4e5CEa2d21D21554B) | The ERC721 gateway proxy on L1. |
+| [L1_ERC1155_GATEWAY_IMPLEMENTATION_ADDR](https://etherscan.io/address/0x244BF7aEf29F03916569470a51fA0794B62F8cd7) | The ERC1155 gateway implementation on L1. |
+| [L1_ERC1155_GATEWAY_PROXY_ADDR](https://etherscan.io/address/0xb94f7F6ABcb811c5Ac709dE14E37590fcCd975B6) | The ERC1155 gateway proxy on L1. |
+| [L1_SCROLL_OWNER_ADDR](https://etherscan.io/address/0x798576400F7D662961BA15C6b3F3d813447a26a6) | The Scroll owner contract on L1. |
+| [L1_1D_TIMELOCK_ADDR](https://etherscan.io/address/0x0e58939204eEDa84F796FBc86840A50af10eC4F4) | The 1-day timelock contract on L1. |
+| [L1_7D_TIMELOCK_ADDR](https://etherscan.io/address/0xDC1d1189Da69Ae2016E4976A43De20972D349B1b) | The 7-day timelock contract on L1. |
+| [L1_14D_TIMELOCK_ADDR](https://etherscan.io/address/0x1A658B88fD0a3c82fa1a0609fCDbD32e7dd4aB9C) | The 14-day timelock contract on L1. |
+| [L1_GAS_PRICE_ORACLE_ADDR](https://scrollscan.com/address/0x5300000000000000000000000000000000000002) | The gas price oracle contract on L2. |
+| [L2_MESSAGE_QUEUE_ADDR](https://scrollscan.com/address/0x5300000000000000000000000000000000000000) | The message queue contract on L2. |
+| [L2_TX_FEE_VAULT_ADDR](https://scrollscan.com/address/0x5300000000000000000000000000000000000005) | The transaction fee vault contract on L2. |
+| [L2_WHITELIST_ADDR](https://scrollscan.com/address/0x5300000000000000000000000000000000000003) | The whitelist contract on L2. |
+| [L2_PROXY_ADMIN_ADDR](https://scrollscan.com/address/0xA76acF000C890b0DD7AEEf57627d9899F955d026) | The proxy admin contract on L2. |
+| [L2_PROXY_IMPLEMENTATION_PLACEHOLDER_ADDR](https://scrollscan.com/address/0xF8a069d9230238763Fc574157fa39a78396bd26c) | The proxy implementation placeholder on L2. |
+| [L2_SCROLL_MESSENGER_IMPLEMENTATION_ADDR](https://scrollscan.com/address/0x6fa66EeD8e8086f4c77204B5484D26F4e9AB7772) | The Scroll messenger implementation on L2. |
+| [L2_SCROLL_MESSENGER_PROXY_ADDR](https://scrollscan.com/address/0x781e90f1c8Fc4611c9b7497C3B47F99Ef6969CbC) | The Scroll messenger proxy on L2. |
+| [L2_ETH_GATEWAY_IMPLEMENTATION_ADDR](https://scrollscan.com/address/0x191770c52309dff2c52FfEcf059ECC3862f5D721) | The ETH gateway implementation on L2. |
+| [L2_ETH_GATEWAY_PROXY_ADDR](https://scrollscan.com/address/0x6EA73e05AdC79974B931123675ea8F78FfdacDF0) | The ETH gateway proxy on L2. |
+| [L2_WETH_GATEWAY_IMPLEMENTATION_ADDR](https://scrollscan.com/address/0x86c5CBfC03ffFC7faf5dfC7D781A9adfA9f47dD1) | The WETH gateway implementation on L2. |
+| [L2_WETH_GATEWAY_PROXY_ADDR](https://scrollscan.com/address/0x7003E7B7186f0E6601203b99F7B8DECBfA391cf9) | The WETH gateway proxy on L2. |
+| [L2_STANDARD_ERC20_GATEWAY_IMPLEMENTATION_ADDR](https://scrollscan.com/address/0x3ffe801a43D25d0288683237A848e14f73a226f0) | The standard ERC20 gateway implementation on L2. |
+| [L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR](https://scrollscan.com/address/0xE2b4795039517653c5Ae8C2A9BFdd783b48f447A) | The standard ERC20 gateway proxy on L2. |
+| [L2_GATEWAY_ROUTER_IMPLEMENTATION_ADDR](https://scrollscan.com/address/0x3808d0F2F25839E73e0Fbf711368fC4aE80c7763) | The gateway router implementation on L2. |
+| [L2_GATEWAY_ROUTER_PROXY_ADDR](https://scrollscan.com/address/0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79) | The gateway router proxy on L2. |
+| [L2_SCROLL_STANDARD_ERC20_ADDR](https://scrollscan.com/address/0xC7d86908ccf644Db7C69437D5852CedBC1aD3f69) | The Scroll standard ERC20 contract on L2. |
+| [L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR](https://scrollscan.com/address/0x66e5312EDeEAef6e80759A0F789e7914Fb401484) | The Scroll standard ERC20 factory contract on L2. |
+| [L2_CUSTOM_ERC20_GATEWAY_IMPLEMENTATION_ADDR](https://scrollscan.com/address/0x1D40306EEfCF6EBd496d6048F6edf8892346e558) | The custom ERC20 gateway implementation on L2. |
+| [L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR](https://scrollscan.com/address/0x64CCBE37c9A82D85A1F2E74649b7A42923067988) | The custom ERC20 gateway proxy on L2. |
+| [L2_ERC721_GATEWAY_IMPLEMENTATION_ADDR](https://scrollscan.com/address/0x0894150DB82B912105F6D0907B5c69E72F1Df279) | The ERC721 gateway implementation on L2. |
+| [L2_ERC721_GATEWAY_PROXY_ADDR](https://scrollscan.com/address/0x7bC08E1c04fb41d75F1410363F0c5746Eae80582) | The ERC721 gateway proxy on L2. |
+| [L2_ERC1155_GATEWAY_IMPLEMENTATION_ADDR](https://scrollscan.com/address/0xAc92E88bAc1848A5FeEA5cf5A60e0abc3bD5Df94) | The ERC1155 gateway implementation on L2. |
+| [L2_ERC1155_GATEWAY_PROXY_ADDR](https://scrollscan.com/address/0x62597Cc19703aF10B58feF87B0d5D29eFE263bcc) | The ERC1155 gateway proxy on L2. |
+| [L2_SCROLL_OWNER_ADDR](https://scrollscan.com/address/0x13D24a7Ff6F5ec5ff0e9C40Fc3B8C9c01c65437B) | The Scroll owner contract on L2. |
+| [L2_1D_TIMELOCK_ADDR](https://scrollscan.com/address/0x2b14d0E4b042d11C7e3Fc653132a2c82EFa7d376) | The 1-day timelock contract on L2. |
+| [L2_7D_TIMELOCK_ADDR](https://scrollscan.com/address/0x98DE219A50584be7ca16A065f7714D220c0105F6) | The 7-day timelock contract on L2. |
+| [L2_14D_TIMELOCK_ADDR](https://scrollscan.com/address/0xf6069DB81239E5194bb53f83aF564d282357bc99) | The 14-day timelock contract on L2. |
+| [SCROLL_CHAIN_COMMITMENT_VERIFIER_ADDR](https://etherscan.io/address/0xC4362457a91B2E55934bDCb7DaaF6b1aB3dDf203) | The Scroll chain commitment verifier contract. |
+| [POSEIDON_UNIT2_ADDR](https://etherscan.io/address/0x3508174Fa966e75f70B15348209E33BC711AE63e) | The Poseidon unit 2 contract. |
+| [L1_BATCH_BRIDGE_GATEWAY_PROXY_ADDR](https://etherscan.io/address/0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4) | The Batch Deposit proxy contract on L1. |
+| [L1_BATCH_BRIDGE_GATEWAY_IMPLEMENTATION_ADDR](https://etherscan.io/address/0x7999cdD5E2893475D89211A2E3FdA67a841E3233) | The Batch Deposit implementation on L1. |
+| [L2_BATCH_BRIDGE_GATEWAY_PROXY_ADDR](https://scrollscan.com/address/0xa1a12158bE6269D7580C63eC5E609Cdc0ddD82bC) | The Batch Deposit proxy contract on L2. |
+| [L2_BATCH_BRIDGE_GATEWAY_IMPLEMENTATION_ADDR](https://scrollscan.com/address/0x2c51f93E3075A007A746aa91F4BA07Aee8423b6f) | The Batch Deposit implementation on L2. |
diff --git a/src/content/docs/en/sdk/technical-stack/index.mdx b/src/content/docs/en/sdk/technical-stack/index.mdx
new file mode 100644
index 000000000..2eefe5810
--- /dev/null
+++ b/src/content/docs/en/sdk/technical-stack/index.mdx
@@ -0,0 +1,129 @@
+---
+section: sdk
+title: "Overview of Scroll SDK Stack"
+lang: "en"
+permalink: "sdk/technical-stack"
+excerpt: "An overview of the architecture and components making up Scroll SDK"
+---
+
+import Steps from '../../../../../components/Steps/Steps.astro';
+import Aside from "../../../../../components/Aside.astro"
+
+## Stack Technical Overview
+
+The architecture of a Scroll SDK chain is based on Scroll, and the architecture specifics of a Scroll SDK chain will match the base behavior of Scroll's network. For more information on how Scroll works, please see the [Technology](/en/technology) section.
+
+The articles in this section focus on the various components a Scroll SDK chain operator will need to deploy and run.
+
+## Tooling
+
+### Scroll SDK Repo & Charts
+
+Scroll SDK can be found [on GitHub](https://github.com/scroll-tech/scroll-sdk). The SDK is built on Kubernetes and Helm and is designed to be easy to launch and maintain for anyone familiar with those tools.
+
+The repo consists of these major components:
+1. Example config files for preparing your network *(see [Configuration](/en/sdk/technical-stack/configuration))*
+1. Helm charts for deploying the necessary services and contracts *(see [Services](/en/sdk/technical-stack/services))*
+1. A docker image for building the correct configuration files for these services and gathering smart contract addresses before deployment *(see [Smart Contracts](/en/sdk/technical-stack/contracts))*
+1. An Ansible playbook for setting up a zk prover if not using a third party proof generation service *(see [Proof Generation](/en/sdk/technical-stack/proof-generation))*
+
+
+
+### Scroll SDK CLI
+
+Additionally, the [scroll-sdk-cli](https://github.com/scroll-tech/scroll-sdk-cli) tool is available to help with common automations and testing tasks. It greatly simplifies the process of creating a new Scroll SDK chain and includes a number of helpful commands for interacting with your chain.
+
+It also supports custom plugins using the [oclif framework](https://oclif.com/docs/plugins).
+
+## Scroll Proving SDK
+
+The Scroll Proving SDK is a Rust crate for integrating Scroll SDK support into prover services. Proof generation providers should use this SDK to build their own Helm charts, allowing SDK operators to outsource proof generation.
+
+For an example service built using `scroll-proving-sdk`, see the `cloud.rs` example in the [Scroll Proving SDK](https://github.com/scroll-tech/scroll-proving-sdk/blob/haoyu/sindri_tokio/examples/cloud.rs) repo.
+
+{/* TODO: check if this branch has been merged into main */}
+
+## Deployment Process
+
+Scroll SDK has two deployment options: a local devnet and a production deployment.
+
+When deploying a local devnet, all services are deployed by a single chart. Configuration is minimal, and the deployment includes additional services, like hosting a database inside the cluster. We assume users are working inside the `devnet` directory of a cloned `scroll-sdk` repo.
+
+In production deployments, each service is deployed as an independent chart, which gives tighter control over upgrades and configuration. Production deployments assume that services like a database or monitoring stack will be provided by the chain operator. Because of the additional modularity and flexibility, more configuration is needed and some knowledge of Kubernetes is required. We also assume users will create a new repo for storing their production workflow and configuration files.
+
+### Devnet
+
+For a full devnet deployment walkthrough, see the [Devnet Deployment](/en/sdk/guides/devnet-deployment) guide.
+
+#### PreReqs
+
+To run a local devnet with all services running on a single machine (using minikube), you'll want the following items installed:
+
+- [Docker Engine](https://docs.docker.com/engine/install/) (or Docker Desktop)
+- [kubectl](https://kubernetes.io/docs/tasks/tools/)
+- [minikube](https://minikube.sigs.k8s.io/docs/start/) (for local dev cluster)
+- [Helm](https://helm.sh/docs/intro/install/)
+
+Docker and minikube will need to be running before starting the deployment process.
+
+
+#### Deployment
+
+
+1. Clone the Scroll SDK repo and navigate to the `./devnet` directory.
+
+ ```bash
+ git clone git@github.com:scroll-tech/scroll-sdk.git && cd scroll-sdk/devnet
+ ```
+
+1. Fetch all charts and create configuration files.
+
+ ```bash
+ make bootstrap
+ ```
+
+1. *(Optional)* Modify `./scroll-sdk/config.toml` with your settings and accounts.
+
+ For more information on modifying config.toml, see [Customization](/en/sdk/technical-stack/customization).
+
+1. *(Optional)* Modify `./scroll-sdk/values.yaml` to disable any unneeded services.
+
+ For more information on which services to set `enable:true`, see [Services](/en/sdk/technical-stack/services).
+
+1. *(Optional)* If modifications were made, run `make config` to update the configuration files.
+
+
+1. Launch the Scroll SDK services by running:
+ ```bash
+ make install
+ ```
+
+1. Wait for services to start and contracts to deploy, and you've got a new Scroll SDK chain!
+
+
+
+
+### Production
+
+For a full production deployment walkthrough, see the guides for [Digital Ocean](/en/sdk/guides/production-deployment/digital-ocean) and [AWS](/en/sdk/guides/production-deployment/aws).
+
+#### PreReqs
+
+Before getting started, be sure to install the following:
+
+- [Docker Engine](https://docs.docker.com/engine/install/) (or Docker Desktop)
+- [kubectl](https://kubernetes.io/docs/tasks/tools/)
+- [Helm](https://helm.sh/docs/intro/install/)
+
+For a production environment, you'll want to have a working Kubernetes cluster and `kubectl` configured to point to it. We assume users will create a new repo for storing their production workflow. Docker will be used locally in the configuration preparation workflow.
+
+In addition, you'll want to prepare the following items:
+- A PostgreSQL-compatible database with an admin user
+ - Up to 3 for chain services, optionally 1 more for Blockscout
+- A Kubernetes monitoring service (e.g. Prometheus)
+- A Kubernetes Ingress Controller (e.g. Nginx)
+- A secret store for sensitive information (e.g. AWS Secrets Manager, HashiCorp Vault) and a way to access it from Kubernetes using [External Secrets](https://external-secrets.io/latest/)
+
+More information on choosing and setting up these services for various cloud providers is provided in the [Guides](/en/sdk/guides) section.
\ No newline at end of file
diff --git a/src/content/docs/en/sdk/technical-stack/integrations.mdx b/src/content/docs/en/sdk/technical-stack/integrations.mdx
new file mode 100644
index 000000000..95cdb23a4
--- /dev/null
+++ b/src/content/docs/en/sdk/technical-stack/integrations.mdx
@@ -0,0 +1,45 @@
+---
+section: sdk
+title: "Scroll SDK Integrations"
+lang: "en"
+permalink: "sdk/integrations"
+excerpt: "An look at those building with Scroll SDK"
+---
+
+import Aside from "../../../../../components/Aside.astro"
+
+Scroll is collaborating with a number of projects to integrate their technologies with Scroll SDK. This article should help you understand additional tools and services available to chain operators.
+
+{/* TODO: Check list and add links */}
+## Rollup-As-A-Service Providers
+
+- AltLayer
+{/* - Gateway.fm */}
+- Unifra
+{/* - Caldera */}
+
+## Prover Partners
+
+A number of companies are providing proof generation services for Scroll SDK. For more information on how to use prover services, see the [Proof Generation](/en/sdk/technical-stack/proof-generation) article.
+
+- Sindri
+- Snarkify
+{/* - Gevulot */}
+{/* - Cystic */}
+
+## Block Explorers
+
+- Blockscout
+{/* - L2Scan */}
+{/* - Dora */}
+{/* - Socialscan */}
+
+## Indexers
+
+- Goldsky
+- SimpleHash
+- Hemera
+
+## Data Dashboards
+
+- SimpleHash
\ No newline at end of file
diff --git a/src/content/docs/en/sdk/technical-stack/proof-generation.mdx b/src/content/docs/en/sdk/technical-stack/proof-generation.mdx
new file mode 100644
index 000000000..4b4d41611
--- /dev/null
+++ b/src/content/docs/en/sdk/technical-stack/proof-generation.mdx
@@ -0,0 +1,99 @@
+---
+section: sdk
+date: Last Modified
+title: "Proof Generation"
+lang: "en"
+permalink: "sdk/technical-stack/proof-generation"
+excerpt: "Documents how zk proof generation is done on Scroll SDK."
+---
+
+import Aside from "../../../../../components/Aside.astro"
+
+{/* TODO: Review full page before launch */}
+Generating ZK proofs is a key component of any ZK rollup, and it is often the most painful part of operating one.
+
+## Proof Generation Flow
+
+
+## Mock Finalization
+
+Scroll SDK supports being run in two modes -- one using mock finalization, the other requiring valid zk proofs to finalize. Mock finalization is useful for devnets and testnets, where the zk proof generation is an unnecessary cost beyond brief testing periods.
+
+In its default configuration, a testnet runs without ZK provers and simulates finalization. The L1 contract allows finalizing a batch without a valid proof, and the `rollup-relayer` is configured to call the finalize method once a configured timeout has elapsed without a proof.
+
+To change this mock finalization delay, adjust `config.toml` and set `TEST_ENV_MOCK_FINALIZE_TIMEOUT_SEC` in the `[rollup]` section to the number of seconds you want to wait.
+
+To disable mock finalization entirely, adjust `config.toml` to change `TEST_ENV_MOCK_FINALIZE_ENABLED` to `false`.
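+
+For example, the relevant keys in the `[rollup]` section of `config.toml` would look roughly like this (a sketch; the timeout value shown is illustrative):
+
+```toml
+[rollup]
+# Require real ZK proofs for finalization (disable mock finalization)
+TEST_ENV_MOCK_FINALIZE_ENABLED = false
+
+# Or keep mock finalization but wait 10 minutes before finalizing without a proof:
+# TEST_ENV_MOCK_FINALIZE_ENABLED = true
+# TEST_ENV_MOCK_FINALIZE_TIMEOUT_SEC = 600
+```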
+
+
+
+## Outsourcing Proof Generation to External Service Providers
+
+Teams shouldn't need to become ZK infrastructure experts (or professional GPU sourcers) to run a chain. We've partnered with proof generation specialists to make outsourcing proof generation as easy as deploying one more chart.
+
+Using a proof generation service offers the following benefits:
+- Generate proofs on-demand through API-based services
+- Choose between enterprise solutions or proof marketplaces
+- Scale proof generation elastically based on your actual usage instead of reserving a specific capacity
+- Maintain flexibility to switch between providers, distribute load between them, or run your own GPUs
+
+The following providers already support generating Scroll SDK proofs:
+- Sindri
+ - [Chart Repo](https://github.com/Sindri-Labs/sindri-scroll-sdk)
+ - [Docs](https://sindri.app/docs/introduction/)
+- Snarkify
+ - [Chart Repo](https://github.com/snarkify/snarkify-scroll-proving-sdk/tree/main)
+ - [Docs](https://docs.snarkify.io/)
+
+{/* TODO: Confirm links to charts and docs */}
+
+
+
+See the [Enable Proof Generation using External Providers](/en/sdk/guides/digital-ocean-alt-gas-token#enable-proof-generation-using-external-providers) section of the Digital Ocean guide for an example of how to enable chunk, batch and bundle proofs using external providers.
+
+## Self-host a Prover
+
+
+
+
+
+Our automation code for deploying a prover differs from the rest of the stack. Because Kubernetes is designed to automatically manage resources, it is a poor fit for large clusters of machines with specific hardware requirements, so we use [Ansible](https://www.ansible.com/) instead.
+
+The Ansible playbook for running a prover is available in the GitHub repo [here](https://github.com/scroll-tech/scroll-sdk/tree/develop/ansible/playbooks).
+
+### Prerequisites
+
+- One Ubuntu server with at least 256GB of memory, 32 cores, and a GPU with at least 20GB of memory.
+- One user with passwordless `sudo` access and all permissions -- or add the `ansible_become_password` variable to your [shared-vars.yaml](https://github.com/scroll-tech/scroll-sdk/blob/develop/ansible/playbooks/vars/shared-vars.yaml) file.
+
+### Configuration
+
+- Change the values of `rpc` for `mainnet` or `sepolia` in [shared-vars.yaml](https://github.com/scroll-tech/scroll-sdk/blob/develop/ansible/playbooks/vars/shared-vars.yaml) to your own endpoints.
+- Set the values of `release_version` and `docker_tag` -- these are determined by the `coordinator` service.
+- Set the values in [inventory](https://github.com/scroll-tech/scroll-sdk/blob/develop/ansible/playbooks/inventory/provers) for your `sepolia|mainnet` and `chunk|batch` provers.
+- Optionally, set the value of `pj_path` in [shared-vars.yaml](https://github.com/scroll-tech/scroll-sdk/blob/develop/ansible/playbooks/vars/shared-vars.yaml) -- the default is `/prover/go-prover-docker`, but it can be changed to any path you prefer.
+
+### How to deploy your prover
+
+Be sure to set the correct values in the `export` statements below when setting environment variables. _Do not include the brackets._
+
+```bash
+export env=[mainnet|sepolia]
+export type=[chunk|batch]
+export user=[your_ssh_user]
+
+ansible-playbook --ssh-extra-args='-o StrictHostKeyChecking=no' --private-key $your_key prover-bootstrap.yaml -u $user -e env=$env -e type=$type -i inventory/provers
+
+# Reboot your prover manually, and finally launch this playbook
+ansible-playbook --ssh-extra-args='-o StrictHostKeyChecking=no' --private-key $your_key prover-deploy.yaml -u $user -e env=$env -e type=$type -i inventory/provers
+```
diff --git a/src/content/docs/en/sdk/technical-stack/services.mdx b/src/content/docs/en/sdk/technical-stack/services.mdx
new file mode 100644
index 000000000..463051222
--- /dev/null
+++ b/src/content/docs/en/sdk/technical-stack/services.mdx
@@ -0,0 +1,366 @@
+---
+section: sdk
+date: Last Modified
+title: "Scroll SDK Services"
+lang: "en"
+permalink: "sdk/technical-stack/services"
+excerpt: "The various components running to support the Scroll SDK."
+---
+
+import Aside from "../../../../../components/Aside.astro"
+
+{/* TODO: Review full page before launch */}
+
+## Overview
+
+Scroll SDK is composed of various services that work together to create a functional rollup. This article provides an overview of these services, their roles, and how they contribute to the overall architecture of Scroll SDK.
+
+We'll start by listing the services required for a minimal deployment, followed by a detailed description of each service. This information will help you understand the components of Scroll SDK and make informed decisions about which services to enable or disable based on your specific needs.
+
+In a devnet environment, services are enabled using the [values.yaml](https://github.com/scroll-tech/scroll-sdk/blob/develop/charts/scroll-sdk/values.yaml). Production environments deploy charts individually, and each service corresponds to a helm chart, as seen in this example [Makefile](https://github.com/scroll-tech/scroll-sdk/blob/develop/examples/Makefile.example).
+
+**New to Scroll’s Architecture?** Check out [this article](/en/technology/chain/rollup/) for more general info.
+
+## Helm & Kubernetes
+
+The Scroll SDK uses Helm charts and Kubernetes manifests to manage service deployments. Each service has its own configuration files that are automatically generated from the main `config.toml` file using the `scroll-sdk-cli` tool (described in [Configuration](/en/sdk/technical-stack/configuration)).
+
+The configuration flow works like this:
+
+1. Modify the main `config.toml` file with your deployment settings.
+2. The `scroll-sdk-cli` tool processes this file and generates service-specific config files
+3. These config files are mounted into the appropriate services as Kubernetes ConfigMaps
+4. Helm uses these configs along with the values files to deploy the services
+
+The key configuration files for each service are located in:
+
+- `charts/[service-name]/templates/` - Kubernetes manifests and Helm templates
+- `charts/[service-name]/values.yaml` - Default values for the service
+- `charts/[service-name]/values/production.yaml` - Production-specific values for the service, matching `examples/values/[service-name]-production.yaml`.
+
+For cloud deployments, we suggest using the `Makefile`, `config.toml` and chart values files found in the [examples directory](https://github.com/scroll-tech/scroll-sdk/tree/develop/examples) as a starting point.
+
+
+### Service-specific Config Values
+
+The `config.toml` file is used to generate various service-specific configuration files. When using the `scroll-sdk-cli`, these have the name `[service]-config.yaml` and are passed to charts as a value override of `scrollConfig` alongside the `production.yaml` files mentioned above.
+
+{/* TODO: Double check if this is how devnet works. */}
+
+Each service has a number of configuration values -- some quite nuanced.
+
+In most instances, if a value is not directly set or calculated from `config.toml`, a sensible default is used. Occasionally, a feature that most operators do not need during their initial deployment is also exposed here (for example, LDAP configuration for the `admin-system-backend`). If you manually change these values, keep in mind that the `config.toml` processing script may overwrite your customizations -- please use git commits to track changes.
+
+### Ingress
+
+By default, our charts use the `ingress-nginx` helm chart, which automatically creates ingress resources.
+
+The following services need to be exposed to clients outside of the cluster and have ingresses set up by default. In devnet, the host values end in `.scrollsdk`. If using Ingress DNS, these URLs should be accessible from the host machine, assuming a properly configured `/etc/hosts` file:
+
+| Name | Host | Port |
+|---------------------|-------------------------------|------|
+| admin-system-dashboard | [admin-system-dashboard.scrollsdk](http://admin-system-dashboard.scrollsdk) | 80 |
+| blockscout | [blockscout.scrollsdk](http://blockscout.scrollsdk) | 80 |
+| blockscout *(backend)* | [blockscout-backend.scrollsdk](http://blockscout-backend.scrollsdk) | 80 |
+| bridge-history-api | [bridge-history-api.scrollsdk](http://bridge-history-api.scrollsdk) | 80 |
+| frontends | [frontends.scrollsdk](http://frontends.scrollsdk) | 80 |
+| grafana | [grafana.scrollsdk](http://grafana.scrollsdk) | 80 |
+| l1-devnet | [l1-devnet.scrollsdk](http://l1-devnet.scrollsdk) | 80 |
+| l1-explorer | [l1-devnet-explorer.scrollsdk](http://l1-devnet-explorer.scrollsdk) | 80 |
+| l2-rpc | [l2-rpc.scrollsdk](http://l2-rpc.scrollsdk) | 80 |
+
+When using the example values files for production deployments, these hosts (and any corresponding TLS settings) are set in the service's `production.yaml` file, which is automatically configured by the `scroll-sdk-cli` tool. You will need to configure your host domain's DNS settings to point to the ingress controller or load balancer.
+
+We recommend using TLS for all services in production deployments, which can be configured in the `ingress.tls` section of the `values.yaml` file.
+
+### Secrets
+
+Scroll SDK uses a combination of [secrets](https://kubernetes.io/docs/concepts/configuration/secret/) and [configmaps](https://kubernetes.io/docs/concepts/configuration/configmap/) to manage configuration. Additionally, we use External Secrets to support storing secrets in secret manager tools like AWS Secrets Manager or HashiCorp Vault.
+
+## Deployment Configurations
+
+Below, we describe three configurations for services:
+- **Default**: a robust, local test environment and the default `values.yaml` used by the devnet.
+- **Minimal**: the minimal required components for a testnet (with notes on possible replacements)
+- **Production**: the minimal recommended components for a mainnet, including ZK proof generation
+
+> ✅: Required
+>
+> ⚠️: Hosted options or substitutions are available.
+>
+
+| Service | Default | Minimal | Production |
+| ----------------------- | :-----: | :-----: | :--------: |
+| admin-system-backend | ✅ | | |
+| admin-system-cron | ✅ | | |
+| admin-system-dashboard | ✅ | | |
+| balance-checker | | | ⚠️ |
+| blockscout | ✅ | | |
+| bridge-history-api [^1] | ✅ | ✅ | ✅ |
+| bridge-history-fetcher [^1] | ✅ | ✅ | ✅ |
+| chain-monitor | | ✅ | ✅ |
+| contracts | ✅ | ✅ | ✅ |
+| coordinator-api [^2] | | | ✅ |
+| coordinator-cron [^2] | | | ✅ |
+| frontends | ✅ | | ⚠️ |
+| gas-oracle | ✅ | ✅ | ✅ |
+| grafana | ✅ | | ⚠️ |
+| kube-prometheus-stack | ✅ | | |
+| l1-devnet | ✅ | | |
+| l1-explorer | ✅ | | |
+| l2-bootnode | ✅ | | ✅ |
+| l2-rpc | ✅ | | ✅ |
+| l2-sequencer | ✅ | ✅ | ✅ |
+| loki-stack | ✅ | | ⚠️ |
+| postgresql | ✅ | ⚠️ | ⚠️ |
+| rollup-explorer-backend | ✅ | | |
+| rollup-node | ✅ | ✅ | ✅ |
+| rpc-gateway             |         |         |     ⚠️     |
+{/* | scroll-monitor | ✅ | | | */}
+
+{/* TODO: add scroll-monitor and remove grafana + kube stack after PR is merged and devnet updated. */}
+
+[^1]: Services necessary for claiming funds bridged from L2 to L1 and used by bridge frontend. Could be replaced by [Bridge History SDK](https://github.com/scroll-tech/scroll-bridge-sdk) for other usage.
+[^2]: Only necessary for chains after testnet, when proof generation is needed.
+
+
+## Services Overview
+
+#### Anvil (`l1-devnet`)
+
+*Devnet only.*
+
+Foundry [Anvil](https://book.getfoundry.sh/reference/anvil/) serves as the default local base chain for devnet deployments. It provides a simulated Ethereum environment for testing and development purposes.
+
+#### Admin System Dashboard (`admin-system-dashboard`)
+
+The Admin System Dashboard is a simple Web UI for monitoring proofs in the Scroll SDK chain. Beyond giving insight into proof jobs and registered provers, it provides a high-level overview of the network's health and status, including the number of transactions, blocks, chunks, batches, and bundles.
+
+#### Admin System Backend (`admin-system-backend`)
+
+Handles the backend API for the Admin System Dashboard. Supports LDAP and 2FA.
+
+#### Admin System Cron (`admin-system-cron`)
+
+Handles the cron jobs for the Admin System.
+
+#### Blockscout (`blockscout`)
+
+[Blockscout](https://docs.blockscout.com/) is an open-source block explorer with an Indexer and WebUI configured specifically for the Scroll SDK chain. It allows users to view and interact with blockchain data in a user-friendly interface. We're working with the Blockscout team to implement more Scroll SDK specific features to better support new chain deployments.
+
+#### Bridge History API (`bridge-history-api`)
+
+The [Bridge History API](https://github.com/scroll-tech/scroll/tree/develop/bridge-history-api) is used by frontends for reporting a user's bridging history and generating withdrawal proofs for L2 → L1 bridge claims. It provides essential functionality for cross-chain operations.
+
+This service supports parallel deployments by setting the `controller.replicas` value.
+
+#### Bridge History Fetcher (`bridge-history-fetcher`)
+
+The [Bridge History Fetcher](https://github.com/scroll-tech/scroll/tree/0fd7a877cebc3be74aa4d5d2e1592a83f45ed75a/bridge-history-api) is an indexer that continuously collects all user bridging transactions. It ensures that bridging data is up-to-date and readily available for the Bridge History API.
+
+#### Balance Checker (`balance-checker`)
+
+The Balance Checker is a simple service that tracks and monitors the balances of operator accounts and contracts, such as fee vaults and commit senders. Alerts are sent via Slack notifications.
+
+#### Chain Monitor (`chain-monitor`)
+
+The [Chain Monitor](https://github.com/scroll-tech/chain-monitor) is a security service that short-circuits batch finalization if certain invariants are not satisfied. While optional, it is recommended for enhanced security.
+
+#### Contracts (`contracts`)
+
+The Contracts service contains scripts to deploy necessary [chain contracts](https://github.com/scroll-tech/scroll-contracts) (rollup and bridge) on both L1 and L2. It ensures that the required smart contracts are in place for the Scroll SDK to function properly.
+
+#### Coordinator API (`coordinator-api`)
+
+The [Coordinator API](https://github.com/scroll-tech/scroll/tree/develop/coordinator) allows Provers to register as being open for work and manages scheduling and storage of proofs. It requires significant RAM to run and is disabled by default in the devnet.
+
+This service supports parallel deployments by setting the `controller.replicas` value.
+
+#### Coordinator Cron (`coordinator-cron`)
+
+The [Coordinator Cron](https://github.com/scroll-tech/scroll/tree/develop/coordinator) is a background job that monitors proving tasks for timeout and marks batches (aggregation tasks) as ready once all sub-proofs are ready.
+
+#### Frontends (`frontends`)
+
+[Frontends](https://github.com/scroll-tech/frontends/tree/scroll-stack) provide generic Web UIs for the Rollup Explorer, Bridge, and basic links for setting up your wallet. They offer user-friendly interfaces for interacting with the Scroll SDK chain and viewing its rollup progress.
+
+#### Gas Oracle (`gas-oracle`)
+
+The [Gas Oracle](https://github.com/scroll-tech/scroll/tree/develop/rollup) is a backend service that relays up-to-date fee information between L1 and L2 by updating the gas oracle contract on both layers. It helps maintain accurate gas pricing across the network.
+
+#### Grafana (`grafana`)
+
+*Devnet only.*
+
+{/* TODO: Remove after scroll-monitor is merged. */}
+
+[Grafana](https://grafana.com/docs/grafana/latest/) is an open-source tool for providing a Web UI for viewing metrics dashboards. It allows for visual monitoring and analysis of various system metrics and is pre-packaged with dashboards for viewing Scroll SDK information and logs from Loki.
+
+
+
+#### Rollup Explorer Backend (`rollup-explorer-backend`)
+
+The [Rollup Explorer Backend](https://github.com/scroll-tech/rollup-explorer-backend) is the backend indexer and API for supporting the Rollup Explorer page served by the Frontends service. It allows querying to see chunk and batch information from the rollup, including numbers of transactions and a batch's current finalization status.
+
+#### Rollup Node (`rollup-node`)
+
+The Rollup Node (also called the [Rollup Relayer](https://github.com/scroll-tech/scroll/tree/develop/rollup)) is a core component of the Scroll SDK architecture. It plays a crucial role in managing the rollup process by proposing chunks and batches, committing them to the basechain, and relaying proofs for finalization.
+
+#### L1 Explorer (`l1-explorer`)
+
+*Devnet only.*
+
+The L1 Explorer is a [Blockscout](https://docs.blockscout.com/) instance providing a block explorer interface for the L1 devnet service. It allows users to inspect transactions and blocks on the base layer when deploying to local networks.
+
+
+
+#### L2 Sequencer (`l2-sequencer`)
+
+The L2 Sequencer is the node responsible for producing L2 blocks using Clique Proof of Authority (PoA) consensus. It maintains the order of transactions on the L2 chain. It is an archive node of the network, running [`l2geth`](https://github.com/scroll-tech/go-ethereum), Scroll's fork of geth.
+
+In production deployments, we recommend running a backup sequencer node that can quickly take over for the primary sequencer in case of failure.
+
+#### L2 RPC (`l2-rpc`)
+
+The [L2 RPC node](https://github.com/scroll-tech/go-ethereum) is set up to be exposed to external RPC API consumers. It allows interaction with the L2 chain through standard Ethereum JSON-RPC calls, and incoming transactions are propagated to the mempool, to be picked up and included in blocks by the L2 Sequencer. For more information, see [Running a Scroll Node](/en/developers/guides/running-a-scroll-node/).
+
+This service supports parallel deployments by setting the `controller.replicas` value.
+
+#### L2 Bootnode (`l2-bootnode`)
+
+The [L2 Bootnode](https://github.com/scroll-tech/go-ethereum) is a dedicated node that helps synchronize additional follower nodes. It facilitates network discovery and connectivity, without being exposed to open RPC traffic.
+
+#### Loki Stack (`loki-stack`)
+
+*Devnet only.*
+
+The [Loki Stack](https://grafana.com/docs/loki/latest/) is a log aggregation system. It collects and manages logs from various services within the Scroll SDK ecosystem. By default, these logs are exposed through the Grafana UI.
+
+{/* TODO: Remove after scroll-monitor is merged. */}
+
+#### RPC Gateway (`rpc-gateway`)
+
+The RPC Gateway is a simple RPC load balancer that distributes requests among multiple L2 geth RPC nodes. It helps manage incoming RPC traffic efficiently. This does not replace the need for working with an RPC Infrastructure provider for mainnet deployments.
+
+#### PostgreSQL Database (`postgresql`)
+
+*Devnet only.*
+
+The [PostgreSQL Database](https://www.postgresql.org/) is used across services to coordinate data and tools. It provides a reliable and scalable database solution for the Scroll SDK, but can be replaced with compatible databases.
+
+{/* TODO: Remove after scroll-monitor is merged. */}
+
+#### Kube Prometheus Stack (`kube-prometheus-stack`)
+
+*Devnet only.*
+
+The Kube Prometheus Stack is a collection of Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator. It provides comprehensive monitoring and alerting capabilities for the Kubernetes cluster running the Scroll SDK.
+
+{/* TODO: Remove after scroll-monitor is merged. */}
+
+#### Database Configuration (`db`)
+
+*Devnet only.*
+
+Allows configurations for a DB outside of the default postgres service included in the stack. This provides flexibility in database setup and management for various services within the Scroll SDK ecosystem.
+
+{/* TODO: Is this even still used in devnet? */}
+
+## Cross-Service Communication
+
+The Scroll SDK uses a variety of techniques to pass information between services, with some data being onchain (both L1 and L2) and some being offchain (via p2p connections and database storage). The diagram below provides a visual overview of how services interact with one another.
+
+
+```mermaid
+graph LR
+ subgraph K[Key]
+ direction LR
+ SS[Single Service]
+ RL1{{"Hex Shape: Reads from L1"}}
+ RL2[Dashed Border: Reads from L2 Sequencer]:::L2S
+ SS -.p2p connection.- RL1
+ RL1 <-- "Writes to L1" --> RL2
+ end
+
+ classDef L2S stroke:#77b,stroke-width:2px,stroke-dasharray: 7 4 2 4
+```
+```mermaid
+%%{ init: { 'flowchart': { 'curve': 'monotoneX' } } }%%
+flowchart LR
+ L1[l1-devnet / L1 Full Node]
+
+ L1 <----> GO
+ L1 <----> RN
+ L1 <-.via browser and wallet.-> F
+
+ subgraph SSC["Scroll SDK Chain"]
+ direction LR
+
+ DB[(DB)]
+ L2{{L2-sequencer}}
+ RN{{rollup-node}}:::L2S
+ GO{{gas-oracle}}:::L2S
+
+ CM{{chain-monitor}}:::L2S
+ BHF{{bridge-history-fetcher}}:::L2S
+ CC[coordinator-cron]
+
+ L2R{{l2-rpc}}
+ L2B{{l2-bootnode}}
+ L2 -.- L2R
+ L2 -.- L2B
+
+ DB --> GO
+ DB --> CC
+ DB --> CM
+ DB --> BHF
+ DB --> RN
+ DB --> REB
+
+ CC --> CA
+ BHF --> BHA
+
+ subgraph EXT["External APIs"]
+ BHA{{bridge-history-api}}:::L2S
+ CA[coordinator-api]
+ REB[rollup-explorer-backend]
+ RG
+ BHA
+ end
+ end
+
+
+  L2R --> RG[rpc-gateway]
+
+ F{{frontends}}:::L2S
+ PR[prover]
+
+ BHA --> F
+ REB --> F
+ RG --> F
+ CA --> PR
+
+ classDef L2S stroke:#77b,stroke-width:2px,stroke-dasharray: 7 4 2 4
+ style EXT fill:#FFE0B2
+
+```
+
+{/* TODO: Assess if we want Aux services here */}
+ {/* subgraph AUX["Aux Services"]
+ direction TB
+ BC{{balance-checker}}
+ BS[blockscout]
+ GR[grafana]
+ LKI[loki]
+ end */}
+
+
+
\ No newline at end of file
diff --git a/src/pages/en/home/Navigate.astro b/src/pages/en/home/Navigate.astro
index 765074a6e..97f783106 100644
--- a/src/pages/en/home/Navigate.astro
+++ b/src/pages/en/home/Navigate.astro
@@ -4,6 +4,7 @@ import StartSvg from "~/assets/svgs/home/home-start.svg?raw"
import DevelopSvg from "~/assets/svgs/home/home-develop.svg?raw"
import TechnologySvg from "~/assets/svgs/home/home-technology.svg?raw"
import LearnSvg from "~/assets/svgs/home/home-learn.svg?raw"
+import SdkSvg from "~/assets/svgs/home/home-sdk.svg?raw"
import SectionHeader from "~/components/SectionHeader/index.astro"
const navList = [
@@ -31,6 +32,12 @@ const navList = [
content: "An introduction to the world of ZK and Scalability.",
link: "/en/learn",
},
+ {
+ icon: SdkSvg,
+ name: "SDK",
+ content: "Deploy a chain and own your blockspace.",
+ link: "/en/sdk",
+ },
]
---
diff --git a/src/pages/es/home/Navigate.astro b/src/pages/es/home/Navigate.astro
index cafd22cd6..07a37eec9 100644
--- a/src/pages/es/home/Navigate.astro
+++ b/src/pages/es/home/Navigate.astro
@@ -4,6 +4,7 @@ import StartSvg from "~/assets/svgs/home/home-start.svg?raw"
import DevelopSvg from "~/assets/svgs/home/home-develop.svg?raw"
import TechnologySvg from "~/assets/svgs/home/home-technology.svg?raw"
import LearnSvg from "~/assets/svgs/home/home-learn.svg?raw"
+import SdkSvg from "~/assets/svgs/home/home-sdk.svg?raw"
import SectionHeader from "~/components/SectionHeader/index.astro"
const navList = [
@@ -31,6 +32,12 @@ const navList = [
content: "Una introducción al mundo de ZK y de la escalabilidad.",
link: "/es/learn",
},
+ {
+ icon: SdkSvg,
+ name: "SDK",
+ content: "Lanza una cadena y administra tu propio espacio de bloques.",
+ link: "/es/sdk",
+ },
]
---
diff --git a/src/pages/tr/home/Navigate.astro b/src/pages/tr/home/Navigate.astro
index 60f311693..003c259ff 100644
--- a/src/pages/tr/home/Navigate.astro
+++ b/src/pages/tr/home/Navigate.astro
@@ -4,6 +4,7 @@ import StartSvg from "~/assets/svgs/home/home-start.svg?raw"
import DevelopSvg from "~/assets/svgs/home/home-develop.svg?raw"
import TechnologySvg from "~/assets/svgs/home/home-technology.svg?raw"
import LearnSvg from "~/assets/svgs/home/home-learn.svg?raw"
+import SdkSvg from "~/assets/svgs/home/home-sdk.svg?raw"
import SectionHeader from "~/components/SectionHeader/index.astro"
const navList = [
@@ -31,6 +32,12 @@ const navList = [
content: "ZK ve ölçeklenebilirliğin dünyasına giriş.",
link: "/tr/learn",
},
+ {
+ icon: SdkSvg,
+ name: "SDK",
+ content: "Bir zincir ayağa kaldırın ve kendi ekosisteminize sahip olun.",
+ link: "/tr/sdk",
+ },
]
---
diff --git a/src/pages/zh/home/Navigate.astro b/src/pages/zh/home/Navigate.astro
index f38124f09..ef658295d 100644
--- a/src/pages/zh/home/Navigate.astro
+++ b/src/pages/zh/home/Navigate.astro
@@ -4,6 +4,7 @@ import StartSvg from "~/assets/svgs/home/home-start.svg?raw"
import DevelopSvg from "~/assets/svgs/home/home-develop.svg?raw"
import TechnologySvg from "~/assets/svgs/home/home-technology.svg?raw"
import LearnSvg from "~/assets/svgs/home/home-learn.svg?raw"
+import SdkSvg from "~/assets/svgs/home/home-sdk.svg?raw"
import SectionHeader from "~/components/SectionHeader/index.astro"
const navList = [
@@ -31,6 +32,12 @@ const navList = [
content: "介绍 ZK 和扩容的世界",
link: "/zh/learn",
},
+ {
+ icon: SdkSvg,
+ name: "SDK",
+ content: "部署链并拥有您的区块空间。",
+ link: "/zh/sdk",
+ },
]
---
diff --git a/src/styles/index.css b/src/styles/index.css
index e9ff59c02..2f33a9317 100644
--- a/src/styles/index.css
+++ b/src/styles/index.css
@@ -62,9 +62,11 @@ nav ul {
:where(a:hover) {
@apply text-link-hover;
}
+
:where(a:focus) {
@apply text-link-pressed;
}
+
article > section :is(ul, ol) > * + * {
margin-top: 0.75rem;
}
@@ -102,6 +104,7 @@ article > section ::marker {
font-weight: bold;
@apply text-marker;
}
+
.dark article > section ::marker {
@apply text-dark-marker;
}
@@ -247,6 +250,7 @@ th {
font-weight: bold;
@apply text-black dark:text-white-800;
}
+
td,
th {
padding: 8px 30px;
@@ -309,13 +313,16 @@ h2.heading {
border-color: var(--orange-500);
color: var(--orange-500);
}
+
.header-link:focus-within {
border-left-color: hsla(var(--color-gray-40), 1);
@apply text-light;
}
+
.header-link:hover svg {
color: var(--orange-500);
}
+
.header-link:hover span {
color: var(--orange-500);
}
@@ -330,6 +337,7 @@ h2.heading {
.header-link.depth-3 {
padding-left: 2rem;
}
+
.header-link.depth-4 {
padding-left: 3rem;
}
@@ -339,6 +347,7 @@ h2.heading {
color: inherit;
text-decoration: none;
}
+
.header-link a span {
font-size: 16px;
line-height: normal;
@@ -387,9 +396,11 @@ h2.heading {
--max-width: 46em;
--fullwidth-max-width: 80rem;
}
+
.header-link {
margin-left: 1rem;
}
+
h2.heading {
font-size: 16px;
padding: 0.1rem 1rem;
@@ -404,3 +415,7 @@ h2.heading {
display: inline-block;
margin-left: var(--space-2x);
}
+
+.mermaid-diagram > img {
+ height: auto;
+}