- );
-};
diff --git a/components/UseCasesTangle.tsx b/components/UseCasesTangle.tsx
index 49748c60..db0e9947 100644
--- a/components/UseCasesTangle.tsx
+++ b/components/UseCasesTangle.tsx
@@ -53,35 +53,19 @@ export const ParticipateArea = () => {
-
-
);
};
diff --git a/components/logos/TangleLogo.tsx b/components/logos/TangleLogo.tsx
new file mode 100644
index 00000000..cdc05eeb
--- /dev/null
+++ b/components/logos/TangleLogo.tsx
@@ -0,0 +1,59 @@
+const TangleLogo = (props) => (
+
+);
+
+export default TangleLogo;
diff --git a/components/logos/WebbLogo.tsx b/components/logos/WebbLogo.tsx
deleted file mode 100644
index 89718085..00000000
--- a/components/logos/WebbLogo.tsx
+++ /dev/null
@@ -1,90 +0,0 @@
-const WebbLogo = (props) => (
-
-);
-
-export default WebbLogo;
diff --git a/pages/_meta.json b/pages/_meta.json
index 8a1f554b..6c39ef47 100644
--- a/pages/_meta.json
+++ b/pages/_meta.json
@@ -13,12 +13,12 @@
},
"home": {
"title": "Home",
- "href": "https://webb.tools/",
+ "href": "https://tangle.tools/",
"newWindow": true
},
"docs": {
"title": "Docs",
- "href": "https://docs.webb.tools/",
+ "href": "https://docs.tangle.tools/",
"newWindow": false
},
"faucet": {
@@ -36,8 +36,8 @@
"newWindow": true
},
"resourcesWhitepaper": {
- "title": "Webb Protocol Whitepaper",
- "href": "https://github.com/webb-tools",
+ "title": "Tangle Network Whitepaper",
+ "href": "https://github.com/webb-tools/tangle/blob/main/Tangle_Network_Whitepaper_V1.pdf",
"newWindow": true
},
"resourcesCommunity": {
@@ -90,7 +90,7 @@
},
"whitepaper": {
"title": "Whitepaper",
- "href": "https://eprint.iacr.org/2023/260",
+ "href": "https://github.com/webb-tools/tangle/blob/main/Tangle_Network_Whitepaper_V1.pdf",
"newWindow": true,
"display": "hidden"
},
diff --git a/pages/docs/_meta.json b/pages/docs/_meta.json
index 9cf97104..a722714e 100644
--- a/pages/docs/_meta.json
+++ b/pages/docs/_meta.json
@@ -1,15 +1,17 @@
{
"index": {
- "title": "Quickstart"
+ "title": "Overview"
},
- "tangle-network": "Tangle Network",
- "overview": "About Webb",
- "concepts": "Concepts",
- "projects": "Projects",
- "protocols": "Protocols",
- "ecosystem-roles": "Ecosystem Roles",
+ "build": "Build",
+ "node": "Node",
+ "learn": "Learn",
+ "governance": "Govern",
"community": "Community",
"faqs": "FAQs",
"glossary": "Glossary",
- "contribute": "Contribute or Submit a Bug"
+ "contribute": "Contribute or Submit a Bug",
+ "webb_docs_link": {
+ "title": "Webb Docs",
+ "href": "https://docs.webb.tools/docs"
+ }
}
diff --git a/pages/docs/tangle-network/build/_meta.json b/pages/docs/build/_meta.json
similarity index 82%
rename from pages/docs/tangle-network/build/_meta.json
rename to pages/docs/build/_meta.json
index 8d616567..24d7f2cf 100644
--- a/pages/docs/tangle-network/build/_meta.json
+++ b/pages/docs/build/_meta.json
@@ -3,5 +3,5 @@
"network-information-configuration": "Network Configuration",
"json-rpc-endpoints": "RPC Endpoints",
"pallets": "Pallets",
- "precompile-addresses": "Precompiles"
+ "precompiles": "Precompiles"
}
diff --git a/pages/docs/tangle-network/build/deploy-using-hardhat.mdx b/pages/docs/build/deploy-using-hardhat.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/deploy-using-hardhat.mdx
rename to pages/docs/build/deploy-using-hardhat.mdx
diff --git a/pages/docs/tangle-network/build/json-rpc-endpoints.mdx b/pages/docs/build/json-rpc-endpoints.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/json-rpc-endpoints.mdx
rename to pages/docs/build/json-rpc-endpoints.mdx
diff --git a/pages/docs/tangle-network/build/network-information-configuration.mdx b/pages/docs/build/network-information-configuration.mdx
similarity index 97%
rename from pages/docs/tangle-network/build/network-information-configuration.mdx
rename to pages/docs/build/network-information-configuration.mdx
index fc14eaa1..0b8b3e4c 100644
--- a/pages/docs/tangle-network/build/network-information-configuration.mdx
+++ b/pages/docs/build/network-information-configuration.mdx
@@ -2,9 +2,6 @@
title: Tangle Network - Getting Started Guide for Developers
---
-import { UseCasesArea, ParticipateArea, TestNetworkArea } from "../../../../components/UseCasesTangle";
-import FullWebbCTA from "../../../../components/FullWebbCTA";
-
# Getting Started with Tangle Network
## Networks in the Tangle Ecosystem
@@ -39,12 +36,6 @@ We will offer two types of block explorers to accommodate the Ethereum API and S
| BlockScout | EVM | [link](https://explorer.tangle.tools/) |
| PolkadotJS | Substrate | [link](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Frpc.tangle.tools#/explorer) |
-## Interfaces
-
-### Polkadot Apps Interface
-
-
-
## Funding Your Accounts
Before starting development on our TestNets, fund your account with tTNT tokens, strictly intended for testing purposes and devoid of real-world value. You can obtain tTNT at the [Webb Faucet](https://faucet.webb.tools/).
diff --git a/pages/docs/tangle-network/build/pallets/_meta.json b/pages/docs/build/pallets/_meta.json
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/_meta.json
rename to pages/docs/build/pallets/_meta.json
diff --git a/pages/docs/tangle-network/build/pallets/balances-and-accounts.mdx b/pages/docs/build/pallets/balances-and-accounts.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/balances-and-accounts.mdx
rename to pages/docs/build/pallets/balances-and-accounts.mdx
diff --git a/pages/docs/tangle-network/build/pallets/collectives.mdx b/pages/docs/build/pallets/collectives.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/collectives.mdx
rename to pages/docs/build/pallets/collectives.mdx
diff --git a/pages/docs/tangle-network/build/pallets/consensus-mechanism.mdx b/pages/docs/build/pallets/consensus-mechanism.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/consensus-mechanism.mdx
rename to pages/docs/build/pallets/consensus-mechanism.mdx
diff --git a/pages/docs/tangle-network/build/pallets/crosschain-pallets.mdx b/pages/docs/build/pallets/crosschain-pallets.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/crosschain-pallets.mdx
rename to pages/docs/build/pallets/crosschain-pallets.mdx
diff --git a/pages/docs/tangle-network/build/pallets/democracy.mdx b/pages/docs/build/pallets/democracy.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/democracy.mdx
rename to pages/docs/build/pallets/democracy.mdx
diff --git a/pages/docs/tangle-network/build/pallets/frontier.mdx b/pages/docs/build/pallets/frontier.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/frontier.mdx
rename to pages/docs/build/pallets/frontier.mdx
diff --git a/pages/docs/tangle-network/build/pallets/identity.mdx b/pages/docs/build/pallets/identity.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/identity.mdx
rename to pages/docs/build/pallets/identity.mdx
diff --git a/pages/docs/tangle-network/build/pallets/interoperability.mdx b/pages/docs/build/pallets/interoperability.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/interoperability.mdx
rename to pages/docs/build/pallets/interoperability.mdx
diff --git a/pages/docs/tangle-network/build/pallets/pallet-overview.mdx b/pages/docs/build/pallets/pallet-overview.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/pallet-overview.mdx
rename to pages/docs/build/pallets/pallet-overview.mdx
diff --git a/pages/docs/tangle-network/build/pallets/scheduler.mdx b/pages/docs/build/pallets/scheduler.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/scheduler.mdx
rename to pages/docs/build/pallets/scheduler.mdx
diff --git a/pages/docs/tangle-network/build/pallets/treasury.mdx b/pages/docs/build/pallets/treasury.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/pallets/treasury.mdx
rename to pages/docs/build/pallets/treasury.mdx
diff --git a/pages/docs/tangle-network/build/precompile-addresses.mdx b/pages/docs/build/precompiles.mdx
similarity index 100%
rename from pages/docs/tangle-network/build/precompile-addresses.mdx
rename to pages/docs/build/precompiles.mdx
diff --git a/pages/docs/concepts/_meta.json b/pages/docs/concepts/_meta.json
deleted file mode 100644
index 94a9fff9..00000000
--- a/pages/docs/concepts/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "anchor-system": "Anchor System",
- "distributed-key-gen": "Distributed Key Generation (DKG)",
- "tss-governance": "Threshhold Signature Scheme (TSS)"
-}
diff --git a/pages/docs/concepts/anchor-system/architecture.mdx b/pages/docs/concepts/anchor-system/architecture.mdx
deleted file mode 100644
index 5324af0b..00000000
--- a/pages/docs/concepts/anchor-system/architecture.mdx
+++ /dev/null
@@ -1,50 +0,0 @@
----
-title: System Architecture
-description: Webb Anchor System is an interoperable zero-knowledge proof based system
----
-
-import AnchorArchitecture from '../../../../components/images/AnchorArchitecture'
-
-# System Architecture
-
-Let **_G=(A,E)_** be a graph of anchors and the edges between them, and fix an element schema/structure **_S_** for the element hashes inserted into
-the anchors’ merkle trees.
-
-1. Users insert elements as hashes of the predefined structure into anchors.
-2. Oracles listen for insertions, construct `EdgeMetadata` objects, and relay these objects to the external governance system.
-3. Oracles listen for output from the external governance system and relay this output to the target anchors.
-4. Each anchor processes received output containing `EdgeMetadata` objects and updates their internal `EdgeList`.
-5. Users generate proofs of membership of their insertions in _one-of-many_ of the anchors’ merkle trees and target submission on one anchor.
- Upon completion they submit these proofs to the relayer network.
-6. Relayers listen for delegations and submit these proofs to their target anchor, where they are verified on-chain.
-
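-To make the flow concrete, below is a minimal, self-contained Rust sketch of steps 2-4: an oracle observes an insertion, constructs an `EdgeMetadata`, and relays it to the other anchors. All names are illustrative and governance signing is stubbed out; this is a sketch of the idea, not the production protocol.
-
-```rust
-use std::collections::HashMap;
-
-type ChainId = u64;
-type Element = [u8; 32];
-
-#[derive(Clone)]
-struct EdgeMetadata {
-    src_chain_id: ChainId,
-    root: Element,
-    latest_leaf_index: u64,
-}
-
-#[derive(Default)]
-struct Anchor {
-    chain_id: ChainId,
-    edge_list: HashMap<ChainId, EdgeMetadata>,
-}
-
-impl Anchor {
-    // Step 4: apply a governance-approved EdgeMetadata update.
-    fn update_edge(&mut self, edge: EdgeMetadata) {
-        self.edge_list.insert(edge.src_chain_id, edge);
-    }
-}
-
-// Steps 2-3: an oracle observes an insertion on `src`, constructs the
-// EdgeMetadata, and (after governance signing, elided here) relays it
-// to every other anchor in the graph.
-fn relay_update(src: ChainId, root: Element, leaf_index: u64, anchors: &mut [Anchor]) {
-    let edge = EdgeMetadata { src_chain_id: src, root, latest_leaf_index: leaf_index };
-    for a in anchors.iter_mut().filter(|a| a.chain_id != src) {
-        a.update_edge(edge.clone());
-    }
-}
-
-fn main() {
-    let mut anchors = vec![
-        Anchor { chain_id: 1, ..Default::default() },
-        Anchor { chain_id: 2, ..Default::default() },
-    ];
-    relay_update(1, [7u8; 32], 0, &mut anchors);
-    assert!(anchors[1].edge_list.contains_key(&1));
-}
-```
-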
-## Architecture of an Anchor System Instance
-
-Below is an illustration of how an Anchor System instance interacts with other network participants, namely a governing distributed
-key generation protocol, and the oracle / relayer network. We model the external governance system as a DKG which generates threshold
-signatures of proposals, specifically of EdgeMetadata objects. These signed proposals are used to update the anchors on each compatible blockchain.
-
-
-<AnchorArchitecture />
-
-## Extra Resources
-
-### History
-
-The Webb ecosystem combines multiple architectures to create an interoperable privacy set that enables cross-chain private transactions with scalable
-privacy; this work in turn inspired the more general Anchor System spec.
-
-Our goal was to merge and augment Chainbridge and other privacy protocols, and to expand the relayer responsibilities, in order to design
-our private bridge protocol. The modifications can be summarized as:
-
-- Augmenting Anchors to additionally track an edge list to other Anchors.
- - Anchors are private transaction systems that function both as pool but also as cross-chain bridges.
- - Anchors use a zero-knowledge one-of-many merkle tree membership proof instead of one for a single merkle tree.
- - Anchors can mint/burn the underlying token being deposited and withdrawn.
-- Augmenting ChainBridge's Bridge into a private bridge protocol.
- - We create a new AnchorHandler which modifies the edge list of Anchors needing updates.
- - We augment the Bridge relayers with a multi-party threshold signing scheme, so that one or many threshold networks can govern the bridge.
-
-## Further reading
-
-- [Chainbridge Documentation](https://chainbridge.chainsafe.io/)
-- [Chainbridge Implementation](https://github.com/ChainSafe/ChainBridge)
diff --git a/pages/docs/concepts/anchor-system/overview.mdx b/pages/docs/concepts/anchor-system/overview.mdx
deleted file mode 100644
index eb806174..00000000
--- a/pages/docs/concepts/anchor-system/overview.mdx
+++ /dev/null
@@ -1,139 +0,0 @@
----
-title: Overview
-description: Webb Anchor System is an interoperable zero-knowledge proof based system
----
-
-import AnchorSystem from '../../../../components/images/AnchorSystem'
-import AnchorRelayerSystem from '../../../../components/images/AnchorRelayerSystem'
-
-## Overview
-
-The Anchor System is a bridged system between a set of identical smart contract protocols.
-
-The smart contract protocol (or pallet protocol) defines a system for connecting merkle trees on different
-blockchains in a graph-like framework so that anchors maintain the latest state of their neighboring anchors.
-This state is utilized to prove the existence of data in any anchor from any anchor. The primary goal of the
-Anchor System is to satisfy 2 properties:
-
-- **Liveness** - all anchors can continue to be updated in a partially-synchronous environment
-- **Safety** - all anchor updates are guaranteed to be valid
-
-## Data Structures
-
-Each anchor maintains an on-chain merkle tree and an edge list to the latest valid neighboring anchors’ merkle
-tree roots. Each anchor updates its edges to its neighbors using an external governance system, such as a
-single-signer, multi-signature, threshold-signature, or alternative voting / messaging protocol.
-
-An edge is a directed connection between two anchors. In the language of graphs, a bridge is a
-graph **_G=(A, E)_** where **_A_** is the set of anchor nodes and **_E_** is the set of directed edges between
-them. We represent this data structure using an `Edge` or `EdgeMetadata` that looks like:
-
-```rust
-// An edge for a specific chain
-pub struct EdgeMetadata {
- /// chain id of the connected merkle tree
- pub src_chain_id: ChainID,
- /// root of source chain anchor's native merkle tree
- pub root: Element,
- /// height of source chain anchor's native merkle tree
- pub latest_leaf_index: LastLeafIndex,
- /// target system of the anchor (contract address or tree id)
- pub target: Element,
-}
-```
-
-### Anchor State
-
-Each anchor maintains an edge list containing `EdgeMetadata` objects and a history of the merkle root updates
-up to some bounded limit.
-
-```rust
-/// An example of an edge list data structure
-struct EdgeList {
-  /// Map of edges (chain -> edge metadata)
-  pub edges: HashMap<ChainID, EdgeMetadata>,
-  /// Map of historical roots (chain -> index -> merkle root)
-  pub history: HashMap<ChainID, HashMap<RootIndex, Element>>,
-}
-```
-
-### Elements of an Anchor
-
-The elements of an anchor are intended to be 32-byte hashes of arbitrary data. The preimage to these hashes
-represents the underlying application for a connected bridge of anchors. For each unique element being inserted
-into an anchor, we assume the existence of a zero-knowledge program that verifies knowledge of the preimage of
-the element.
-
-Oftentimes these hashes are hashes of structured data that are relevant for an end-user application. For example,
-it might be the hash of an [unspent transaction output (UTXO)](https://en.wikipedia.org/wiki/Unspent_transaction_output)
-in the tree that allows someone to prove knowledge of a UTXO in one anchor from a different anchor directly.
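-
-As a simplified illustration, the sketch below hashes a toy UTXO into a 32-byte element. It assumes the `sha2` crate purely for convenience; a real anchor uses a circuit-friendly hash (e.g. Poseidon), and the actual UTXO layout differs.
-
-```rust
-use sha2::{Digest, Sha256}; // assumed dependency; stand-in for a circuit-friendly hash
-
-/// A toy UTXO whose hash is inserted into an anchor as a 32-byte element.
-struct Utxo {
-    amount: u128,
-    owner: [u8; 32],
-    blinding: [u8; 32],
-}
-
-/// Hash the structured preimage into the element committed on-chain.
-fn element_of(utxo: &Utxo) -> [u8; 32] {
-    let mut hasher = Sha256::new();
-    hasher.update(utxo.amount.to_le_bytes());
-    hasher.update(utxo.owner);
-    hasher.update(utxo.blinding);
-    hasher.finalize().into()
-}
-```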
-
-
-<AnchorSystem />
-
-## Zero-knowledge Membership
-
-The main utility of the Anchor System is the ability to prove membership of elements in any anchor on a bridge in
-zero-knowledge. These proofs of membership disclose no more data than is necessary to prove that _there exists an
-element with some defined preimage structure in one-of-many of the anchors._
-
-The primary sketch for a zero-knowledge membership program and proof in the Anchor System is:
-
-1. A proof of knowledge of preimage of an element in the anchor.
-2. A proof of knowledge of membership of an element in a merkle tree.
-3. A proof of knowledge of membership of a merkle root in a set of merkle roots.
-
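-In plain (non-zero-knowledge) terms, the relation being proven is the conjunction of those three checks. The hypothetical sketch below states it directly; in the real system each check is enforced inside a circuit rather than executed in the clear, and both helper functions are application-specific.
-
-```rust
-type Element = [u8; 32];
-
-/// The statement a membership proof attests to, written as an in-the-clear
-/// check: `preimage_ok` abstracts check 1, `merkle_root_of` abstracts check 2,
-/// and the root-set lookup is check 3.
-fn statement_holds(
-    element: Element,
-    path: &[Element],
-    root_set: &[Element],
-    preimage_ok: impl Fn(&Element) -> bool,
-    merkle_root_of: impl Fn(&Element, &[Element]) -> Element,
-) -> bool {
-    preimage_ok(&element) && root_set.contains(&merkle_root_of(&element, path))
-}
-```
-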
-The membership proof here is unique for a given element type, though it need not be in practice. That is, we could envision
-an Anchor System where multiple element types are supported and multiple zero-knowledge programs are provided
-for verification of membership proofs. For example, we may consider an Anchor System that supports both UTXO and decentralized
-identity ([DID](https://www.w3.org/TR/did-core/)) elements together.
-
-## Oracle Network
-
-The Anchor System encompasses a system with a variety of agents and requires some of these agents to play the role
-of **witnessing** and **relaying** changes across all anchors to each other and to the external governance system.
-We call these agents **oracles** and define their roles as:
-
-- Listening for new insertions into anchors’ merkle trees.
-- Composing the `EdgeMetadata` objects from these observations.
-- Relaying these `EdgeMetadata` objects to the governing system of the Anchor System instance.
-- Relaying the output of the governing system back to the respective anchors.
-
-The oracles of the Anchor System instance are tightly coupled with the external governance system being used. More
-information on how these oracles are used in practice can be found [here](https://github.com/webb-tools/relayer).
-
-
-<AnchorRelayerSystem />
-
-## Relayer Network
-
-Other agents in the Anchor System are responsible for preserving privacy for users interacting with the anchor instances.
-While we use the term relaying/relayer interchangeably, we remark here on a specific relaying role that involves the relaying
-of zero-knowledge proof data to their target system for users.
-
-In any blockchain context, users are required to possess funds in order to interact and transact with smart contracts.
-Therefore, if a privacy-interested user intends to prove membership of some element’s preimage in one of the anchors’
-merkle trees, they **must** be able to do so without compromising their privacy by requiring them to submit these proofs
-themselves. They **must** be able to delegate the submission of their proof through some overlay network.
-
-The relayer network provides this delegation service. Anyone can run a relayer and provide this service. We consider an agent
-a relayer if they expose an API that enables the submission of zero-knowledge proofs of membership for specific anchor
-instances and if this agent submits the relevant transaction on-chain on behalf of the user delegating their proof through this API.
-
-Relayer incentives are a key component of ensuring privacy for end users utilizing the applications built over the Anchor System
-instance. Incentives are highly coupled to the applications implemented. For example, in a private bridge application, users can
-pay a fee to a relayer by committing to that fee at proof generation; the fee is then extracted from the user's
-transfer and paid to the relayer's account.
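-
-A sketch of that fee accounting, with illustrative names and no real token logic:
-
-```rust
-/// Split a withdrawal into (recipient amount, relayer fee). `fee` is the
-/// value the user committed to when generating the proof.
-fn settle_withdrawal(amount: u64, fee: u64) -> Result<(u64, u64), &'static str> {
-    let to_recipient = amount.checked_sub(fee).ok_or("fee exceeds withdrawal amount")?;
-    Ok((to_recipient, fee))
-}
-```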
-
-## Oracle & Relayer Similarities
-
-In practice, the oracle and relayer roles are synergistic and often carried out by the same participant in the protocol. While we
-remark about these roles separately, we often consider them as being undertaken by the same party. We use the terms interchangeably
-to represent the network that provides **both** relaying and oracle services in the Anchor System.
-
-### Further reading
-
-- [Merkle Tree](https://en.wikipedia.org/wiki/Merkle_tree)
-- [Unspent Transaction Output](https://en.wikipedia.org/wiki/Unspent_transaction_output)
diff --git a/pages/docs/concepts/anchor-system/signature-bridge.mdx b/pages/docs/concepts/anchor-system/signature-bridge.mdx
deleted file mode 100644
index 46dfc01c..00000000
--- a/pages/docs/concepts/anchor-system/signature-bridge.mdx
+++ /dev/null
@@ -1,100 +0,0 @@
----
-title: Signature Bridge
-description: Webb Anchor System is an interoperable zero-knowledge proof based system
----
-
-# Signature Bridge
-
-The purpose of the signature bridge is threefold:
-
-1. Verifying message signatures produced by the DKG.
-2. Executing the message's execution data through the respective **Handler**.
-3. Setting new resource IDs to their **Handlers**.
-
-## API
-
-A signature bridge should support a simple API for setting resource IDs and executing proposals:
-
-- `adminSetResourceWithSignature` - sets a new resource ID mapping to its respective **Handler**.
-- `executeProposalWithSignature` - verifies the proposal's signature against the governor and executes the proposal through a **Handler**.
-
-The Signature Bridge knows which handler to route and execute a proposal through using a mapping between **Resource Ids** and **Handlers**. This mapping lives on the **Signature Bridge**.
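-
-A hypothetical Rust rendering of this API is shown below; the real implementation is an on-chain contract or pallet, and every type here is simplified.
-
-```rust
-type ResourceId = [u8; 32];
-type Address = [u8; 20];
-
-/// Simplified Signature Bridge interface. Verifying signatures against the
-/// current governor's key is left abstract.
-trait SignatureBridge {
-    /// Registers a resource ID -> handler mapping, gated on a governor signature.
-    fn admin_set_resource_with_signature(
-        &mut self,
-        resource_id: ResourceId,
-        handler: Address,
-        sig: &[u8],
-    ) -> Result<(), String>;
-
-    /// Verifies the proposal's signature, then executes it through the
-    /// handler registered for the resource ID in the proposal header.
-    fn execute_proposal_with_signature(
-        &mut self,
-        proposal: &[u8],
-        sig: &[u8],
-    ) -> Result<(), String>;
-}
-```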
-
-## Execution Handlers
-
-The purpose of execution handlers is to parse an incoming proposal/message and call a function on a proxy smart contract. The flow looks like:
-
-- Signature Bridge -> Execution Handler -> Execution Context
-
-The Execution Handler knows which contract to interact with using a mapping between **Resource Ids** and **Execution Contexts**. This map lives on the respective **Handler**.
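-
-The two mappings might look like this in a simplified sketch (all names illustrative):
-
-```rust
-use std::collections::HashMap;
-
-type ResourceId = [u8; 32];
-type Address = [u8; 20];
-
-/// Lives on the Signature Bridge: resource ID -> handler.
-struct BridgeRouting {
-    handlers: HashMap<ResourceId, Address>,
-}
-
-/// Lives on each Handler: resource ID -> execution context (e.g. an anchor).
-struct HandlerRouting {
-    contexts: HashMap<ResourceId, Address>,
-}
-
-impl BridgeRouting {
-    /// Resolve the handler a proposal should be executed through.
-    fn handler_for(&self, rid: &ResourceId) -> Option<Address> {
-        self.handlers.get(rid).copied()
-    }
-}
-```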
-
-### Anchor Handler
-
-The purpose of the Anchor Handler is to execute parameter updates for an Anchor contract.
-
-Examples of parameters that we may want to update:
-
-- The new `EdgeMetadata` for a neighboring anchor.
-- The handler controlling the anchor.
-- The verifier for the zero-knowledge proofs.
-
-### Token Wrapper Handler
-
-The purpose of the Token Wrapper Handler is to execute parameter updates for a Token Wrapper contract.
-
-Examples of parameters that we may want to update:
-
-- The fee for wrapping tokens.
-- The tokens that are allowed to be wrapped.
-- The amount of tokens one receives after wrapping.
-- The handler controlling the token wrapper.
-
-### Treasury Handler
-
-The purpose of the Treasury Handler is to execute parameter updates for a Treasury contract.
-
-Examples of parameters that we may want to update:
-
-- The recipient of fees.
-- The recipient of interest on the treasury.
-- The handler controlling the treasury.
-
-## Resource Ids
-
-Resource IDs are identifiers of a target system and the chain that the target system lives on. Examples of these include:
-
-- The anchor contract address and the typed chain ID of the chain this contract is deployed on.
-- The anchor merkle tree ID and the typed chain ID of the chain this pallet is deployed on.
-- The token wrapper contract address and the typed chain ID of the chain this contract is deployed on.
-
-Resource IDs are used in proposals that are sent to the DKG or any external signing/governance system. They are inserted into the proposal headers.
-
-### Usage
-
-When we create a new contract / target system that is meant to be controlled by the Signature Bridge, we must first create its resource ID. This is straightforward:
-
-1. Take the contract's address / identify a numerical identifier for the storage instance (such as a tree ID).
-2. Take the typed chain ID where this target system will live.
-3. Concatenate these values into a single 32-byte value.
-
-#### Example
-
-An example in Rust, taken from within `impl ResourceId` in webb-rs, can be found [here](https://github.com/webb-tools/webb-rs/blob/main/proposals/src/header.rs#L215).
-
-```rust
-pub fn new(
- target_system: TargetSystem,
- typed_chain_id: TypedChainId,
-) -> ResourceId {
- let mut bytes = [0u8; 32];
- let target_system_bytes: [u8; TargetSystem::LENGTH] =
- target_system.into();
- let f = 0;
- let t = TargetSystem::LENGTH;
- bytes[f..t].copy_from_slice(&target_system_bytes);
- let f = t;
- let t = t + TypedChainId::LENGTH;
- bytes[f..t].copy_from_slice(&typed_chain_id.into_bytes());
- Self(bytes)
-}
-```
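-
-For a dependency-free view of the same layout, here is a self-contained sketch. The 26-byte target system / 6-byte typed chain ID split mirrors the `LENGTH` constants in the excerpt above and should be treated as an assumption of this sketch.
-
-```rust
-/// Concatenate a target system and a typed chain ID into a 32-byte resource ID.
-fn resource_id(target_system: [u8; 26], typed_chain_id: [u8; 6]) -> [u8; 32] {
-    let mut bytes = [0u8; 32];
-    bytes[..26].copy_from_slice(&target_system);
-    bytes[26..].copy_from_slice(&typed_chain_id);
-    bytes
-}
-```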
diff --git a/pages/docs/ecosystem-roles/_meta.json b/pages/docs/ecosystem-roles/_meta.json
deleted file mode 100644
index 1c899241..00000000
--- a/pages/docs/ecosystem-roles/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "role-overview": "Overview of Roles",
- "relayer": "Relayer",
- "validator": "Validator"
-}
diff --git a/pages/docs/ecosystem-roles/relayer/_meta.json b/pages/docs/ecosystem-roles/relayer/_meta.json
deleted file mode 100644
index 423a0f8f..00000000
--- a/pages/docs/ecosystem-roles/relayer/_meta.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "overview": "Overview",
- "running-relayer": "Running a Relayer",
- "private-tx": "Private Transaction Relaying",
- "anchor-update": "Anchor Update",
- "data-querying": "Data Querying",
- "roadmap": "Roadmap"
-}
diff --git a/pages/docs/ecosystem-roles/relayer/anchor-update.mdx b/pages/docs/ecosystem-roles/relayer/anchor-update.mdx
deleted file mode 100644
index 35bfc155..00000000
--- a/pages/docs/ecosystem-roles/relayer/anchor-update.mdx
+++ /dev/null
@@ -1,75 +0,0 @@
----
-title: Signature Relaying
-description: A description of Webb relayer signature relaying capabilities.
----
-
-import Callout from "../../../../components/Callout";
-
-# Anchor Update Relaying
-
-As part of the Webb Protocol, the Anchor Update Relayer performs a crucial role in both proposing and relaying signed data payloads. This function is central to the Anchor System's operations and governance.
-
-## Role Components
-
-1. **Proposing:** The relayer acts as the chief proponent of anchor updates, which are sent to the Distributed Key Generation protocol (DKG) for signing. When new data is inserted into the Merkle trees of the Anchors and VAnchors, the relayer formulates an update proposal for the DKG's consideration and potential signing.
-
-2. **Relaying:** Once the anchor updates are signed, the relayer is responsible for submitting these signed payloads to the smart contract SignatureBridges that verify and process the proposals. The relayer also conveys these payloads to the appropriate SignatureBridge instances or Governable instances.
-
-To summarize, the relayer's duties towards the DKG can be described as follows:
-
-- Proposing updates intended to be signed by the DKG
-- Listening to and proposing updates for consideration
-- Facilitating the DKG's signing of these updates via a threshold-signature scheme
-
-A consensus among a threshold of relayers is required to move the update into a queue for the DKG's signing.
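-
-A toy model of that thresholding step, with illustrative names and no networking or signing:
-
-```rust
-use std::collections::{HashMap, HashSet};
-
-/// Tracks relayer votes per proposal (keyed by the proposal's hash) and
-/// queues a proposal for DKG signing once the vote threshold is reached.
-struct ProposalQueue {
-    threshold: usize,
-    votes: HashMap<[u8; 32], HashSet<String>>,
-    queued: Vec<[u8; 32]>,
-}
-
-impl ProposalQueue {
-    fn vote(&mut self, proposal_hash: [u8; 32], relayer_id: String) {
-        let voters = self.votes.entry(proposal_hash).or_default();
-        voters.insert(relayer_id);
-        if voters.len() >= self.threshold && !self.queued.contains(&proposal_hash) {
-            self.queued.push(proposal_hash); // ready for the DKG to sign
-        }
-    }
-}
-```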
-
-# Customizing Your Participation
-
-The relayer network is designed with flexibility in mind. You can customize your relayer to support only certain functions as per your preference. For instance, you might choose to disable governance relaying and data querying, focusing solely on private transaction relaying.
-
-Please be aware that by default, all functions - including governance relaying, private transaction relaying, and data querying - are enabled. Adjustments will need to be made manually should you wish to specialize your relayer's operations.
-
-To set up the relayer exclusively for data proposing and signature relaying, we need to add the following fields to our configuration file, under the
-`[evm..contracts]` section:
-
-### 1. Add the Required Fields to your configuration file:
-
-- **`contract`**: Name of the contract to support (e.g. "VAnchor", "SignatureBridge")
-- **`address`**: Address of the contract
-- **`events-watcher`**: Event watcher should be set to `true` for this strategy and include a specified polling interval
-- **`proposal-signing-backend`**: Indicates how proposals are signed (e.g. "Mocked", "DKG")
-- **`linked-anchors`**: Entries for this anchor contract's connected edges. These fields are used to determine the generation of AnchorUpdate proposals
-
-### 2. Under the `[features]` section we need to turn off data-querying and private-relaying.
-
-- **`data-query`**: Should be set to `false` for this strategy
-- **`governance-relay`**: Should be set to `true` for this strategy (set to `true` by default)
-- **`private-relay`**: Should be set to `false` for this strategy
-
-# Example
-
-An example configuration file for the Goerli network that is configured for governance relaying:
-
-<Callout>
-  You will need to update the linked-anchors and contract addresses for the applicable chains.
-</Callout>
-
-```sh filename="config file" copy
-[[evm.goerli.contracts]]
-contract = "VAnchor"
-address = "0x03b88eD9Ff9bE84e4baD3F55D67AE5ABA610523C"
-events-watcher = { enabled = true, polling-interval = 10000 }
-proposal-signing-backend = { type = "DKG", private-key = "$GOVERNOR_PRIVATE_KEY" }
-linked-anchors = [
- { chain = "ropsten", chain-id = "3", address = "0x66e04f6ae26c310e39f5bf24d873909e6d3b64c7" },
- { chain = "rinkeby", chain-id = "4", address = "0x91127f21d63029eb5b2de05b4b1e9fd3497ee95b"},
- { chain = "polygontestnet", chain-id = "80001", address = "0x1371efed369498718bee3eb5d58e5d3dec86be85" },
- { chain = "optimismtestnet", chain-id = "69", address = "0x5353cede4b8fea148fb1f66f45d3ec27bff2224d" },
- { chain = "arbitrumtestnet", chain-id = "421611", address = "0x4953110789d0cb6de126f4ea88890670ccfe6906" },
-]
-
-[features]
-private-relay = false
-data-query = false
-governance-relay = true
-```
diff --git a/pages/docs/ecosystem-roles/relayer/data-querying.mdx b/pages/docs/ecosystem-roles/relayer/data-querying.mdx
deleted file mode 100644
index ecdd2392..00000000
--- a/pages/docs/ecosystem-roles/relayer/data-querying.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: Relayer Data Querying
-description: An outline of Webb relayer data querying capabilities.
----
-
-import Callout from "../../../../components/Callout";
-
-# Relayer Data Querying
-
-If you wish to only support certain functions within the relayer network you can configure the relayer to only support those functions.
-For instance, you can turn off governance relaying and data querying and only relay private transactions. It is important to note
-that governance relaying, private transaction relaying, and data querying are all on by default.
-
-To set up the relayer exclusively for data querying, we need to add the following fields under the
-`[evm..contracts]` section:
-
-### Required Fields
-
-- **`contract`**: Name of the contract to support (e.g. "VAnchor", "SignatureBridge")
-- **`address`**: Address of the contract
-- **`events-watcher`**: Event watcher should be set to `true` for this strategy and include a specified polling interval
-- **`linked-anchors`**: Entries for this anchor contract's connected edges. These fields are used to determine the generation of AnchorUpdate proposals
-
-Under the `[features]` section we need to turn off governance-relaying and private-relaying.
-
-- **`data-query`**: Should be set to `true` for this strategy (set to `true` by default)
-- **`governance-relay`**: Should be set to `false` for this strategy
-- **`private-relay`**: Should be set to `false` for this strategy
-
-An example configuration file for the Goerli network that is configured for data querying:
-
-<Callout>
-  You will need to update the linked-anchors and contract addresses for the applicable chains.
-</Callout>
-
-```sh filename="config file" copy
-[[evm.goerli.contracts]]
-contract = "VAnchor"
-address = "0x03b88eD9Ff9bE84e4baD3F55D67AE5ABA610523C"
-events-watcher = { enabled = true, polling-interval = 10000 }
-linked-anchors = [
- { chain = "ropsten", chain-id = "3", address = "0x66e04f6ae26c310e39f5bf24d873909e6d3b64c7" },
- { chain = "rinkeby", chain-id = "4", address = "0x91127f21d63029eb5b2de05b4b1e9fd3497ee95b"},
- { chain = "polygontestnet", chain-id = "80001", address = "0x1371efed369498718bee3eb5d58e5d3dec86be85" },
- { chain = "optimismtestnet", chain-id = "69", address = "0x5353cede4b8fea148fb1f66f45d3ec27bff2224d" },
- { chain = "arbitrumtestnet", chain-id = "421611", address = "0x4953110789d0cb6de126f4ea88890670ccfe6906" },
-]
-
-[features]
-private-relay = false
-governance-relay = false
-data-query = true
-```
diff --git a/pages/docs/ecosystem-roles/relayer/managing-liabilities.mdx b/pages/docs/ecosystem-roles/relayer/managing-liabilities.mdx
deleted file mode 100644
index 67f33114..00000000
--- a/pages/docs/ecosystem-roles/relayer/managing-liabilities.mdx
+++ /dev/null
@@ -1,175 +0,0 @@
-# Managing Liabilities and Compliance Needs for Relayers
-
-**Before reading this, be sure to read and understand the [Relayer Overview](/docs/ecosystem-roles/relayer/overview.mdx).**
-
-## Disclaimer
-
-This guide is intended to provide a general overview of the regulatory landscape, potential liabilities, risks, and mitigation strategies associated with operating a relayer in the Webb Protocol system. It is not intended to provide legal advice and should not be used as a substitute for seeking professional legal counsel.
-
-The information contained in this guide is provided on an "as is" basis, without any guarantees of completeness, accuracy, or timeliness, and without warranties of any kind, express or implied. We strive to provide useful and accurate information, but laws and regulations vary by jurisdiction and are subject to change. The applicability of specific laws and regulations may depend on your individual circumstances.
-
-While we have made every attempt to ensure the information in this guide was correct at the time of publication, we are not responsible for any errors or omissions, or for the results obtained from the use of this information.
-
-No reader should act or refrain from acting based on the information in this guide without first obtaining advice from a qualified professional. In no event shall we be liable for any loss or damage (including, without limitation, direct, indirect, or consequential loss or damage) arising from the use of the information in this guide.
-
-We strongly recommend that all potential relayers consult with a qualified legal professional who is familiar with the blockchain and cryptocurrency industry in their jurisdiction. This will ensure you receive advice tailored to your specific circumstances and stay in compliance with all relevant laws and regulations.
-
-## Regulatory Landscape
-
-Operating a relayer in the Webb Protocol system can bring with it an obligation to comply with several laws and regulations. This section aims to provide an overview of the regulatory landscape pertinent to relayer activities, though it is essential to consult with legal professionals for advice tailored to individual circumstances.
-
-**1. Overview of Regulatory Environment**
-
-The regulatory environment for blockchain and cryptocurrency activities varies greatly worldwide, with some jurisdictions welcoming the technology and others imposing strict controls or outright bans. Understanding the regulatory implications in the region where you operate is vital, as failure to comply with local laws can result in severe penalties.
-
-**2. Anti-Money Laundering (AML) and Know Your Customer (KYC) Regulations**
-
-AML and KYC regulations are designed to prevent illegal activities such as money laundering and terrorist financing. While the Webb Protocol itself provides privacy-enhanced transactions, it doesn't exempt relayers from compliance with these rules. AML/KYC rules typically involve verifying the identity of users, monitoring transactions, and reporting suspicious activities to relevant authorities. Non-compliance with these regulations could result in significant fines and other legal consequences.
-
-**3. Securities Laws**
-
-Cryptocurrencies can sometimes fall under the classification of securities, depending on their usage and characteristics. In the US, for instance, the Howey Test is often applied to determine if a cryptocurrency functions as a security. If tokens used or relayed in the Webb Protocol system are classified as securities, it may impose additional regulatory burdens on relayers, such as registration and disclosure requirements.
-
-**4. Data Privacy Laws**
-
-Privacy laws such as the EU's General Data Protection Regulation (GDPR) and the California Consumer Privacy Act (CCPA) impose strict rules on the collection and processing of personal data. While the role of relayers in the Webb Protocol does not typically involve handling personal data, any ancillary services provided by the relayer that involve collecting, storing, or processing personal data will be subject to these regulations.
-
-**5. Blockchain-Specific Regulations**
-
-As blockchain technology continues to evolve, many jurisdictions are introducing laws and regulations that specifically address blockchain and cryptocurrency activities. These range from licensing requirements for certain types of activities, reporting obligations for transactions over a certain value, and rules concerning the security and custody of digital assets.
-
-**6. Compliance and Regulatory Updates**
-
-Given the rapidly evolving nature of the blockchain and cryptocurrency industry, regulatory changes are frequent. It is crucial for relayers to stay up to date with the current laws and regulations in their respective jurisdictions. Regular consultation with legal advisors, attendance at industry conferences and webinars, and subscribing to industry-specific news feeds are among the effective ways of staying informed of relevant regulatory updates.
-
-Understanding the regulatory landscape is critical for any entity operating in the blockchain and cryptocurrency space. Failure to comply with applicable laws and regulations can lead to substantial penalties and reputational damage. As such, potential relayers should seek legal advice to ensure their operations align with all relevant legal requirements.
-
-## Liabilities and Risks
-
-Operating a relayer in the Webb Protocol system involves potential liabilities and risks. This section will provide an overview of these risks, though it is not exhaustive, and operators are urged to seek professional advice to understand all potential liabilities and risks.
-
-**1. Regulatory Risks**
-
-The rapidly evolving nature of regulations in the blockchain and cryptocurrency industry presents a significant risk to operators. Changes in law could result in new compliance requirements or render certain operations illegal. To manage this risk, relayers should seek legal advice and continually monitor the regulatory landscape.
-
-**2. Legal Liabilities**
-
-Potential legal liabilities could arise from a variety of scenarios. For example, if a relayer is used to facilitate transactions involving illicit funds, the operator could potentially be implicated in money laundering activities. Additionally, if a relayer is found to be operating in violation of securities laws, they could face lawsuits from regulatory authorities or private individuals.
-
-**3. Technological Risks**
-
-Relayers face risks associated with the technology they rely on. Vulnerabilities in the software could lead to theft of tokens, interruption of service, or other damages. Therefore, operators must ensure they use up-to-date and secure software and implement strong security measures.
-
-**4. Privacy Risks**
-
-While the Webb Protocol itself is designed for privacy-enhanced transactions, relayers could potentially be used to de-anonymize transactions, especially if combined with other data. Such actions could result in legal repercussions under data protection laws. Therefore, operators must be careful to respect user privacy and adhere to all applicable privacy laws.
-
-**5. Financial Risks**
-
-Relayers might also face financial risks. Changes in the value of the tokens used within the Webb Protocol could affect the profitability of operating a relayer. Similarly, if a relayer operator invests significant funds into setting up and maintaining their operation, they could lose their investment if they are unable to generate sufficient returns.
-
-**6. Reputational Risks**
-
-Non-compliance with regulations, involvement in illicit activities, or failure to provide reliable service could harm a relayer's reputation. In a competitive market, reputation is critical to attracting and retaining users. Consequently, operators should ensure they operate ethically and reliably to maintain a strong reputation.
-
-## Mitigation Strategies
-
-To manage the potential liabilities and risks involved in operating a relayer, it's essential to develop and implement effective mitigation strategies. Here are some suggested strategies, but please note that they should be customized to your specific circumstances and legal jurisdiction:
-
-**1. Regular Legal Consultations**
-
-It's crucial to work closely with legal advisors who are knowledgeable about blockchain and cryptocurrency regulations in your jurisdiction. They can provide guidance on compliance with current laws and update you on any changes in the regulatory landscape that may affect your operations.
-
-**2. Compliance Programs**
-
-Develop comprehensive AML/KYC compliance programs to ensure that your operations do not facilitate illegal activities. Depending on your jurisdiction and the nature of your operations, this might involve implementing user identification processes, monitoring transactions for suspicious activities, and reporting certain information to regulatory authorities.
-
-**3. Technological Safeguards**
-
-Implement robust security measures to protect your systems from cyber threats. This could include the use of firewalls, intrusion detection systems, and encryption. Regular software updates and security audits are also essential to ensure that your systems remain secure against evolving threats.
-
-**4. Data Privacy Protection**
-
-Respect for user privacy is paramount. Even though relayers are not typically handling personal data, any ancillary services provided should be designed with privacy in mind. Familiarize yourself with data protection laws in your jurisdiction and ensure your operations comply with them.
-
-**5. Financial Management**
-
-Sound financial management practices can help mitigate financial risks. Keep a close eye on the market dynamics of the tokens used in the Webb Protocol, and ensure that your operations remain profitable even if token values fluctuate.
-
-**6. Ethical Operations**
-
-Maintaining a strong reputation requires operating ethically and reliably. Ensure your services are reliable and that you communicate clearly and transparently with users. If any issues arise, deal with them promptly and fairly to maintain users' trust.
-
-**7. Insurance Coverage**
-
-Consider obtaining insurance coverage to protect against potential losses from legal liabilities, cyber-attacks, or other risks. Consult with an insurance broker familiar with the blockchain and cryptocurrency industry to understand what coverage options are available to you.
-
-Developing effective mitigation strategies is a key part of managing the liabilities and risks of operating a relayer. Regularly reviewing and updating these strategies is also important, as the operational and regulatory environment can change rapidly. As always, seek professional advice when developing these strategies to ensure they are comprehensive and comply with all relevant laws and regulations.
-
-## Regulatory Examples
-
-While the regulations governing the blockchain and cryptocurrency sector vary significantly across different jurisdictions, here are some examples that can illustrate the diversity in the regulatory landscape:
-
-**1. United States**
-
-In the United States, the regulation of digital assets and blockchain technology is complex and involves multiple agencies. The Securities and Exchange Commission (SEC) has jurisdiction over securities, and it has argued that some tokens can be considered securities under the Howey Test. The Commodity Futures Trading Commission (CFTC) has jurisdiction over commodities and futures and has classified Bitcoin as a commodity.
-
-Simultaneously, the Financial Crimes Enforcement Network (FinCEN) mandates that money transmitters (which could include some relayers) must register with the federal government and follow AML/KYC regulations. Additionally, each state has its own regulations regarding money transmission.
-
-**2. European Union**
-
-The EU has proposed a comprehensive framework for digital assets and blockchain, known as the Markets in Crypto Assets (MiCA) regulation. MiCA, once in effect, will set clear rules for crypto assets not currently covered by EU financial services legislation. It aims to ensure consumer and investor protection and market stability, while supporting innovation. MiCA would establish specific requirements for issuers and service providers, and it includes provisions on governance, capital requirements, information disclosures, and investor rights.
-
-**3. China**
-
-China has taken a stricter stance on cryptocurrencies. While it hasn't banned owning cryptocurrencies, it has prohibited financial institutions and payment companies from providing services related to cryptocurrency transactions. China has also clamped down on mining operations and warned against speculative trading.
-
-**4. Singapore**
-
-Singapore has established itself as a hub for blockchain and digital asset businesses, with the Monetary Authority of Singapore (MAS) providing clear guidance on digital assets. Under the Payment Services Act, digital payment token services (which could include some relayers) must hold a license from MAS and comply with AML/CFT requirements.
-
-**5. Switzerland**
-
-Switzerland has a supportive regulatory environment for blockchain and digital assets, with specific guidelines on initial coin offerings (ICOs) and a classification system for digital assets. The Swiss Financial Market Supervisory Authority (FINMA) classifies tokens into payment tokens, utility tokens, and asset tokens, each with different regulatory implications.
-
-These examples underscore the diversity in the regulatory environment for blockchain and digital assets worldwide. Regulations can change quickly and vary significantly by jurisdiction, emphasizing the importance for potential relayers to seek professional legal advice specific to their situation and jurisdiction.
-
-## Illustrative Cases
-
-To help understand the potential liabilities and risks involved in operating a relayer in the Webb Protocol system, let's consider some hypothetical scenarios:
-
-**1. Case of Money Laundering**
-
-Suppose a relayer facilitates a series of transactions involving a large amount of cryptocurrency. Later, it's discovered that these transactions were part of a money-laundering scheme. The operator failed to implement adequate AML/KYC procedures, making them unable to identify the suspicious nature of these transactions.
-
-In this case, the operator could be held legally responsible for facilitating illegal activities, leading to potential fines, legal repercussions, and damage to the relayer's reputation. It underscores the importance of implementing rigorous AML/KYC measures and continuously monitoring transactions for suspicious activities.
-
-**2. Case of Regulatory Misclassification**
-
-Imagine a relayer operating in a jurisdiction where specific tokens are classified as securities. However, the operator, believing these tokens to be utility tokens, doesn't comply with securities regulations, resulting in unregistered securities transactions.
-
-Once the regulatory authority discovers this, the operator could face severe penalties for violating securities laws, including fines, forced disgorgement, and potential legal action from parties affected by the unregistered transactions. It highlights the necessity of understanding the regulatory classifications of tokens in your jurisdiction and the legal implications of these classifications.
-
-**3. Case of Technological Breach**
-
-Consider a relayer that becomes the target of a sophisticated cyber-attack. The attacker exploits a software vulnerability and manages to disrupt the relayer's operation, leading to significant downtime. Meanwhile, users' transactions are affected, leading to financial losses and user dissatisfaction.
-
-This scenario could result in legal action from affected parties, damage to the relayer's reputation, and loss of users. It demonstrates the need for robust security measures, regular software updates, and a comprehensive disaster recovery plan to mitigate the impact of potential cyber-attacks.
-
-**4. Case of Privacy Violation**
-
-Suppose a relayer provides ancillary services that involve the collection and storage of personal data. Due to a misunderstanding of data protection regulations, the operator fails to implement appropriate measures to protect this data, leading to a breach of users' privacy.
-
-In this case, the operator could face significant penalties under data protection laws, such as GDPR or CCPA, and a severe loss of trust from users. It serves as a reminder that any services involving personal data must comply with data protection laws, and user privacy should be respected at all times.
-
-These hypothetical cases provide examples of how the failure to properly manage the regulatory landscape, comply with legal obligations, implement technological safeguards, and respect user privacy can lead to significant liabilities and risks. As always, it's crucial to seek professional advice to ensure all potential issues are adequately addressed.
-
-## Conclusion
-
-Operating a relayer in the Webb Protocol system presents a unique opportunity in the rapidly evolving blockchain and cryptocurrency industry. While the potential rewards can be significant, it's important to understand and manage the associated liabilities and risks.
-
-Remember that laws and regulations vary by jurisdiction and can change frequently. Regular legal consultations and proactive compliance measures can help manage regulatory risks. Additionally, implementing strong security measures, respecting user privacy, and practicing good financial management can help mitigate other potential liabilities.
-
-This document serves as an initial guide for understanding the role of relayers in the Webb Protocol and the associated regulatory landscape, liabilities, and risks. However, the specifics can depend greatly on your particular situation and jurisdiction, so it's crucial to seek tailored legal and professional advice.
-
-We invite all potential relayers to [join our Discord or other community channels](https://webb.tools/community) to further discuss any questions or concerns you might have about operating a relayer in the Webb Protocol system.
diff --git a/pages/docs/ecosystem-roles/relayer/overview.mdx b/pages/docs/ecosystem-roles/relayer/overview.mdx
deleted file mode 100644
index 2b6db98a..00000000
--- a/pages/docs/ecosystem-roles/relayer/overview.mdx
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: Overview
-description: A description overview of the Webb relayer.
----
-
-import RelayerImages from '../../../../components/images/Relayer'
-import { RelayerImpl } from "../../../../components/RepoArea";
-import Callout from "../../../../components/Callout";
-
-# Understanding the Role of a Relayer in the Webb Protocol
-
-In the Webb Protocol, relayers hold a critical role as multi-functional agents responsible for facilitating private transactions, data queries, and governance updates. Acting as oracles, relayers listen to external data sources (in this case, the state of the anchors for a bridge), and relay this data. This relayed data updates the state of each Anchor and allows applications to reference properties of data stored across connected Anchors.
-
-The relayer system in the Webb Protocol consists of three main functions:
-
-1. **[Private Transaction Relay](/docs/ecosystem-roles/relayer/private-tx.mdx)**: Relayers play a key role in executing privacy-preserving transactions within the Shielded Pools. They assist users by submitting zero-knowledge proofs, a cryptographic method that allows proof of knowledge without revealing the information itself. Users generate this proof data and format it into an appropriate payload. The relayer then takes this payload and submits it to the network on behalf of the users, ensuring the smooth execution of private transactions.
-
-2. **[Data Query Relay](/docs/ecosystem-roles/relayer/data-querying.mdx)**: Relayers also serve as aids to users who need to generate witness data for their zero-knowledge proofs by caching the leaves of the trees of the Shielded Pool system they support, for easy retrieval. This process mirrors the standard oracle-style data query relay, providing data services to users by connecting to external resources and submitting this information to the blockchain. This function is vital for users who require data from external sources to execute their transactions.
-
-3. **[Anchor Update Relay](/docs/ecosystem-roles/relayer/anchor-update.mdx)**: This type of relayer has a significant role in proposing and relaying signed data payloads to Anchor nodes, which are crucial for the governance of the Anchor System. Their responsibilities include:
-
- - **Proposing**: The relayer initiates anchor updates, sent to the Distributed Key Generation protocol (DKG) for signing. When new data is added to the Merkle trees of the Anchors and VAnchors, the relayer formulates an update proposal for the DKG's review and potential approval.
- - **Relaying**: Once the anchor updates are signed, the relayer submits these signed payloads to the smart contract SignatureBridges that verify and process the proposals. Additionally, the relayer transmits these payloads to the appropriate SignatureBridge instances or Governable instances.
-
-Relayer nodes in the Webb Protocol are highly configurable, enabling operators to choose the function(s) they wish to support - private transactions, data queries, governance actions, or any combination thereof. All functions are enabled by default, but operators can manually disable any they do not wish to support. This versatility offers operators the freedom to specialize in certain functions based on their expertise, interest, or strategic considerations, making relayers an integral part of the Webb Protocol ecosystem.
-
-With these capabilities, relayers can execute each function separately, enabling operators to specialize in a specific function or combine them arbitrarily. As such, relayers are not only the backbone of the Webb Protocol but also offer significant opportunities for node operators to contribute to the protocol's operation and success.
-
-
-<RelayerImages />
-
-## Contribute to the Webb Relayer
-
-We are actively making improvements to the Webb Relayer. Check out the Relayer repository and source documentation below to get started. Have feedback to share about a Webb
-relayer? We want to hear from you; share your thoughts [here](https://github.com/webb-tools/feedback/discussions/categories/webb-relayer-feedback).
-
-<Callout>
-  Please keep in mind that this repo is in active development and may not be fully functional. If you find a bug, please report it [at our Relayer repo](https://github.com/webb-tools/relayer/issues/new).
-</Callout>
-
-<RelayerImpl />
diff --git a/pages/docs/ecosystem-roles/relayer/private-tx.mdx b/pages/docs/ecosystem-roles/relayer/private-tx.mdx
deleted file mode 100644
index 1ca1d90a..00000000
--- a/pages/docs/ecosystem-roles/relayer/private-tx.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: Private Transaction Relaying
-description: An outline of Webb relayer private transaction relaying capabilities.
----
-
-import Callout from "../../../../components/Callout";
-
-# Private Transaction Relaying
-
-A Private Transaction Relayer aids users in submitting proofs for private transactions that occur within the Shielded Pools of the Webb Protocol.
-
-In essence, users create zero-knowledge proof data - a kind of cryptographic proof where one can prove they know certain information without revealing that information itself. They then format this data into an appropriate payload.
-
-The job of the Private Transaction Relayer is to take this payload and submit it to the network on behalf of the users. In this way, the relayer plays a critical role in ensuring the smooth execution of private transactions within the system.
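-
-To make the payload concrete, here is a hypothetical shape for such a relay request. The Webb relayer's actual wire format differs, so treat every field as illustrative:
-
-```rust
-/// Illustrative relay request: the user sends the proof plus its public
-/// inputs, and the relayer submits the on-chain transaction (paying gas).
-struct RelayWithdrawRequest {
-    proof: Vec<u8>,           // zero-knowledge proof generated client-side
-    roots: Vec<[u8; 32]>,     // public input: root set the proof was made against
-    nullifier_hash: [u8; 32], // public input: prevents double-spending a note
-    recipient: [u8; 20],      // public input: withdrawal destination address
-    fee: u128,                // fee committed to at proof generation
-}
-```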
-
-# Participate as a Private Transaction Relayer
-
-The relayer node is highly configurable, allowing you to tailor its functions to suit your needs. If you prefer to support specific functions only, you can easily adjust your relayer's settings accordingly.
-
-For example, you might decide to exclusively relay private transactions. In such a case, you would disable the governance relaying and data querying functions.
-
-Please note that **by default, all functions - including governance relaying, private transaction relaying, and data querying - are enabled.** If you want to specialize your relayer, you will need to manually turn off the functions you don't wish to support.
-
----
-
-## Exclusively Private Transactions Relay Node
-
-To set up your relayer node exclusively for relaying private transactions, we need to add the following fields under the
-`[evm..contracts]` section in our configuration file:
-
-### 1. Add the Required Fields
-
-- **`contract`**: Name of the contract to support (e.g. "VAnchor", "SignatureBridge")
-- **`address`**: Address of the contract
-- **`size`**: Represents the minimum size configured for deposit / withdraw on the contract
-- **`events-watcher`**: Event watcher should be set to `false` for this strategy
-- **`withdraw-fee-percentage`**: Determines the fee taken by this relayer
-- **`withdraw-gaslimit`**: Specifies the maximum amount of gas that will be used when submitting a withdraw transaction
-
-### 2. Under the `[features]` section in our configuration file, we need to turn off `data-querying` and `governance-relaying`.
-
-- **`data-query`**: Should be set to `false` for this strategy
-- **`governance-relay`**: Should be set to `false` for this strategy
-- **`private-relay`**: Should be set to `true` for this strategy (set to `true` by default)
-
-### 3. Under the `[evm-etherscan]` section we need to provide `EtherscanApiConfiguration`.
-
-## Example
-
-An example configuration file for the Goerli network and a VAnchor contract should look similar to this:
-
-<Callout>
-  You will need to update the linked-anchors and contract addresses for the applicable chains.
-</Callout>
-
-```sh filename="config file" copy
-[[evm.goerli.contracts]]
-contract = "VAnchor"
-address = "0x03b88eD9Ff9bE84e4baD3F55D67AE5ABA610523C"
-deployed-at = 6896976
-size = 0.01
-events-watcher = { enabled = false }
-withdraw-config = { withdraw-fee-percentage = 0, withdraw-gaslimit = "0x350000" }
-
-[features]
-data-query = false
-governance-relay = false
-private-relay = true
-
-[evm-etherscan.goerli]
-chain-id = 5
-api-key = "$ETHERSCAN_GOERLI_API_KEY"
-```
diff --git a/pages/docs/ecosystem-roles/relayer/relayer-node.mdx.archive b/pages/docs/ecosystem-roles/relayer/relayer-node.mdx.archive
deleted file mode 100644
index 34b2ef79..00000000
--- a/pages/docs/ecosystem-roles/relayer/relayer-node.mdx.archive
+++ /dev/null
@@ -1,273 +0,0 @@
----
-title: Deploying with Docker
-description: An overview of Webb Tangle node and Webb Relayer deployment process.
----
-
-import Callout from "../../../../components/Callout";
-
-# Deploying Tangle Validator and Relayer
-
-It is likely that network participants that are running a Tangle validator node may also want to run a relayer node. This guide
-will walk you through the process of deploying a Tangle validator and a Webb Relayer. By the end of this document, you will have set up a Webb Relayer
-at a publicly accessible endpoint alongside a Tangle validator node, both of which will be running within Docker containers.
-
-## Prerequisites
-
-Docker must be installed on the Linux machine. For instructions on how to install Docker,
-please visit the official Docker installation documentation [here](https://docs.docker.com/desktop/install/linux-install/).
-
-When connecting to Tangle on Kusama, it will take a few days to completely
-sync the embedded relay chain. Make sure that your system meets the requirements, which you can read [here](/docs/tangle-network/node/hardware/).
-
-## Using Docker Compose
-
-The quickest and easiest way to get started is to make use of our published Docker Tangle image. In doing so, users simply
-create a local directory to store the chain data, download the latest chainspec for standalone testnet, set their keys, and run the start
-command to get up and running.
-
-### **1. Pull the Tangle Docker image:**
-
-We will use the pre-built Tangle Docker image to generate and insert the required keys for our node.
-
-```sh filename="pull" copy
-# Only use "main" if you know what you are doing, it will use the latest and maybe unstable version of the node.
-
-docker pull ghcr.io/webb-tools/tangle/tangle-standalone:main
-```
-
-### **2. Create a local directory to store the chain data:**
-
-Let us create a directory where we will store all the data for our node. This includes the chain data, keys, and logs.
-
-```sh filename="mkdir" copy
-mkdir /var/lib/tangle/
-```
-
-### **3. Generate and store keys:**
-
-We need to generate the required keys for our node. For more information on these keys, please see the [Required Keys]() section.
-The keys we need to generate include the following:
-
-- DKG key (Ecdsa)
-- Aura key (Sr25519)
-- Account key (Sr25519)
-- Grandpa key (Ed25519)
-
-Let's now insert our required secret keys. We will not pass the SURI in the command; instead, the command will prompt for it interactively, and you
-should paste your SURI when asked.
-
-**Account Keys**
-
-```sh filename="Acco" copy
-# it will ask for your suri, enter it.
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type acco
-```
-
-**Aura Keys**
-
-```sh filename="Aura" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type aura
-```
-
-**Im-online Keys** - **these keys are optional**
-
-```sh filename="Imonline" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type imon
-```
-
-**DKG Keys**
-
-```sh filename="DKG" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ecdsa \
- --key-type wdkg
-```
-
-**Grandpa Keys**
-
-```sh filename="Grandpa" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ed25519 \
- --key-type gran
-```
-
-To verify that you have generated the keys correctly, run:
-
-```sh filename="ls" copy
-ls ~/webb/tangle/chains/*/keystore
-# You should see some files there; these are the keys.
-```
-
-### **4. Create the Docker compose file:**
-
-Now that we have generated the keys, we can start the Tangle Validator and Relayer. We will use the `docker-compose` file provided
-in the [Tangle repo](https://github.com/webb-tools/tangle/tree/51e8ad6d1ba83a40475ce8debdb4c28c2ccf081a/docker).
-
-Let's start by creating a docker-compose file:
-
-```sh filename="nano" copy
-nano ~/webb/tangle/docker-compose.yml
-```
-
-Add the following lines:
-
-```yaml filename="docker-compose.yml" copy
-# This is an example of a docker compose file which contains both a Relayer and a Tangle Node.
-version: "3"
-
-services:
- webb_relayer:
- # Here you should checkout
- # https://github.com/webb-tools/relayer/pkgs/container/relayer/versions?filters%5Bversion_type%5D=tagged
- # For the latest stable version. Only use "edge" if
- # you know what you are doing, it will use the latest and maybe
- # unstable version of the relayer.
- image: ghcr.io/webb-tools/relayer:${RELAYER_RELEASE_VERSION}
- container_name: webb_relayer
- env_file: .env
- depends_on:
- - caddy
- ports:
- - "$WEBB_PORT:$WEBB_PORT"
- volumes:
- - $PWD/config:/config
- - relayer_data:/store
- restart: always
- command: /webb-relayer -vvv -c /config
-
- tangle_standalone:
- # Here you should checkout
- # https://github.com/webb-tools/tangle/pkgs/container/tangle-standalone/versions?filters%5Bversion_type%5D=tagged
- # For the latest stable version. Only use "main" if
- # you know what you are doing, it will use the latest and maybe
- # unstable version of the node.
- image: ghcr.io/webb-tools/tangle/tangle-standalone:${TANGLE_RELEASE_VERSION}
- container_name: tangle_standalone
- env_file: .env
- ports:
- - "30333:30333"
- - "9933:9933"
- - "9944:9944"
- - "9615:9615"
- volumes:
- - tangle_data:/data
- restart: always
- entrypoint: /tangle-standalone
- command:
- [
- "--base-path=/data",
- "--validator",
- "--chain=/data/chainspecs/tangle-standalone.json",
- "--",
- "--execution=wasm",
- ]
-
-volumes:
- relayer_data:
- driver: local
- driver_opts:
- type: none
- o: bind
- device: $PWD/relayer/data
- tangle_data:
- driver: local
- driver_opts:
- type: none
- o: bind
- device: $PWD/tangle/
-```
-
-### **5. Set environment variables:**
-
-Prior to spinning up the Docker containers, we need to set some environment variables. Below is an example set of variables,
-but you will need to update the values to reflect your own environment.
-
-```sh filename="export variables" copy
-export TANGLE_RELEASE_VERSION=main
-export RELAYER_RELEASE_VERSION=0.5.0-rc1
-export BASE_PATH=/tmp/data/
-export CHAINSPEC_PATH=/tmp/chainspec
-export WEBB_PORT=9955
-```
-
-### **6. Start Relayer and Validator node:**
-
-With our keys generated and our docker-compose file created, we can now start the relayer and validator node.
-
-```sh filename="compose up" copy
-docker compose up -d
-```
-
-The `docker-compose` file will spin up a container running a Tangle validator node and another running a Webb Relayer.
-
-## Update the Client
-
-As Tangle development continues, it will sometimes be necessary to upgrade your node software. Node operators will be notified
-on our Discord channel when upgrades are available and whether they are necessary (some client upgrades are optional).
-The upgrade process is straightforward and is the same for a full node or validator.
-
-1. Stop the docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop <CONTAINER_ID>
-```
-
-2. Get the latest version of Tangle from the Tangle GitHub Release page
-
-3. Use the latest version to spin up your node. To do so, replace the version in the Full Node or validator command with the latest and run it
-
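-For the docker-compose setup described above, a sketch of this upgrade flow is to bump the release tag and recreate the container (the exact tag comes from the Tangle GitHub Release page; `<latest release tag>` below is a placeholder):
-
-```sh filename="upgrade" copy
-cd ~/webb/tangle
-# Point the compose file at the new release tag
-export TANGLE_RELEASE_VERSION=<latest release tag>
-# Pull the new image and recreate the container
-docker compose pull tangle_standalone
-docker compose up -d
-```
-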
-Once your node is running again, you should see logs in your terminal.
-
-## Purge Your Node
-
-If you need a fresh instance of your Tangle node, you can purge your node by removing the associated data directory.
-
-You'll first need to stop the Docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop <CONTAINER_ID>
-```
-
-If you did not use the `-v` flag to specify a local directory for storing your chain data when you spun up your node, then the data folder is related to the Docker container itself. Therefore, removing the Docker container will remove the chain data.
-
-If you did spin up your node with the `-v` flag, you will need to purge the specified directory. For example, for the suggested data directory, you can run the following command to purge your node data:
-
-```sh filename="rm" copy
-# purges standalone data
-sudo rm -rf /data/chains/*
-```
-
-Now that your chain data has been purged, you can start a new node with a fresh data directory!
-
-## Logs
-
-If you'd like to run the node with verbose logs, you may add the following arguments during initial setup. Adjust the target for the desired logging level (debug | error | info | trace | warn):
-
-```bash
--ldkg=debug \
--ldkg_metadata=debug \
--lruntime::offchain=debug \
--ldkg_proposal_handler=debug \
--ldkg_proposals=debug
-```
diff --git a/pages/docs/ecosystem-roles/relayer/roadmap.mdx b/pages/docs/ecosystem-roles/relayer/roadmap.mdx
deleted file mode 100644
index 5cb5b2ae..00000000
--- a/pages/docs/ecosystem-roles/relayer/roadmap.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
----
-title: Relayer Roadmap
-description: A brief outline of Webb relayer roadmap.
----
-
-# Relayer Roadmap
-
-We aim to build a relayer infrastructure that gradually improves in its performance, efficiency, and overall
-security. We do this in stages by first building the necessary infrastructure to listen to events across a
-connected set of Anchors and react properly across a Bridge to preserve the liveness of the protocol.
-
-### **Version 1 - Multi-sig governance functionality**
-
-The first iteration of the bridge relayer is responsible for event listening and transaction execution.
-
-- Event listening across a set of connected anchors to learn of new deposits on a Bridge.
-- Proposal creation and vote submission over the Bridge's governance process.
-- Data replication and an API for inspecting Anchor state off-chain.
-
-### **Version 2 - Multi-party Threshold governance functionality**
-
-We then plan to integrate a DKG into a Substrate node for future governance purposes.
-
-- Direct integration into a (currently) Substrate chain or potentially other blockchain node (Cosmos-SDK)
- - Bootstraps off existing validator set and participates in consensus.
-- Execution of a distributed key generation protocol to generate a threshold key.
-
-### **Version 3 - Auxiliary staking and punishment functionality**
-
-We will integrate the DKG directly into the Bridge governance system, allowing the bridge to be governed by a threshold key (or many).
-
-- Integrate staking mechanism over finality authorities for registering local key material.
-- Integrate punishment mechanism over improper behavior in the DKG.
-- Develop an on-chain pallet to govern the threshold and generate threshold signatures of `SignatureBridge.sol` governance transactions.
diff --git a/pages/docs/ecosystem-roles/relayer/running-relayer/_meta.json b/pages/docs/ecosystem-roles/relayer/running-relayer/_meta.json
deleted file mode 100644
index 05c00810..00000000
--- a/pages/docs/ecosystem-roles/relayer/running-relayer/_meta.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "quick-start": "Quickstart",
- "running-docker": "Running with Docker",
- "cloud-setup": "Running with Systemd",
- "config-options": "Configuration Options",
- "cli-usage": "CLI Usage",
- "relayer-api": "Relayer API"
-}
diff --git a/pages/docs/ecosystem-roles/relayer/running-relayer/cli-usage.mdx b/pages/docs/ecosystem-roles/relayer/running-relayer/cli-usage.mdx
deleted file mode 100644
index 10bf9222..00000000
--- a/pages/docs/ecosystem-roles/relayer/running-relayer/cli-usage.mdx
+++ /dev/null
@@ -1,94 +0,0 @@
----
-title: CLI Reference
-description: Explore Webb Relayers command line interface.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-
-# Command-Line Reference
-
-When starting up your own Webb relayer, there are some required and optional flags that can be used.
-
-This page provides a comprehensive list of the command-line options available on the Webb relayer.
-
-After installing the Webb [`relayer`](https://github.com/webb-tools/relayer) binary (or pulling it from Docker), you can view the relayer's command line interface (CLI). For a complete list of the available flags, you can spin up your relayer with `--help`
-appended to the end of the command. The command will vary depending on how you choose to spin up your relayer, and whether you're using Docker or Systemd.
-
-
-
-
- ```sh filename="docker run" copy
- docker run --platform linux/amd64 ghcr.io/webb-tools/relayer:0.5.0-rc1 --help
- ```
-
-
-
-
- ```sh filename="help" copy
- # If you used the release binary from github
- ./webb-relayer --help
-
- # Or if you compiled the binary
- ./target/release/webb-relayer --help
- ```
-
-
-
-
-Start the relayer from a config file:
-
-```sh filename="config file" copy
-webb-relayer -vvv -c <path to config file>
-```
-
-USAGE:
-
-```sh filename="usage" copy
-webb-relayer [FLAGS] [OPTIONS]
-```
-
-## Available Flags
-
-The list below outlines the available flags for your convenience.
-
-#### `--help`
-
-Prints help information
-
-```sh filename="help" copy
-webb-relayer --help
-```
-
-#### `--tmp`
-
-Create the database store in a temporary directory; it will be deleted when the process exits
-
-```sh filename="tmp" copy
-webb-relayer --tmp
-```
-
-#### `--version`
-
-Prints relayer version information
-
-```sh filename="version" copy
-webb-relayer --version
-```
-
-#### `--verbose`
-
-Sets the level of verbosity; can be used multiple times
-
-```sh filename="vvvv" copy
-webb-relayer -vvvv
-```
-
-## Available Options
-
-#### `--config-dir <PATH>`
-
-Directory that contains configuration files
-
-```sh filename="config-dir" copy
-webb-relayer --config-dir ./config
-```
diff --git a/pages/docs/ecosystem-roles/relayer/running-relayer/cloud-setup.mdx b/pages/docs/ecosystem-roles/relayer/running-relayer/cloud-setup.mdx
deleted file mode 100644
index d18db051..00000000
--- a/pages/docs/ecosystem-roles/relayer/running-relayer/cloud-setup.mdx
+++ /dev/null
@@ -1,253 +0,0 @@
----
-title: Running with Systemd
-description: Running relayer with Systemd
----
-
-import Callout from "../../../../../components/Callout";
-
-# Running with Systemd
-
-The following guide will walk you through setting up a relayer as a system service. By the end of this document, you will have set up a Webb Relayer at a publicly accessible endpoint behind a reverse proxy, and
-fulfilled the requirements for listing your relayer on `app.webb.tools`.
-
-## Getting Started
-
-<Callout>
-  These instructions assume the user has access to a server on any Linux VM, and is logged in as a user with sudo permissions.
-</Callout>
-
-### Prerequisites
-
-Nginx must be installed on the Linux machine. For instructions on how to install Nginx,
-please visit the official Nginx installation documentation [here](https://docs.nginx.com/nginx/admin-guide/installing-nginx/installing-nginx-open-source/).
-
-## Basic Environment Setup
-
-Following the instructions below, you will be able to run the relayer as a system service.
-
-**Update Ubuntu packages**
-
-```sh filename="apt update" copy
-# Update ubuntu packages
-sudo apt update && sudo apt upgrade
-```
-
-**Update Snap package**
-
-```sh filename="apt install" copy
-# Update snap packages
-sudo apt install -y snapd
-sudo snap install core; sudo snap refresh core
-```
-
-**Install dependencies**
-
-```sh filename="apt install" copy
-# Install dependencies
-sudo apt install gcc cmake pkg-config libssl-dev git clang libclang-dev
-sudo apt install build-essential
-```
-
-**Install Rust**
-
-```sh filename="rust" copy
-# Install rust
-curl https://sh.rustup.rs -sSf | sh -s -- -y
-export PATH=~/.cargo/bin:$PATH
-source ~/.cargo/env
-```
-
-**Install Certbot**
-
-```sh filename="certbot" copy
-# Install certbot
-sudo snap install --classic certbot && sudo ln -s /snap/bin/certbot /usr/bin/certbot
-```
-
-**Build Relayer from source**
-
-```sh filename="build" copy
-# Build from source
-git clone https://github.com/webb-tools/relayer.git
-cd relayer && cargo build --release --features cli
-```
-
-## Creating a System Service
-
-1. Set up the relayer as a system service:
-
-Let's first create a service file for the relayer. Paste the following into the service file, replacing `<user>` with the user that will be running the relayer:
-
-- This assumes the repo has been cloned into the home directory of that user
-
-```sh filename="webb-relayer.service" copy
-sudo tee /etc/systemd/system/webb-relayer.service > /dev/null << EOF
-[Unit]
-Description=WebbRelayer
-
-[Service]
-Type=exec
-WorkingDirectory=/home/<user>/relayer
-ExecStart=cargo run --features cli --bin webb-relayer -- -c /home/<user>/relayer/config/ -vvv
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-2. Enable and start the system service:
-
-```sh filename="enable & start" copy
-sudo systemctl enable webb-relayer && sudo systemctl start webb-relayer
-```
-
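-To confirm the service came up correctly, you can check its status and follow its logs with the standard systemd tooling:
-
-```sh filename="status & logs" copy
-# Check that the service is active
-sudo systemctl status webb-relayer
-# Follow the relayer logs
-sudo journalctl -u webb-relayer -f
-```
-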
-## Nginx Setup
-
-1. Configure your registered domain name with your cloud service provider.
-
-2. Install nginx if it isn't already on your machine:
-
-```sh filename="nginx" copy
-sudo apt install nginx
-```
-
-**First, we will configure the endpoint linked to your domain name at port 80 for certificate generation**
-
-3. Create nginx site files for your domain:
-
-```sh filename="site files" copy
-cd /etc/nginx/sites-available &&
-
-sudo cp default <your-domain> &&
-
-sudo ln -s /etc/nginx/sites-available/<your-domain> /etc/nginx/sites-enabled/<your-domain>
-```
-
-4. Modify the nginx sites-available file to:
-
-```console filename="default" copy
-server {
-    listen 80;
-    listen [::]:80;
-
-    root /var/www/<your-domain>/html;
-    index index.html index.htm index.nginx-debian.html;
-
-    server_name <your-domain>;
-
-    location / {
-        try_files $uri $uri/ =404;
-    }
-}
-```
-
-5. Check the nginx configuration
-
-```sh filename="status nginx" copy
-sudo nginx -t
-```
-
-6. If no issues exist, restart the nginx service:
-
-```sh filename="restart nginx" copy
-sudo systemctl restart nginx
-```
-
-**Next we will create the self-signed certificate and reconfigure for https and wss support**
-
-7. Create the self-signed certificate:
-
-```sh filename="certonly" copy
-sudo certbot certonly --nginx
-```
-
-8. Modify the nginx site file:
-
-```sh filename="site file" copy
-map $http_upgrade $connection_upgrade {
-    default upgrade;
-    '' close;
-}
-
-server {
-
-    # SSL configuration
-    #
-    listen 443 ssl;
-    listen [::]:443 ssl;
-
-    root /var/www/<your-domain>/html;
-
-    server_name <your-domain>;
-    ssl_certificate /etc/letsencrypt/live/<your-domain>/cert.pem;
-    ssl_certificate_key /etc/letsencrypt/live/<your-domain>/privkey.pem;
-    ssl_session_timeout 5m;
-    ssl_protocols SSLv2 SSLv3 TLSv1 TLSv1.1 TLSv1.2;
-    ssl_ciphers HIGH:!aNULL:!MD5;
-    ssl_prefer_server_ciphers on;
-
-    location / {
-        proxy_pass http://127.0.0.1:9955;
-        proxy_pass_request_headers on;
-        proxy_http_version 1.1;
-        proxy_set_header Host $host;
-        proxy_set_header X-Real-IP $remote_addr;
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-        proxy_set_header Upgrade $http_upgrade;
-        proxy_set_header Connection $connection_upgrade;
-    }
-}
-```
-
-9. Check Nginx configuration and restart the service:
-
-```sh filename="restart nginx" copy
-sudo nginx -t && sudo systemctl restart nginx
-```
-
-### Monitoring Setup
-
-Relayers will want to set up monitoring to ensure maximum uptime and automatic restarts when things go awry.
-
-1. Install monit
-
-```sh filename="install monit" copy
-sudo apt install -y monit
-```
-
-2. Modify the monitrc file at: `/etc/monit/monitrc`
-
-```sh filename="monitrc" copy
-set httpd port 2812 and
-use address localhost
-allow localhost
-
-set daemon 10
-set log /var/log/monit.log
-set idfile /var/lib/monit/id
-set statefile /var/lib/monit/state
-set eventqueue
- basedir /var/lib/monit/events
- slots 100
-
-check process webb-relayer matching target/release/webb-relayer
- start program = "/bin/systemctl restart webb-relayer"
- stop program = "/bin/systemctl kill webb-relayer"
- if cpu > 90% for 20 cycles then exec "/bin/systemctl stop webb-relayer" and repeat every 10 cycles
- if cpu > 90% for 64 cycles then exec "/bin/systemctl kill webb-relayer" and repeat every 10 cycles
- if cpu > 90% for 64 cycles then alert
- if does not exist for 1 cycles then start
-```
-
-3. Restart monit and validate:
-
-```sh filename="restart & validate" copy
-sudo monit reload && sudo monit validate
-```
-
-## Dapp Integration
-
-After completing the above steps, submit a PR with changes for your https endpoint in the [Webb Dapp](https://github.com/webb-tools/webb-dapp) repo.
diff --git a/pages/docs/ecosystem-roles/relayer/running-relayer/config-options.mdx b/pages/docs/ecosystem-roles/relayer/running-relayer/config-options.mdx
deleted file mode 100644
index 8c2d0927..00000000
--- a/pages/docs/ecosystem-roles/relayer/running-relayer/config-options.mdx
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: Configuration Options
-description: Configuration options available on the Webb relayer
----
-
-## Configuration Options
-
-**Note:** You can also review the different chain configurations for EVM and Substrate (Tangle Network)
-
-- [`SubstrateConfig`](https://github.com/webb-tools/relayer/blob/develop/config/README.md#substrate-node-configuration)
-- [`EvmChainConfig`](https://github.com/webb-tools/relayer/blob/develop/config/README.md#evm-chain-configuration)
-
-### Chain Configuration
-
-| Field | Description | Optionality |
-| --------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------- |
-| `http-endpoint` | Http(s) Endpoint for quick Request and Response | Required |
-| `ws-endpoint` | Websocket Endpoint for long living connections | Required |
-| `name` | The chain name | Required |
-| `explorer` | Block explorer, used for generating clickable links for transactions that happens on this chain. | Optional |
-| `chain-id` | Chain specific id. | Required |
-| `private-key` | The Private Key of this account on this network. See [PrivateKey Docs for secure setup]() | Required |
-| `beneficiary` | The address of the account that will receive relayer fees. | Optional |
-| `runtime` | Indicates Substrate runtime to use | Required for Substrate |
-| `suri`          | Interprets a string in order to generate a key pair, in the case that the pair can be expressed as a direct derivation from a seed | Required for Substrate |
-| `pallets` | Supported pallets for a particular Substrate node | Optional |
-
-### Contract Configuration
-
-| Field | Description | Optionality |
-| -------------------------- | ------------------------------------------------------------------------------------------ | ----------- |
-| `contract`                 | Chain contract. Must be either `VAnchor` or `SignatureBridge`.                               | Required    |
-| `address` | The address of this contract on this chain. | Required |
-| `deployed-at` | The block number where this contract got deployed at. | Required |
-| `events-watcher` | Control the events watcher for this contract. | Optional |
-| `withdraw-config` | Config the fees and gas limits of your private transaction relayer. | Optional |
-| `proposal-signing-backend` | A value of `ProposalSigningBackend` (for example `{ type = "DKGNode", node = "dkg-node" }`)  | Optional    |
-
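-Putting a few of these fields together, a minimal chain-plus-contract configuration might look like the sketch below. This mirrors the Goerli example used elsewhere in these docs; the endpoints are supplied via environment variables:
-
-```toml filename="sketch" copy
-[evm.goerli]
-name = "goerli"
-http-endpoint = "$GOERLI_HTTPS_URL"
-ws-endpoint = "$GOERLI_WSS_URL"
-chain-id = 5
-enabled = true
-block-confirmations = 2
-
-[[evm.goerli.contracts]]
-contract = "VAnchor"
-address = "0x98c1024dfd61a8f7439108acedcb51a27754f2af"
-deployed-at = 7865473
-events-watcher = { enabled = true, polling-interval = 15000 }
-```
-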
-For specific examples, please refer to either data-querying setup, private transaction setup, or signature relaying setup docs.
diff --git a/pages/docs/ecosystem-roles/relayer/running-relayer/quick-start.mdx b/pages/docs/ecosystem-roles/relayer/running-relayer/quick-start.mdx
deleted file mode 100644
index 68475435..00000000
--- a/pages/docs/ecosystem-roles/relayer/running-relayer/quick-start.mdx
+++ /dev/null
@@ -1,212 +0,0 @@
----
-title: Relayer Quickstart
-description: Get up to speed for running the relayer.
----
-
-import Callout from "../../../../../components/Callout";
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import RelayerExampleConfig from "../../../../../components/images/relayer/ExampleConfig";
-
-# Relayer Quickstart
-
-You can spin up the Relayer in a few simple commands. This guide will walk you through the steps to get started.
-
-By the end of this guide, you will have a running relayer that is publicly accessible via HTTPS endpoint behind a reverse proxy.
-
-## Prerequisites
-
-Before you begin, ensure that you have the following prerequisites:
-
-- A Unix-based operating system (Linux, macOS, WSL, etc.)
-- curl
-- Hardware requirements:
-  - Minimum 2 cores CPU
-  - 4 GB RAM
-  - 50 GB free disk space (preferably SSD)
-- A publicly accessible domain name (optional, but recommended)
-
-## Installation
-
-1. Open your terminal and run the following command to download the latest version of the relayer:
-
-```sh filename="download latest version" copy
-curl -fsSL https://git.io/get-webb-relayer.sh | sh
-```
-
-The script will download the relayer binary (or update it if it already exists) and place it in your `$HOME/.webb` directory. The script will also suggest adding the directory to your `PATH` environment variable.
-
-Alternatively, if you wish to download a specific version of the relayer, run the following command:
-
-```sh filename="download specific version" copy
-curl -fsSL https://git.io/get-webb-relayer.sh | sh -s <version>
-```
-
-### Verify Installation
-
-2. Verify that the relayer was installed successfully by running the following command:
-
-```sh filename="verify version" copy
-~/.webb/webb-relayer --version
-```
-
-## Configuration
-
-Before running the relayer, you will need to configure it. The relayer can be configured using TOML or JSON files, along with environment variables.
-The relayer will look for configuration files in the default locations, but you can also specify the path to the configuration file using the `--config` flag.
-
-- Default locations for configuration files:
-
- - `$HOME/.config/webb-relayer/` (Linux)
- - `$HOME/Library/Application Support/tools.webb.webb-relayer/` (macOS)
-
-For simplicity, we will use the TOML configuration file format in this guide, and we will place the configuration file in the default location.
-
-### Download the Example Configuration File
-
-To download the example configuration file, run the following command:
-
-
-
-
- ```sh filename="download" copy
- curl -fsSL https://raw.githubusercontent.com/webb-tools/relayer/main/config/example/config.toml -o ~/.config/webb-relayer/config.toml
- ```
-
-
-
-
- ```sh filename="download" copy
- curl -fsSL https://raw.githubusercontent.com/webb-tools/relayer/main/config/example/config.toml -o ~/Library/Application\ Support/tools.webb.webb-relayer/config.toml
- ```
-
-
-
-
-### Configure the Relayer
-
-Open the configuration file in your favorite text editor and take a look at the configuration options. The configuration file is well-documented and should be self-explanatory.
-
-<RelayerExampleConfig />
-
-### Environment Variables
-
-The relayer can also be configured using environment variables, which override the values in the configuration file.
-
-For example, to override the `port` value from the configuration file, you can set the `WEBB_PORT` environment variable:
-
-```sh filename="export" copy
-export WEBB_PORT=9955
-```
-
-For our example configuration file, the following environment variables are required:
-
-- `PRIVATE_KEY` - The private key of the relayer. This can be a hex-encoded private key or a mnemonic phrase.
-
-Create a new file called `.env` in your current working directory and add the following line:
-
-```sh filename="PRIVATE_KEY" copy
-# Hex-encoded private key for the relayer (64 characters), prefixed with 0x
-PRIVATE_KEY="0x..."
-```
-
-Save the file and exit the editor.
-
-## Running the Relayer
-
-To run the relayer, run the following command:
-
-```sh filename="vvv" copy
-~/.webb/webb-relayer -vvv
-```
-
-<Callout>
-  The `--tmp` flag will start the relayer with a temporary database. This is useful for testing, but you should not use this flag in production.
-</Callout>
-
-You should see the following output:
-
-```rust filename="output"
- 2023-03-14T14:13:08.315804Z DEBUG webb_relayer_config::cli: Getting default dirs for webb relayer
- at crates/relayer-config/src/cli.rs:61
-
- 2023-03-14T14:13:08.316568Z DEBUG webb_relayer_config::cli: Using temp dir for store
- at crates/relayer-config/src/cli.rs:131
-
- 2023-03-14T14:13:08.352455Z INFO webb_relayer::service: Starting the server on 0.0.0.0:9955
- at services/webb-relayer/src/service.rs:141
-
- 2023-03-14T14:13:08.355367Z DEBUG webb_relayer::service: Starting Background Services for (goerli) chain.
- at services/webb-relayer/src/service.rs:172
-
- 2023-03-14T14:13:08.880637Z WARN webb_relayer::service: Tx Queue disabled for (5)
- at services/webb-relayer/src/service.rs:1053
-
- 2023-03-14T14:13:08.880641Z DEBUG webb_relayer::service: VAnchor events watcher for (0x9678…5077) Started.
- at services/webb-relayer/src/service.rs:648
-
- 2023-03-14T14:13:08.880660Z DEBUG webb_probe: kind: lifecycle, started: true
- at services/webb-relayer/src/main.rs:80
-
- 2023-03-14T14:13:08.880666Z WARN webb_relayer::service: Governance relaying is not enabled for relayer
- at services/webb-relayer/src/service.rs:1197
-
- 2023-03-14T14:13:16.286918Z INFO webb_event_watcher_traits::evm::event_watcher: 🔄 #8650987 of #8654131 (99.9637%)
- at crates/event-watcher-traits/src/evm/event_watcher.rs:218
- in webb_event_watcher_traits::evm::event_watcher::run with chain_id: Ok(5), address: 0x9678…5077, tag: VAnchor Contract Watcher
-
- 2023-03-14T14:13:23.496016Z INFO webb_event_watcher_traits::evm::event_watcher: 🔄 #8653887 of #8654131 (99.9972%)
- at crates/event-watcher-traits/src/evm/event_watcher.rs:218
- in webb_event_watcher_traits::evm::event_watcher::run with chain_id: Ok(5), address: 0x9678…5077, tag: VAnchor Contract Watcher
-
- 2023-03-14T14:13:39.302727Z INFO webb_event_watcher_traits::evm::event_watcher: 🔄 #8654129 of #8654131 (100.0000%)
- at crates/event-watcher-traits/src/evm/event_watcher.rs:218
- in webb_event_watcher_traits::evm::event_watcher::run with chain_id: Ok(5), address: 0x9678…5077, tag: VAnchor Contract Watcher
-
-```
-
-### Verifying the Relayer is Running
-
-To verify that the relayer is running, you can use the `/api/v1/ip` endpoint:
-
-
-
-
- ```sh filename="local" copy
- curl http://localhost:9955/api/v1/ip
- ```
-
-
-
-
- ```sh filename="server" copy
-  curl http://<server-ip>:9955/api/v1/ip
- ```
-
-
-
-
-You should see the following output:
-
-```json filename="public-ip" copy
-{"ip":""}
-```
-
-**Congratulations**! You have successfully started the relayer.
-
-## What's Next?
-
-Now that you have the relayer running, you need to run it behind a reverse proxy. You can use [Caddy](https://caddyserver.com/) or [Nginx](https://www.nginx.com/) for this.
-
-Once you have the relayer running behind a reverse proxy, you should be able to access it from your domain name. For example, if you are running the relayer on `relayer.example.com`, you should be able to access the relayer at `https://relayer.example.com/api/v1/ip`.
-
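-As a minimal sketch of the Caddy approach (mirroring the fuller Caddyfile in the Docker deployment guide later in these docs), the reverse proxy entry can be as short as:
-
-```sh filename="Caddyfile" copy
-relayer.example.com {
-    reverse_proxy 127.0.0.1:9955
-}
-```
-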
-After that, you may want to configure the relayer to support more chains. You can find more information about that in our [Relayer Configuration examples](https://github.com/webb-tools/relayer/tree/main/config).
-
-## Dapp Integration
-
-After completing the above steps, submit a PR with changes for your https endpoint in the [Webb Dapp](https://github.com/webb-tools/webb-dapp) repo.
diff --git a/pages/docs/ecosystem-roles/relayer/running-relayer/relayer-api.mdx b/pages/docs/ecosystem-roles/relayer/running-relayer/relayer-api.mdx
deleted file mode 100644
index 6896b72e..00000000
--- a/pages/docs/ecosystem-roles/relayer/running-relayer/relayer-api.mdx
+++ /dev/null
@@ -1,104 +0,0 @@
----
-title: Relayer API
-description: Explore Webb Relayers API.
----
-
-# Relayer API
-
-The relayer has four endpoints available to query. They are outlined below for your convenience.
-
-## **Retrieving the node's IP address:**
-
-```sh filename="Retrieving IP address" copy
-/api/v1/ip
-```
-
-**Expected Response:**
-
-```json filename="respose"
-{
- "ip": "127.0.0.1"
-}
-```
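-
-For example, against a relayer running locally on the default port (9955, as used in the quickstart), this endpoint can be queried with a plain `curl` call:
-
-```sh filename="curl" copy
-curl http://localhost:9955/api/v1/ip
-```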
-
-## **Retrieve relayer configuration**
-
-```sh filename="Retrieving info" copy
-/api/v1/info
-```
-
-**Expected Response:**
-
-```json filename="respose"
-{
- "evm": {
- "rinkeby": {
- "enabled": true,
- "chainId": 4,
- "beneficiary": "0x58fcd47ece3ed24ace88fee06efd90dcb38f541f",
- "contracts": [{
- "contract": "Anchor",
- "address": "0x626fec5ffa7bf1ee8ced7dabde545630473e3abb",
- "deployedAt": 8896800,
- "eventsWatcher": {
- "enabled": true,
- "pollingInterval": 15000
- },
- "size": 0.1,
- "proposalSigningBackend": { "type": "DKGNode", "node": "dkg-local" },
- "withdrawFeePercentage": 0.05
- }]
- }
- },
- "substrate": {},
- "experimental": {
- "smart-anchor-updates": false,
- "smart-anchor-updates-retries": 0
- }
-}
-```
-
-## **Retrieve historical leaves cache**
-
-##### Parameters
-
-- `target_system` (Could be `evm` or `substrate`).
-- `chain_id`
-- `contract_address`
-
-##### For evm
-
-```sh filename="evm" copy
-/api/v1/leaves/evm/4/0x626fec5ffa7bf1ee8ced7dabde545630473e3abb
-```
-
-##### For substrate
-
-> Note: Since Substrate doesn't have a contract address, we use the `tree_id`
-
-```sh filename="tree_id" copy
-/api/v1/leaves/substrate/4/9
-```
-
-**Expected Response:**
-
-```json filename="respose"
-{
- "leaves": ["0x2e5c62af48845c095bfa9b90b8ec9f6b7bd98fb3ac2dd3039050a64b919951dd", "0x0f89f0ef52120b8db99f5bdbbdd4019b5ea4bcfef14b0c19d261268da8afdc24", "0x3007c62f678a503e568534487bc5b0bc651f37bbe1f34668b4c8a360f15ba3c3"],
- "lastQueriedBlock": "0x9f30a8"
-}
-```
-
-## **Retrieve Metrics information**
-
-```sh filename="metrics" copy
-/api/v1/metrics
-```
-
-**Expected Response:**
-
-```json filename="respose"
- {
- "metrics": "# HELP bridge_watcher_back_off_metric specifies how many times the bridge watcher backed off\n# TYPE bridge_watcher_back_off_metric counter\nbridge_watcher_back_off_metric 0\n# HELP gas_spent_metric The total number of gas spent\n# TYPE gas_spent_metric counter\ngas_spent_metric 0\n# HELP handle_proposal_execution_metric How many times did the function handle_proposal get executed\n# TYPE handle_proposal_execution_metric counter\nhandle_proposal_execution_metric 0\n# HELP proposal_queue_attempt_metric How many times a proposal is attempted to be queued\n# TYPE proposal_queue_attempt_metric counter\nproposal_queue_attempt_metric 0\n# HELP total_active_relayer_metric The total number of active relayers\n# TYPE total_active_relayer_metric counter\ntotal_active_relayer_metric 0\n# HELP total_fee_earned_metric The total number of fees earned\n# TYPE total_fee_earned_metric counter\ntotal_fee_earned_metric 0\n# HELP total_number_of_data_stored_metric The Total number of data stored\n# TYPE total_number_of_data_stored_metric counter\ntotal_number_of_data_stored_metric 1572864\n# HELP total_number_of_proposals_metric The total number of proposals proposed\n# TYPE total_number_of_proposals_metric counter\ntotal_number_of_proposals_metric 0\n# HELP total_transaction_made_metric The total number of transaction made\n# TYPE total_transaction_made_metric counter\ntotal_transaction_made_metric 0\n# HELP transaction_queue_back_off_metric How many times the transaction queue backed off\n# TYPE transaction_queue_back_off_metric counter\ntransaction_queue_back_off_metric 0\n"
- }
-```
diff --git a/pages/docs/ecosystem-roles/relayer/running-relayer/running-docker.mdx b/pages/docs/ecosystem-roles/relayer/running-relayer/running-docker.mdx
deleted file mode 100644
index bb8c5321..00000000
--- a/pages/docs/ecosystem-roles/relayer/running-relayer/running-docker.mdx
+++ /dev/null
@@ -1,330 +0,0 @@
----
-title: Deploying with Docker
-description: Deploy a Webb relayer with only a few steps.
----
-
-import Callout from "../../../../../components/Callout";
-
-# Deploying using Docker
-
-A Webb relayer can be spun up quickly using Docker. For more information on installing Docker,
-please visit the official Docker [docs](https://docs.docker.com/get-docker/).
-
-By the end of this document, you will have set up a Webb Relayer at a publicly accessible endpoint behind a reverse proxy, and
-fulfilled the requirements for listing your relayer on `app.webb.tools`.
-
-## Getting Started
-
-<Callout>
-  These instructions assume the user has access to a server on any Linux VM, and is logged in as a user with sudo permissions.
-</Callout>
-
-Before we begin, we want to `ssh` into the VM and update the system using the specified system package manager:
-
-```sh filename="apt update" copy
-# Update packages
-sudo apt update && sudo apt upgrade
-```
-
-### Prerequisites
-
-Docker must be installed on the Linux machine. For instructions on how to install Docker,
-please visit the official Docker installation documentation [here](https://docs.docker.com/desktop/install/linux-install/).
-
-## Configuring Relayer for Docker
-
-Before building and running a Webb relayer, we need to create a set of configuration files so the relayer is aware of the smart
-contracts and the features we intend to make use of. There are three distinct configurations to consider; for an overview of each of them, please refer
-to the Relayer Overview page [here]().
-
-Let's create a new directory called `webb-relayer`. This is where we will store all our configuration files and secrets.
-
-```sh filename="mkdir" copy
-mkdir -p ~/webb-relayer/{config,data,secrets}
-```
-
-### Data Query Configuration
-
-This section will outline the minimum requirements to set up a Webb relayer for data querying purposes. For an extensive review of all data querying
-configuration and available endpoints, please refer to the data-querying page [here](/docs/relayer/data-querying), the private transaction relaying page [here](/docs/ecosystem-roles/relayer/private-tx/), or the
-signature relaying page [here](/docs/ecosystem-roles/relayer/anchor-update/).
-
-We want to create a `toml` file to outline our configuration details:
-
-```sh filename="nano" copy
-# main.toml
-nano ~/webb-relayer/config/main.toml
-```
-
-Let's update it to include the required fields for data querying:
-
-```toml filename="main.toml" copy
-# Webb Relayer Network Port
-# default: 9955
-port = 9955
-
-[features]
-governance-relay = false
-data-query = true
-private-tx-relay = false
-```
-
-For this example, we will use ETH Goerli Testnet (`chain_id = 5`).
-Create a file for the configuration related to this chain:
-
-```sh filename="nano" copy
-# goerli.toml
-nano ~/webb-relayer/config/goerli.toml
-```
-
-Next, we want to add the required fields to query data from the Anchor deployed
-on the Goerli Testnet. For an exhaustive list of configuration options, please refer to the [Configuration Options]() doc. Let's add the following to the file:
-
-```toml filename="goerli.toml" copy
-# Block which represents properties for a network
-[evm.goerli]
-name = "goerli"
-
-# Http(s) Endpoint for quick Req/Res
-http-endpoint = "$GOERLI_HTTPS_URL"
-
-# Websocket Endpoint for long living connections
-ws-endpoint = "$GOERLI_WSS_URL"
-
-# Block Explorer
-# Optional, and used for generating clickable links
-# for transactions that happen on this chain.
-explorer = "https://goerli.etherscan.io"
-
-# chain specific id from evm opcode
-chain-id = 5
-
-# Value to indicate that the relayer should enable services for this chain
-enabled = true
-
-# Number of block confirmations required before an event is processed.
-block-confirmations = 2
-
-# Block which represents the configuration for a supported contract on the network
-[[evm.goerli.contracts]]
-# The contract can be one of these values
-# - VAnchor (Variable Anchor)
-# - SignatureBridge
-contract = "VAnchor"
-
-# The address of the contract
-address = "0x98c1024dfd61a8f7439108acedcb51a27754f2af"
-
-# The deployed block number of the contract. When a relayer does not have information for
-# this contract in its store, it will start to sync and query for relevant historical data
-# starting at this block number
-deployed-at = 7865473
-
-# Configuration for the events watcher of this contract. The events-watcher can be switched on/off
-# and the polling interval specifies the period of time (in ms) that the events-watcher thread
-# will wait before issuing another query for new events.
-events-watcher = { enabled = true, polling-interval = 15000 }
-```
-
-#### Setting up `.env` File
-
-As you may have noticed, there are a few environment variables inside the configuration file,
-and we will have to supply them. To do so, let's create a `.env` file with these values:
-
-```sh filename="nano" copy
-nano ~/webb-relayer/.env
-```
-
-Add the following fields:
-
-```sh filename=".env" copy
-# The internal Webb Relayer Port
-# this will not be the public port, but will be used internally
-# inside docker.
-WEBB_PORT=9955
-# Change this version to the latest Docker image by the relayer
-# from here: https://github.com/webb-tools/relayer/pkgs/container/relayer/versions?filters%5Bversion_type%5D=tagged
-RELAYER_VERSION=0.5.0-rc1
-# These are related to the Reverse Proxy we will use.
-# Your email address. Mainly used when creating an ACME
-# account with your CA, and is highly recommended
-# in case there are problems with your certificates.
-LETSENCRYPT_EMAIL=me@example.com
-# This will be your domain for the relayer
-# Make sure it points to the public IP for your VM.
-DOMAIN=example.com
-# Here you can provide or use your infura.io RPC endpoints,
-# But for this example, we will use a free-to-use public RPC endpoints
-# From https://chainlist.org/chain/5
-INFURA_PROJECT_ID=1234567890
-GOERLI_HTTPS_URL=https://goerli.infura.io/v3/${INFURA_PROJECT_ID}
-GOERLI_WSS_URL=wss://goerli.infura.io/ws/v3/${INFURA_PROJECT_ID}
-```
-
-Great, we have successfully set up our data-querying configuration. Now we need to set up the Docker containers that will
-run the reverse proxy and the Webb relayer.
-
-## Creating Docker Compose File
-
-This section will cover setting up the required Docker services in a docker-compose file. Let's start by creating
-a docker-compose file:
-
-```sh filename="nano" copy
-nano ~/webb-relayer/docker-compose.yml
-```
-
-Add the following lines:
-
-```yaml filename="docker-compose.yml" copy
-version: "3"
-
-services:
- caddy:
- image: caddy:alpine
- container_name: caddy
- restart: unless-stopped
- env_file: .env
- ports:
- - "80:80"
- - "443:443"
- volumes:
- - $PWD/config/Caddyfile:/etc/caddy/Caddyfile
- - caddy_data:/data
- - caddy_config:/config
-
- webb_relayer:
- image: ghcr.io/webb-tools/relayer:$RELAYER_VERSION
- container_name: webb_relayer
- env_file: .env
- depends_on:
- - caddy
- ports:
- - "$WEBB_PORT:$WEBB_PORT"
- volumes:
- - $PWD/config:/config
- - relayer_data:/store
- restart: always
- command: /webb-relayer -vvv -c /config
-
-volumes:
- relayer_data:
- driver: local
- driver_opts:
- type: none
- o: bind
- device: $PWD/data
- caddy_data:
- driver: local
- driver_opts:
- type: none
- o: bind
- device: $PWD/secrets
- caddy_config:
-```
-
-### Caddy Reverse Proxy
-
-This guide makes use of [Caddy](https://caddyserver.com/) as a reverse proxy. Caddy is a powerful reverse proxy that
-is incredibly easy to set up with only a few lines. Let's take a look at the configuration:
-
-```sh filename="nano" copy
-nano ~/webb-relayer/config/Caddyfile
-```
-
-Add the following lines:
-
-```sh filename="Caddyfile" copy
-{
-  # Uncomment the line below to enable debug logging; it can be helpful
-  # but noisy.
- #debug
- email {$LETSENCRYPT_EMAIL}
- # Here we are using the staging server to test our setup
- # but you can remove this line to use the production server
- # when you are ready to go live
- acme_ca https://acme-staging-v02.api.letsencrypt.org/directory
- # If you are testing this locally, in your machine, or if you are
- # not using a domain name yet, you can turn off the Auto HTTPS feature
- # by uncommenting the line below
- #
- # auto_https off
-}
-
-relayer.{$DOMAIN} {
- reverse_proxy {
- to http://webb_relayer:{$WEBB_PORT}
- }
-}
-```
-
-We have now successfully set up the reverse proxy, and we are ready to run the relayer.
-
-## Running the Relayer
-
-Go to `~/webb-relayer` and then fire up the following command:
-
-```sh filename="compose up" copy
-cd ~/webb-relayer
-# Then run docker
-docker compose up # add -d if you want to run it in the background.
-```
-
-You should see a lot of logs, and if everything is working correctly, you should be able to query the relayer's
-endpoint to view the configuration we outlined above.
-
-### Test the Setup
-
-Let's make sure we have successfully setup a data-querying relayer. To do so, we will query the relayer's endpoint:
-
-```sh filename="test"
-# Replace this with the domain name that you added inside the .env file.
-# If running locally, assume the DOMAIN is localhost:9955
-https://$DOMAIN/api/v1/info
-```
-
-If everything is working correctly, you should see a response similar to this:
-
-```sh filename="response"
-{
- evm: {
- "5": {
- name: "goerli",
- enabled: true,
- chainId: 5,
- beneficiary: null,
- contracts: [
- {
- contract: "VAnchor",
- address: "0x98c1024dfd61a8f7439108acedcb51a27754f2af",
- deployedAt: 7865473,
- eventsWatcher: {
- "enable-data-query": true,
- enabled: true,
- pollingInterval: 15000,
- },
- withdrawConfig: null,
- proposalSigningBackend: null,
- linkedAnchors: null,
- },
- ],
- },
- },
- substrate: {},
- cosmwasm: {},
- experimental: {
- "smart-anchor-updates": false,
- "smart-anchor-updates-retries": 0,
- },
- features: {
- "data-query": true,
- "governance-relay": false,
- "private-tx-relay": false,
- },
-}
-```
-
-To configure your relayer further, checkout our [Config directory](https://github.com/webb-tools/relayer/tree/main/config) in our GitHub.
-
-## Dapp Integration
-
-After completing the above steps, submit a PR with changes for your https endpoint in the [Webb Dapp](https://github.com/webb-tools/webb-dapp) repo.
diff --git a/pages/docs/ecosystem-roles/role-overview.mdx b/pages/docs/ecosystem-roles/role-overview.mdx
deleted file mode 100644
index 095436ed..00000000
--- a/pages/docs/ecosystem-roles/role-overview.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
-# Roles and Functions
-
-The Webb ecosystem encompasses multiple systems, protocols and roles that enable enhanced security and privacy across our use cases. Infrastructure providers can mix and match different functions to participate in, with varying responsibilities and rewards.
-Some of these roles may participate in networks like Tangle Network, others may participate in zkApps, or both.
-
-First, let's look at the functions that infrastructure providers (roles) accomplish:
-
-#### 1. Validating
-
-Validators are nodes in the network tasked with the important role of ensuring that transactions and blocks adhere to the necessary rules and protocols of the network. They verify transaction signatures, prevent double-spending, and ensure correctly formatted new blocks.
-
-#### 2. Light Client Relaying
-
-Light Client Relayers bridge the gap between light clients and full nodes in a blockchain network. They enable light clients, which only download a fraction of the blockchain, to validate transactions and participate in the network without downloading the entire blockchain.
-
-#### 3. Anchor Update Relaying
-
-Anchor Update Relayers propose changes and relay these proposals within the Anchor system. They monitor changes, propose updates to the Distributed Key Generation (DKG) system based on new data insertions, and relay these proposals once signed, playing a vital role in the governance of the Anchor system.
-
-#### 4. Private Transaction Relaying
-
-Private Transaction Relayers assist users with privacy-preserving transactions within the Shielded Pools of the Webb Protocol. They submit proofs for private transactions on behalf of the users, ensuring the smooth execution of these transactions.
-
-### Recommended Roles and their Functions
-
-#### Validators
-
-A validator is a network participant responsible for verifying and validating new transactions and adding them to the blockchain.
-
-We recommend validators participate in the following:
-
-- Consensus Validation
-- Light Client Relaying
-- Governance Relaying
-
-#### Relayers
-
-In the Webb Protocol, a Relayer performs multiple critical functions, acting as an oracle, a data relayer, and a participant in protocol governance. While a Relayer node is set to perform all of these roles by default, you can choose to participate in one or several types of relaying shown above.
diff --git a/pages/docs/ecosystem-roles/validator/_meta.json b/pages/docs/ecosystem-roles/validator/_meta.json
deleted file mode 100644
index d0b606d8..00000000
--- a/pages/docs/ecosystem-roles/validator/_meta.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "quickstart": "Quickstart",
- "validation": "Validator Overview",
- "validator-rewards": "Validator Rewards",
- "required-keys": "Required Keys",
- "requirements": "Hardware Requirements",
- "deploy-with-docker": "Deploying with Docker",
- "systemd": "Running with Systemd",
- "monitoring": "Node Monitoring",
- "api-reference": "API Reference",
- "troubleshooting": "Troubleshooting"
-}
diff --git a/pages/docs/ecosystem-roles/validator/api-reference/_meta.json b/pages/docs/ecosystem-roles/validator/api-reference/_meta.json
deleted file mode 100644
index 47ee08e8..00000000
--- a/pages/docs/ecosystem-roles/validator/api-reference/_meta.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "cli": "CLI Options"
-}
diff --git a/pages/docs/ecosystem-roles/validator/api-reference/cli.mdx b/pages/docs/ecosystem-roles/validator/api-reference/cli.mdx
deleted file mode 100644
index 27bb4151..00000000
--- a/pages/docs/ecosystem-roles/validator/api-reference/cli.mdx
+++ /dev/null
@@ -1,409 +0,0 @@
----
-title: CLI Reference
-description: Explore Webb's command line interface.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-
-# Command-Line Reference
-
-When starting up your own Tangle node, there are some required and optional flags that can be used.
-
-This page outlines the most common flags used. For exhaustive documentation of all available flags and options, please refer to
-the official Substrate documentation [here](https://substrate.io/), as well as the list of out-of-the-box command-line tools that
-ship with all Substrate-based nodes, including the Tangle node, [here](https://docs.substrate.io/reference/command-line-tools/).
-
-After installing the [`tangle`](/repo/docs/getting-started/add-to-project) binary (or pulling it from Docker), you can view the Tangle command line interface (CLI). For a complete list of the available flags, you can spin up your Tangle node with `--help`
-added to the end of the command. The command will vary depending on how you choose to spin up your node, and whether you're using Docker or Systemd.
-
-
-
-
- ```sh filename="help" copy
- docker run --platform linux/amd64 --network="host" -v "/var/lib/data" --entrypoint ./tangle-standalone \
- ghcr.io/webb-tools/tangle/tangle-standalone:main \
- --help
- ```
-
-
-
-
- ```sh filename="help" copy
- # If you used the release binary
- ./tangle-standalone --help
-
- # Or if you compiled the binary
- ./target/release/tangle-standalone --help
- ```
-
-
-
-
-If you have compiled the tangle-parachain binary, it's important to note that the command-line arguments
-provided first will be passed to the parachain node, while the arguments
-provided after `--` will be passed to the relay chain node.
-
-```sh filename="args" copy
-tangle-parachain <parachain args> -- <relay chain args>
-```
-
-USAGE:
-
-```sh filename="usage" copy
-tangle-parachain [OPTIONS] [-- <relay chain args>...]
-tangle-parachain <SUBCOMMAND>
-```
-
-## Common Flags
-
-The list below outlines the most commonly used flags for your convenience.
-
-#### `--alice`
-
-Shortcut for `--name Alice --validator` with session keys for `Alice` added to keystore. Commonly
-used for development or local test networks.
-
-```sh filename="alice" copy
-tangle-standalone --alice
-```
-
-#### `--blocks-pruning <COUNT>`
-
-Specify the blocks pruning mode: a number of blocks to keep, or 'archive'.
-
-Default is to keep all finalized blocks. Otherwise, all blocks can be kept (i.e.
-'archive'), all canonical blocks can be kept (i.e. 'archive-canonical'), or only the last N
-blocks (i.e. a number).
-
-NOTE: only finalized blocks are subject to removal!
-
-```sh filename="blocks-pruning" copy
-tangle-standalone --blocks-pruning 120
-```
-
-#### `--bob`
-
-Shortcut for `--name Bob --validator` with session keys for `Bob` added to keystore. Commonly
-used for development or local test networks.
-
-```sh filename="bob" copy
-tangle-standalone --bob
-```
-
-#### `--bootnodes`
-
-Specify a list of bootnodes.
-
-```sh filename="bootnodes" copy
-tangle-standalone --bootnodes /ip4/127.0.0.1/tcp/30333/p2p/12D3KooWAWueKNxuNwMbAtss3nDTQhMg4gG3XQBnWdQdu2DuEsZS
-```
-
-#### `--chain <CHAIN_SPEC>`
-
-Specify the chain specification.
-
-It can be one of the predefined ones (dev, local, or staging) or it can be a path to a
-file with the chainspec (such as one exported by the `build-spec` subcommand).
-
-```sh filename="local" copy
-tangle-standalone --chain standalone-local
-```
-
-#### `--charlie`
-
-Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to keystore. Commonly
-used for development or local test networks.
-
-```sh filename="charlie" copy
-tangle-standalone --charlie
-```
-
-#### `--collator`
-
-Run node as collator. (Not applicable at this time.)
-
-Note that this is the same as running with `--validator`.
-
-```sh filename="collator" copy
-tangle-standalone --collator
-```
-
-#### `-d, --base-path <PATH>`
-
-Specify custom base path.
-
-```sh filename="base path" copy
-tangle-standalone --base-path /data
-```
-
-#### `--db-cache <MiB>`
-
-Limit the memory the database cache can use
-
-```sh filename="db-cache" copy
-tangle-standalone --db-cache 128
-```
-
-#### `--detailed-log-output`
-
-Enable detailed log output.
-
-This includes displaying the log target, log level and thread name.
-
-This is automatically enabled when something is logged with any higher level than
-`info`.
-
-```sh filename="log-output" copy
-tangle-standalone --detailed-log-output
-```
-
-#### `--dev`
-
-Specify the development chain.
-
-This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, `--alice`, and
-`--tmp` flags, unless explicitly overridden.
-
-```sh filename="dev" copy
-tangle-standalone --dev
-```
-
-#### `--execution <STRATEGY>`
-
-The execution strategy that should be used by all execution contexts.
-
-[possible values: native, wasm, both, native-else-wasm]
-
-`native` - only execute with the native build
-`wasm` - only execute with the Wasm build
-`both` - execute with both native and Wasm builds
-`native-else-wasm` - execute with the native build if possible, and if it fails, execute with Wasm
-
-```sh filename="wasm" copy
-tangle-standalone --execution wasm
-```
-
-#### `--force-authoring`
-
-Enable authoring even when offline.
-
-```sh filename="authoring" copy
-tangle-standalone --force-authoring
-```
-
-#### `--keystore-path <PATH>`
-
-Specify a custom keystore path.
-
-```sh filename="keystore path" copy
-tangle-standalone --keystore-path /tmp/chain/data/
-```
-
-#### `--keystore-uri <KEYSTORE_URI>`
-
-Specify custom URIs to connect to for keystore services.
-
-```sh filename="keystore url" copy
-tangle-standalone --keystore-uri foo://example.com:8042/over/
-```
-
-#### `--name <NAME>`
-
-The human-readable name for this node.
-
-The node name will be reported to the telemetry server, if enabled.
-
-```sh filename="name" copy
-tangle-standalone --name zeus
-```
-
-#### `--node-key <KEY>`
-
-The secret key to use for libp2p networking.
-
-The value is a string that is parsed according to the choice of `--node-key-type` as
-follows:
-
-`ed25519`: The value is parsed as a hex-encoded Ed25519 32 byte secret key, i.e. 64 hex
-characters.
-
-The value of this option takes precedence over `--node-key-file`.
-
-WARNING: Secrets provided as command-line arguments are easily exposed. Use of this
-option should be limited to development and testing. To use an externally managed secret
-key, use `--node-key-file` instead.
-
-```sh filename="node-key" copy
-tangle-standalone --node-key b6806626f5e4490c27a4ccffed4fed513539b6a455b14b32f58878cf7c5c4e68
-```
-
-#### `--node-key-file <FILE>`
-
-The file from which to read the node's secret key to use for libp2p networking.
-
-The contents of the file are parsed according to the choice of `--node-key-type` as
-follows:
-
-`ed25519`: The file must contain an unencoded 32 byte or hex encoded Ed25519 secret key.
-
-If the file does not exist, it is created with a newly generated secret key of the
-chosen type.
-
-```sh filename="node-key-file" copy
-tangle-standalone --node-key-file ./node-keys-file/
-```
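-
-If you need to create a node key ahead of time for use with `--node-key-file`, the `key` subcommand (listed under Subcommands below) can generate one. A sketch assuming the standard Substrate `generate-node-key` helper:
-
-```sh filename="generate-node-key" copy
-# Writes a new Ed25519 secret key to the given file and prints the peer ID
-tangle-standalone key generate-node-key --file ./node-key
-```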
-
-#### `--port <PORT>`
-
-Specify the p2p protocol TCP port.
-
-```sh filename="port" copy
-tangle-standalone --port 30333
-```
-
-#### `--prometheus-external`
-
-Expose Prometheus exporter on all interfaces.
-
-Default is local.
-
-```sh filename="prometheus" copy
-tangle-standalone --prometheus-external
-```
-
-#### `--prometheus-port <PORT>`
-
-Specify the Prometheus exporter TCP port.
-
-```sh filename="prometheus-port" copy
-tangle-standalone --prometheus-port 9090
-```
-
-#### `--rpc-cors <ORIGINS>`
-
-Specify browser Origins allowed to access the HTTP & WS RPC servers.
-
-A comma-separated list of origins (protocol://domain or special `null` value). Value of
-`all` will disable origin validation. Default is to allow localhost and
-https://polkadot.js.org origins. When running in --dev mode the default is to allow all origins.
-
-```sh filename="rpc-cors" copy
-tangle-standalone --rpc-cors "*"
-```
-
-#### `--rpc-external`
-
-Listen to all RPC interfaces.
-
-Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC
-proxy server to filter out dangerous methods. More details:
-https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs. Use
-`--unsafe-rpc-external` to suppress the warning if you understand the risks.
-
-```sh filename="rpc-external" copy
-tangle-standalone --rpc-external
-```
-
-#### `--rpc-port <PORT>`
-
-Specify the HTTP RPC server TCP port.
-
-```sh filename="rpc-port" copy
-tangle-standalone --rpc-port 9933
-```
-
-#### `--state-pruning <PRUNING_MODE>`
-
-Specify the state pruning mode: a number of blocks to keep, or 'archive'.
-
-The default is to keep only the state of the last 256 blocks. Otherwise, state can be kept for all
-blocks (i.e. 'archive') or for all canonical blocks (i.e.
-'archive-canonical').
-
-```sh filename="state-pruning" copy
-tangle-standalone --state-pruning 128
-```
-
-#### `--telemetry-url <URL VERBOSITY>`
-
-The URL of the telemetry server to connect to.
-
-This flag can be passed multiple times as a means to specify multiple telemetry
-endpoints. Verbosity levels range from 0-9, with 0 denoting the least verbosity.
-Expected format is 'URL VERBOSITY'.
-
-```sh filename="wss" copy
-tangle-standalone --telemetry-url 'wss://foo/bar 0'
-```
-
-#### `--validator`
-
-Enable validator mode.
-
-The node will be started with the authority role and actively participate in any
-consensus task that it can (e.g. depending on availability of local keys).
-
-```sh filename="validator" copy
-tangle-standalone --validator
-```
-
-#### `--wasm-execution <METHOD>`
-
-Method for executing Wasm runtime code
-
-[default: compiled]
-[possible values: interpreted-i-know-what-i-do, compiled]
-
-`compiled` - this is the default and uses the Wasmtime compiled runtime
-`interpreted-i-know-what-i-do` - uses the wasmi interpreter
-
-```sh filename="wasm-execution" copy
-tangle-standalone --wasm-execution compiled
-```
-
-#### `--ws-external`
-
-Listen to all WebSocket interfaces.
-
-Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC
-proxy server to filter out dangerous methods. More details:
-https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs. Use
-`--unsafe-ws-external` to suppress the warning if you understand the risks.
-
-```sh filename="ws-external" copy
-tangle-standalone --ws-external
-```
-
-#### `--ws-port <PORT>`
-
-Specify the WebSocket RPC server TCP port.
-
-```sh filename="ws-port" copy
-tangle-standalone --ws-port 9944
-```
-
-## Subcommands
-
-The following subcommands are available:
-
-USAGE:
-
-```sh filename="subcommand" copy
-tangle-standalone <SUBCOMMAND>
-```
-
-| Subcommand | Description |
-| -------------------- | --------------------------------------------------------------------------------------------------- |
-| benchmark | Sub-commands concerned with benchmarking. The pallet benchmarking moved to the `pallet` sub-command |
-| build-spec | Build a chain specification |
-| check-block | Validate blocks |
-| export-blocks | Export blocks |
-| export-genesis-state | Export the genesis state of the standalone node |
-| export-genesis-wasm | Export the genesis wasm of the standalone node |
-| export-state | Export the state of a given block into a chain spec |
-| help | Print this message or the help of the given subcommand(s) |
-| import-blocks | Import blocks |
-| key | Key management cli utilities |
-| purge-chain | Remove the whole chain |
-| revert | Revert the chain to a previous state |
-| try-runtime | Try some testing command against a specified runtime state |
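-
-For example, to inspect the options a given subcommand accepts, pass `--help` to it; and `purge-chain` removes locally stored chain data (shown here against the dev chainspec as an illustrative sketch):
-
-```sh filename="purge-chain" copy
-# Show the options accepted by the purge-chain subcommand
-tangle-standalone purge-chain --help
-
-# Remove the locally stored chain data for the dev chain
-tangle-standalone purge-chain --chain dev
-```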
diff --git a/pages/docs/ecosystem-roles/validator/deploy-with-docker/_meta.json b/pages/docs/ecosystem-roles/validator/deploy-with-docker/_meta.json
deleted file mode 100644
index 1f5e039a..00000000
--- a/pages/docs/ecosystem-roles/validator/deploy-with-docker/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "full-node": "Full Node",
- "validator-node": "Validator Node",
- "relayer-node": "Relayer Node"
-}
diff --git a/pages/docs/ecosystem-roles/validator/deploy-with-docker/full-node.mdx b/pages/docs/ecosystem-roles/validator/deploy-with-docker/full-node.mdx
deleted file mode 100644
index 8efdb7d4..00000000
--- a/pages/docs/ecosystem-roles/validator/deploy-with-docker/full-node.mdx
+++ /dev/null
@@ -1,119 +0,0 @@
----
-title: Deploying with Docker
-description: Deploy a Tangle node with only a few steps.
----
-
-import Callout from "../../../../../components/Callout";
-
-# Deploying with Docker
-
-A Tangle node can be spun up quickly using Docker. For more information on installing Docker,
-please visit the official Docker [docs](https://docs.docker.com/get-docker/). When connecting to Tangle on Kusama, it will take a few days to completely
-sync the embedded relay chain. Make sure that your system meets the requirements, which you can read [at Hardware](/docs/tangle-network/node/validator/hardware).
-
-## Using Docker
-
-The quickest and easiest way to get started is to make use of our published Docker Tangle image. In doing so, users simply pull down the image from ghcr.io,
-set their keys, fetch the applicable chainspec and run the start command to get up and running.
-
-### **1. Pull the Tangle Docker image:**
-
-```sh filename="pull" copy
-# Only use "main" if you know what you are doing; it will use the latest, and possibly unstable, version of the node.
-
-docker pull ghcr.io/webb-tools/tangle/tangle-standalone:main
-```
-
-### **2. Create a local directory to store the chain data:**
-
-Let us create a directory where we will store all the data for our node. This includes the chain data and logs.
-
-```sh filename="mkdir" copy
-mkdir /var/lib/tangle/
-```
-
-### **3. Fetch applicable chainspec(s):**
-
-To join the Tangle Test network, we need to fetch the appropriate chainspec for the Tangle network.
-Download the latest chainspec for standalone testnet:
-
-```sh filename="get chainspec" copy
-# Fetches chainspec for Tangle network
-wget https://raw.githubusercontent.com/webb-tools/tangle/main/chainspecs/testnet/tangle-standalone.json
-```
-
-Please make a note of where you have stored this `json` file, as we will need it in the next steps.
-
-**Note:** Full nodes do not participate in block production or consensus, so no keys are required.
-
-### **4. Start Tangle full node:**
-
-To start the node run the following command:
-
-```sh filename="docker run" copy
-docker run --rm -it -v /var/lib/tangle/:/data ghcr.io/webb-tools/tangle/tangle-standalone:main \
- --chain tangle-testnet \
- --name="YOUR-NODE-NAME" \
- --base-path /data \
- --rpc-cors all \
- --port 9946 \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0"
-```
-
-<Callout>
- For an overview of the above flags, please refer to the [CLI Usage](/docs/ecosystem-roles/validator/api-reference/cli/) page of our documentation.
-</Callout>
-
-Once Docker pulls the necessary images, your Tangle node will start, displaying lots of information,
-such as the chain specification, node name, role, genesis state, and more.
-
-If you followed the installation instructions for Tangle, once synced, you will be connected to peers and see
-blocks being produced on the Tangle network! Note that in this case you need to also sync to the Polkadot/Kusama
-relay chain, which might take a few days.
-
-### Update the Client
-
-As Tangle development continues, it will sometimes be necessary to upgrade your node software. Node operators will be notified
-on our Discord channel when upgrades are available and whether they are necessary (some client upgrades are optional).
-The upgrade process is straightforward and is the same for a full node.
-
-1. Stop the docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop CONTAINER_ID
-```
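-
-If you do not know the container ID, you can list the running containers first (standard Docker, not specific to Tangle):
-
-```sh filename="docker ps" copy
-docker ps
-```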
-
-2. Get the latest version of Tangle from the Tangle GitHub Release [page](https://github.com/webb-tools/tangle/pkgs/container/tangle%2Ftangle-standalone)
-
-3. Pull the latest version of the Tangle binary by doing `docker pull ghcr.io/webb-tools/tangle/tangle-standalone:{VERSION_CODE}`.
- For example, if the latest version of Tangle is v0.1.12, then the command would be `docker pull ghcr.io/webb-tools/tangle/tangle-standalone:v0.1.12`
-
-4. Restart the tangle container and you should have the updated version of the client.
-
-### Purge Your Node
-
-If you need a fresh instance of your Tangle node, you can purge your node by removing the associated data directory.
-
-You'll first need to stop the Docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop CONTAINER_ID
-```
-
-If you did not use the `-v` flag to specify a local directory for storing your chain data when you spun up your node, then the data folder is related to the Docker container itself. Therefore, removing the Docker container will remove the chain data.
-
-If you did spin up your node with the `-v` flag, you will need to purge the specified directory. For example, for the suggested data directory, you can run the following command to purge your standalone node data:
-
-```sh filename="rm" copy
-# purges standalone data
-sudo rm -rf /data/chains/*
-```
-
-If you ran a parachain node, you can run the following command to purge your relay-chain node data:
-
-```sh filename="rm" copy
-# purges relay chain data
-sudo rm -rf /data/polkadot/*
-```
-
-Now that your chain data has been purged, you can start a new node with a fresh data directory!
diff --git a/pages/docs/ecosystem-roles/validator/deploy-with-docker/relayer-node.mdx b/pages/docs/ecosystem-roles/validator/deploy-with-docker/relayer-node.mdx
deleted file mode 100644
index 037f90ca..00000000
--- a/pages/docs/ecosystem-roles/validator/deploy-with-docker/relayer-node.mdx
+++ /dev/null
@@ -1,268 +0,0 @@
----
-title: Deploying with Docker
-description: An overview of Webb Tangle node and Webb Relayer deployment process.
----
-
-import Callout from "../../../../../components/Callout";
-
-# Deploying Tangle Validator and Relayer
-
-Network participants running a Tangle validator node may also want to run a relayer node. This guide
-will walk you through the process of deploying a Tangle validator and a Webb Relayer. By the end of this document, you will have set up a Webb Relayer
-at a publicly accessible endpoint alongside a Tangle validator node, both of which will be running within a Docker container.
-
-## Prerequisites
-
-You will need Docker installed on your Linux machine. For instructions on how to install Docker,
-please visit the official Docker installation documentation [here](https://docs.docker.com/desktop/install/linux-install/).
-
-When connecting to Tangle on Kusama, it will take a few days to completely
-sync the embedded relay chain. Make sure that your system meets the requirements, which you can read on the [Hardware page](/docs/tangle-network/node/validator/hardware).
-
-## Using Docker Compose
-
-The quickest and easiest way to get started is to make use of our published Docker Tangle image. In doing so, users simply
-create a local directory to store the chain data, download the latest chainspec for standalone testnet, set their keys, and run the start
-command to get up and running.
-
-### **1. Pull the Tangle Docker image:**
-
-We will use the pre-built Tangle Docker image to generate and insert the required keys for our node.
-
-```sh filename="pull" copy
-# Only use "main" if you know what you are doing; it will use the latest, and possibly unstable, version of the node.
-
-docker pull ghcr.io/webb-tools/tangle/tangle-standalone:main
-```
-
-### **2. Create a local directory to store the chain data:**
-
-Let us create a directory where we will store all the data for our node. This includes the chain data, keys, and logs.
-
-```sh filename="mkdir" copy
-mkdir /var/lib/tangle/
-```
-
-### **3. Generate and store keys:**
-
-We need to generate the required keys for our node. For more information on these keys, please see the [Required Keys](/docs/ecosystem-roles/validator/required-keys/) section.
-The keys we need to generate include the following:
-
-- DKG key (Ecdsa)
-- Aura key (Sr25519)
-- Account key (Sr25519)
-- Grandpa key (Ed25519)
-
-Let's now insert our required secret keys. We will not pass the SURI in the command; instead, the command will be interactive, and you
-should paste your SURI when the command asks for it.
-
-**Account Keys**
-
-```sh filename="Acco" copy
-# it will ask for your suri, enter it.
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type acco
-```
-
-**Aura Keys**
-
-```sh filename="Aura" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type aura
-```
-
-**Im-online Keys** - **these keys are optional**
-
-```sh filename="Imonline" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type imon
-```
-
-**DKG Keys**
-
-```sh filename="DKG" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ecdsa \
- --key-type wdkg
-```
-
-**Grandpa Keys**
-
-```sh filename="Grandpa" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ed25519 \
- --key-type gran
-```
-
-To ensure you have generated the keys correctly, run:
-
-```sh filename="ls" copy
-ls ~/webb/tangle/chains/*/keystore
-# You should see some files there; these are the keys.
-```
-
-### **4. Create the Docker compose file:**
-
-Now that we have generated the keys, we can start the Tangle Validator and Relayer. We will use the `docker-compose` file provided
-in the [Tangle repo](https://github.com/webb-tools/tangle).
-
-Let's start by creating a docker-compose file:
-
-```sh filename="nano" copy
-nano ~/webb/tangle/docker-compose.yml
-```
-
-Add the following lines:
-
-```yaml filename="docker-compose.yml" copy
-# This is an example of a docker compose file which contains both the Relayer and Tangle Node.
-version: "3"
-
-services:
- webb_relayer:
- # Here you should checkout
- # https://github.com/webb-tools/relayer/pkgs/container/relayer/versions?filters%5Bversion_type%5D=tagged
- # For the latest stable version. Only use "edge" if
- # you know what you are doing, it will use the latest and maybe
- # unstable version of the relayer.
- image: ghcr.io/webb-tools/relayer:${RELAYER_RELEASE_VERSION}
- container_name: webb_relayer
- env_file: .env
- depends_on:
- - caddy
- ports:
- - "$WEBB_PORT:$WEBB_PORT"
- volumes:
- - $PWD/config:/config
- - relayer_data:/store
- restart: always
- command: /webb-relayer -vvv -c /config
-
- tangle_standalone:
- # Here you should checkout
- # https://github.com/webb-tools/tangle/pkgs/container/tangle-standalone/versions?filters%5Bversion_type%5D=tagged
- # For the latest stable version. Only use "main" if
- # you know what you are doing, it will use the latest and maybe
- # unstable version of the node.
- image: ghcr.io/webb-tools/tangle/tangle-standalone:${TANGLE_RELEASE_VERSION}
- container_name: tangle_standalone
- env_file: .env
- ports:
- - "30333:30333"
- - "9933:9933"
- - "9944:9944"
- - "9615:9615"
- volumes:
- - tangle_data:/data
- restart: always
- entrypoint: /tangle-standalone
- command:
- [
- "--base-path=/data",
- "--validator",
- "--chain=/data/chainspecs/tangle-standalone.json",
- "--",
- "--execution=wasm",
- ]
-
-volumes:
- relayer_data:
- driver: local
- driver_opts:
- type: none
- o: bind
- device: $PWD/relayer/data
- tangle_data:
- driver: local
- driver_opts:
- type: none
- o: bind
- device: $PWD/tangle/
-```
-
-### **5. Set environment variables:**
-
-Prior to spinning up the Docker containers, we need to set some environment variables. Below is an example set of variables,
-but you will need to update them to reflect your own environment.
-
-```sh filename="export variables" copy
-export TANGLE_RELEASE_VERSION=main
-export RELAYER_RELEASE_VERSION=0.5.0-rc1
-export BASE_PATH=/tmp/data/
-export CHAINSPEC_PATH=/tmp/chainspec
-export WEBB_PORT=9955
-```
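-
-Since the compose file above references `env_file: .env`, you can alternatively put the same values in a `.env` file next to `docker-compose.yml` rather than exporting them in your shell. A minimal sketch with the same values (`.env` files take plain `KEY=value` pairs, without `export`):
-
-```sh filename=".env" copy
-TANGLE_RELEASE_VERSION=main
-RELAYER_RELEASE_VERSION=0.5.0-rc1
-WEBB_PORT=9955
-```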
-
-### **6. Start the Relayer and Validator node:**
-
-With our keys generated and our docker-compose file created, we can now start the relayer and validator node.
-
-```sh filename="compose up" copy
-docker compose up -d
-```
-
-The `docker-compose` file will spin up a container running a Tangle validator node and another running a Webb Relayer.
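-
-To confirm that both containers came up and to follow the node's logs, the standard compose commands can be used (the service names match the compose file above):
-
-```sh filename="compose logs" copy
-docker compose ps
-docker compose logs -f tangle_standalone
-```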
-
-## Update the Client
-
-As Tangle development continues, it will sometimes be necessary to upgrade your node software. Node operators will be notified
-on our Discord channel when upgrades are available and whether they are necessary (some client upgrades are optional).
-The upgrade process is straightforward and is the same for a full node or validator.
-
-1. Stop the docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop CONTAINER_ID
-```
-
-2. Get the latest version of Tangle from the Tangle GitHub Release page
-
-3. Use the latest version to spin up your node. To do so, replace the version in the Full Node or Validator command with the latest one and run it.
-
-Once your node is running again, you should see logs in your terminal.
-
-## Purge Your Node
-
-If you need a fresh instance of your Tangle node, you can purge your node by removing the associated data directory.
-
-You'll first need to stop the Docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop CONTAINER_ID
-```
-
-If you did not use the `-v` flag to specify a local directory for storing your chain data when you spun up your node, then the data folder is related to the Docker container itself. Therefore, removing the Docker container will remove the chain data.
-
-If you did spin up your node with the `-v` flag, you will need to purge the specified directory. For example, for the suggested data directory, you can run the following command to purge your standalone node data:
-
-```sh filename="rm" copy
-# purges standalone data
-sudo rm -rf /data/chains/*
-```
-
-If you ran a parachain node, you can run the following command to purge your relay-chain node data:
-
-```sh filename="rm" copy
-# purges relay chain data
-sudo rm -rf /data/polkadot/*
-```
-
-Now that your chain data has been purged, you can start a new node with a fresh data directory!
diff --git a/pages/docs/ecosystem-roles/validator/deploy-with-docker/validator-node.mdx b/pages/docs/ecosystem-roles/validator/deploy-with-docker/validator-node.mdx
deleted file mode 100644
index 6961a91f..00000000
--- a/pages/docs/ecosystem-roles/validator/deploy-with-docker/validator-node.mdx
+++ /dev/null
@@ -1,239 +0,0 @@
----
-title: Deploying with Docker
-description: Deploy a Tangle validator node with only a few steps.
----
-
-import Callout from "../../../../../components/Callout";
-
-# Deploying with Docker
-
-A Tangle node can be spun up quickly using Docker. For more information on installing Docker,
-please visit the official Docker [docs](https://docs.docker.com/get-docker/). When connecting to Tangle on Kusama, it will take a few days to completely sync the embedded relay chain. Make sure that your system meets the requirements, which you can read [here](/docs/tangle-network/node-operators/requirements).
-
-## Standalone Testnet
-
-### **1. Pull the Tangle Docker image:**
-
-Although we can make use of the provided `docker-compose` file in the [Tangle repo](https://github.com/webb-tools/tangle/tree/main/docker/tangle-standalone), we pull the `tangle-standalone:main` Docker image from ghcr.io
-so that we can generate and insert our required keys before starting the node.
-
-```sh filename="pull" copy
-# Only use "main" if you know what you are doing; it will use the latest, and possibly unstable, version of the node.
-
-docker pull ghcr.io/webb-tools/tangle/tangle-standalone:main
-```
-
-### **2. Create a local directory to store the chain data:**
-
-Let us create a directory where we will store all the data for our node. This includes the chain data, keys, and logs.
-
-```sh filename="mkdir" copy
-mkdir /var/lib/tangle/
-```
-
-### **3. Fetch applicable chainspec(s):**
-
-To join the Tangle Test network as a node operator, we need to fetch the appropriate chainspec for the Tangle network.
-Download the latest chainspec for standalone testnet:
-
-```sh filename="get chainspec" copy
-# Fetches chainspec for Tangle network
-wget https://raw.githubusercontent.com/webb-tools/tangle/main/chainspecs/testnet/tangle-standalone.json
-```
-
-Please make a note of where you have stored this `json` file, as we will need it in the next steps.
-
-### **4. Generate and store keys:**
-
-We need to generate the required keys for our node.
-The keys we need to generate include the following:
-
-- DKG key (Ecdsa)
-- Aura key (Sr25519)
-- Account key (Sr25519)
-- Grandpa key (Ed25519)
-- ImOnline key (Sr25519)
-
-Let's now insert our required secret keys. We will not pass the SURI in the command; instead, the command will be interactive, and you
-should paste your SURI when the command asks for it.
-
-**Account Keys**
-
-```sh filename="Acco" copy
-# it will ask for your suri, enter it.
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type acco
-```
-
-**Aura Keys**
-
-```sh filename="Aura" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type aura
-```
-
-**Im-online Keys** - **these keys are optional (required if you are running as a validator)**
-
-```sh filename="Imonline" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- key insert --base-path /var/lib/tangle/ \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Sr25519 \
- --key-type imon
-```
-
-**DKG Keys**
-
-```sh filename="DKG" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ecdsa \
- --key-type wdkg
-```
-
-**Grandpa Keys**
-
-```sh filename="Grandpa" copy
-docker run --rm -it --platform linux/amd64 --network="host" -v "/var/lib/data" \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
- tangle-standalone key insert --base-path /data \
- --chain /data/chainspecs/tangle-standalone.json \
- --scheme Ed25519 \
- --key-type gran
-```
-
-To ensure you have generated the keys correctly, run:
-
-```sh filename="ls" copy
-ls ~/webb/tangle/chains/*/keystore
-# You should see some files there; these are the keys.
-```
-
-**Caution:** Ensure you insert the keys using the instructions at [generate keys](#generate-and-store-keys).
-If you want the node to auto-generate the keys, add the `--auto-insert-keys` flag.
-
-### **5. Start Tangle Validator node:**
-
-To start the node run the following command:
-
-```sh filename="docker run" copy
-docker run --platform linux/amd64 --network="host" -v "/var/lib/data" --entrypoint ./tangle-standalone \
-ghcr.io/webb-tools/tangle/tangle-standalone:main \
---base-path=/data \
---chain tangle-testnet \
---name="YOUR-NODE-NAME" \
---execution wasm \
---wasm-execution compiled \
---trie-cache-size 0 \
---validator \
---telemetry-url "wss://telemetry.polkadot.io/submit/ 0"
-```
-
-<Callout>
- For an overview of the above flags, please refer to the [CLI Usage](/docs/ecosystem-roles/validator/api-reference/cli/) page of our documentation.
-</Callout>
-
-Once Docker pulls the necessary images, your Tangle node will start, displaying lots of information,
-such as the chain specification, node name, role, genesis state, and more.
-
-If you followed the installation instructions for Tangle, once synced, you will be connected to peers and see
-blocks being produced on the Tangle network!
-
-```sh filename="logs"
-2023-03-22 14:55:51 Tangle Standalone Node
-2023-03-22 14:55:51 ✌️ version 0.1.15-54624e3-aarch64-macos
-2023-03-22 14:55:51 ❤️ by Webb Technologies Inc., 2017-2023
-2023-03-22 14:55:51 📋 Chain specification: Tangle Testnet
-2023-03-22 14:55:51 🏷 Node name: cooing-morning-2891
-2023-03-22 14:55:51 👤 Role: FULL
-2023-03-22 14:55:51 💾 Database: RocksDb at /Users/local/Library/Application Support/tangle-standalone/chains/local_testnet/db/full
-2023-03-22 14:55:51 ⛓ Native runtime: tangle-standalone-115 (tangle-standalone-1.tx1.au1)
-2023-03-22 14:55:51 Bn254 x5 w3 params
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 new validator set of size 5 has been processed for era 1
-2023-03-22 14:55:52 🔨 Initializing Genesis block/state (state: 0xfd16…aefd, header-hash: 0x7c05…a27d)
-2023-03-22 14:55:52 👴 Loading GRANDPA authority set from genesis on what appears to be first startup.
-2023-03-22 14:55:53 Using default protocol ID "sup" because none is configured in the chain specs
-2023-03-22 14:55:53 🏷 Local node identity is: 12D3KooWDaeXbqokqvEMqpJsKBvjt9BUz41uP9tzRkYuky1Wat7Z
-2023-03-22 14:55:53 💻 Operating system: macos
-2023-03-22 14:55:53 💻 CPU architecture: aarch64
-2023-03-22 14:55:53 📦 Highest known block at #0
-2023-03-22 14:55:53 〽️ Prometheus exporter started at 127.0.0.1:9615
-2023-03-22 14:55:53 Running JSON-RPC HTTP server: addr=127.0.0.1:9933, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 Running JSON-RPC WS server: addr=127.0.0.1:9944, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.0.125/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.0.125/tcp/30305
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.88.12/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.88.12/tcp/30305
-```
-
-### Run via Docker Compose
-
-The docker-compose file will spin up a container running a Tangle standalone node, but you have to set the following environment variables. Remember to customize the values depending on your environment, then copy and paste them into your CLI.
-
-```sh filename="set variables" copy
-export RELEASE_VERSION=main
-export CHAINSPEC_PATH=/tmp/chainspec/
-```
-
-After that run:
-
-```sh filename="compose up" copy
-docker compose up -d
-```
-
-## Update the Client
-
-As Tangle development continues, it will sometimes be necessary to upgrade your node software. Node operators will be notified
-on our Discord channel when upgrades are available and whether they are necessary (some client upgrades are optional).
-The upgrade process is straightforward and is the same for a full node.
-
-1. Stop the docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop CONTAINER_ID
-```
-
-2. Get the latest version of Tangle from the Tangle GitHub Release [page](https://github.com/webb-tools/tangle/pkgs/container/tangle%2Ftangle-standalone)
-
-3. Pull the latest version of the Tangle binary by doing `docker pull ghcr.io/webb-tools/tangle/tangle-standalone:{VERSION_CODE}`.
- For example, if the latest version of Tangle is v0.1.12, then the command would be `docker pull ghcr.io/webb-tools/tangle/tangle-standalone:v0.1.12`
-
-4. Restart the tangle container and you should have the updated version of the client.
-
-Once your node is running again, you should see logs in your terminal.
-
-## Purge Your Node
-
-If you need a fresh instance of your Tangle node, you can purge your node by removing the associated data directory.
-
-You'll first need to stop the Docker container:
-
-```sh filename="docker stop" copy
-sudo docker stop CONTAINER_ID
-```
-
-If you did not use the `-v` flag to specify a local directory for storing your chain data when you spun up your node, then the data folder is related to the Docker container itself. Therefore, removing the Docker container will remove the chain data.
-
-If you did spin up your node with the `-v` flag, you will need to purge the specified directory. For example, for the suggested data directory, you can run the following command to purge your standalone node data:
-
-```sh filename="rm" copy
-# purges standalone data
-sudo rm -rf /data/chains/*
-```
-
-Now that your chain data has been purged, you can start a new node with a fresh data directory!
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/quickstart.mdx b/pages/docs/ecosystem-roles/validator/monitoring/quickstart.mdx
deleted file mode 100644
index a39eae5b..00000000
--- a/pages/docs/ecosystem-roles/validator/monitoring/quickstart.mdx
+++ /dev/null
@@ -1,59 +0,0 @@
----
-title: Quickstart
-description: Creating a monitoring stack for a Tangle node.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Monitoring Tangle Node
-
-The following is a guide outlining the steps to set up monitoring for a Tangle node. If you do not have a Tangle node set up yet, please
-review the **How to run a Tangle node** setup guide [here](https://docs.webb.tools/v1/node-operators/run-tangle-node). It is important to note that
-this guide's purpose is to help you get started with monitoring your Tangle node, not to advise on how to set up a node securely. Please
-take additional security and privacy measures into consideration.
-
-Here is how our final configuration will look at the end of this guide.
-
-- **Prometheus** is the central module; it pulls metrics from different sources to provide them to the Grafana dashboard and Alert Manager.
-- **Grafana** is the visual dashboard tool that we access from the outside (through SSH tunnel to keep the node secure).
-- **Alert Manager** listens to Prometheus metrics and pushes an alert as soon as a threshold is crossed (CPU % usage for example).
-- **Tangle Node** natively provides metrics for monitoring.
-- **Process exporter** provides processes metrics for the dashboard (optional).
-- **Loki** provides log aggregation system and metrics.
-- **Promtail** is the agent responsible for gathering logs, and sending them to Loki.
-
-<Callout>
- Running the monitoring stack requires that you are already running the Tangle network node with at least the following port exposed:
- - Prometheus: `http://localhost:9615`
-</Callout>
-
-## Docker usage
-
-The quickest way to set up monitoring for your node is to use our provided `docker-compose` file. The Docker image starts all the above monitoring
-tools with the exception of `Node exporter`. `node-exporter` is omitted since some metrics are not available when running inside a Docker container.
-
-Follow the instructions [here](/prometheus) to start the Prometheus node exporter.
-
-### Prerequisites
-
-Before starting the monitoring stack, ensure the configs are set up correctly:
-
-- (Optional) Set the `__SLACK_WEBHOOK_URL__` in `alertmanager.yml` to receive slack alerts
-- Ensure the promtail mount path matches your log directory
-
-Note: All containers require a connection to localhost; this behaviour differs between Linux, Windows, and Mac. The configs within the `docker-compose` and yml
-files assume a Linux environment. Refer to [this answer](https://stackoverflow.com/questions/24319662/from-inside-of-a-docker-container-how-do-i-connect-to-the-localhost-of-the-mach) to make the necessary adjustments for your environment.
-
-### Usage
-
-**To start the monitoring stack, run:**
-
-```sh filename="compose up" copy
-cd monitoring
-docker compose up -d
-```
-
-You can then navigate to `http://localhost:3000` to access the Grafana dashboard!
-
-![Tangle Dashboard](../../../../../components/images/tangle-metrics.png)
diff --git a/pages/docs/ecosystem-roles/validator/quickstart.mdx b/pages/docs/ecosystem-roles/validator/quickstart.mdx
deleted file mode 100644
index 0fc80041..00000000
--- a/pages/docs/ecosystem-roles/validator/quickstart.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: Node Operator Quickstart
-description: Participate in the Webb ecosystem by deploying a Tangle node to validate, serve data, and more.
----
-
-import { QuickDeployArea, DeployArea, SupportArea, MonitoringArea } from "../../../../components/TangleQuickstart"
-import { RepoArea } from "../../../../components/RepoArea";
-import FullWebbCTA from "../../../../components/FullWebbCTA";
-
-# Node Operator Quickstart
-
-Becoming a node operator on the Tangle Network requires some technical skills, trust, and support from the community. Below
-is a collection of quick links to help you get set up!
-
-**If you're looking to understand how to become a Validator in Substrate systems like Tangle, see the [Polkadot Docs](https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot) as well.**
-
-## Quick Setup
-
-
-
-## Advanced Setup
-
-
-
-## Monitoring
-
-Monitoring and troubleshooting your Tangle node is essential, and we provide setup instructions to make it incredibly easy to get started!
-
-
-
-## Support Channels
-
-Run into weird issues? Or have questions about the Tangle Network? Join the Webb community and become connected to the entire Webb ecosystem.
-
-
-
-## Repositories
-
-Interested in what we are building at Webb? Clone the below repositories, and start contributing to a private cross-chain future!
-
-
-
-
diff --git a/pages/docs/ecosystem-roles/validator/required-keys.mdx b/pages/docs/ecosystem-roles/validator/required-keys.mdx
deleted file mode 100644
index 8abb1604..00000000
--- a/pages/docs/ecosystem-roles/validator/required-keys.mdx
+++ /dev/null
@@ -1,141 +0,0 @@
----
-title: Required Keys
-description: Describes the keys necessary to start and run a Tangle node.
----
-
-import Callout from "../../../../components/Callout";
-
-<Callout>
- This guide assumes you have a validator already running. Refer to [Running With Docker](./deploy-with-docker/validator-node.mdx) or [Running with systemd](./systemd/validator-node.mdx) to ensure your node is set up correctly.
-</Callout>
-
-# Required Keys
-
-In order to participate in the distributed key generation protocol, block production, and block finalization, you will be required to set up a few keys. These keys
-include:
-
-- DKG key (Ecdsa)
-- Aura key (Sr25519)
-- Account key (Sr25519)
-- Grandpa key (Ed25519)
-- ImOnline key (Sr25519)
-
-To generate each of the above keys we will make use of [subkey](https://docs.substrate.io/reference/command-line-tools/subkey/). You will need to install
-subkey before running the command.
-
-<Callout>
- Keep in mind the below commands use the `/tangle-data` base path; please specify your preferred base path during execution.
-</Callout>
-
-**Once installed, to generate the DKG key you can run the following:**
-
-```sh filename="DKG Key" copy
-tangle-standalone key insert --base-path /tangle-data \
---chain "" \
---scheme Ecdsa \
---suri "<12-PHRASE-MNEMONIC>" \
---key-type wdkg
-```
-
-**To generate the Aura key you can run the following:**
-
-```sh filename="Aura Key" copy
-tangle-standalone key insert --base-path /tangle-data \
---chain "" \
---scheme Sr25519 \
---suri "<12-PHRASE-MNEMONIC>" \
---key-type aura
-```
-
-**To generate the Account key you can run the following:**
-
-```sh filename="Account Key" copy
-tangle-standalone key insert --base-path /tangle-data \
---chain "" \
---scheme Sr25519 \
---suri "<12-PHRASE-MNEMONIC>" \
---key-type acco
-```
-
-**To generate the Imonline key you can run the following:**
-
-```sh filename="Imonline Key" copy
-tangle-standalone key insert --base-path /tangle-data \
---chain "" \
---scheme Sr25519 \
---suri "<12-PHRASE-MNEMONIC>" \
---key-type imon
-```
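-
-**The key list above also includes the Grandpa key. Following the same command shape, and the Ed25519 scheme and `gran` key type used elsewhere in these docs, inserting it would look like this:**
-
-```sh filename="Grandpa Key" copy
-tangle-standalone key insert --base-path /tangle-data \
---chain "" \
---scheme Ed25519 \
---suri "<12-PHRASE-MNEMONIC>" \
---key-type gran
-```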
-
-### Synchronize Chain Data
-
-You can begin syncing your node by running the following command:
-
-```sh filename="Syncing node" copy
-./target/release/tangle-standalone
-```
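-
-To check whether the node is still syncing, you can query the standard Substrate `system_health` RPC method; `isSyncing` will be `false` once the node has caught up:
-
-```sh filename="sync check" copy
-curl -H "Content-Type: application/json" \
-  -d '{"id":1, "jsonrpc":"2.0", "method": "system_health", "params":[]}' \
-  http://localhost:9933
-```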
-
-Once your node has fully synchronized with the network, you may proceed to set up the
-necessary accounts to operate a node.
-
-## Bond funds
-
-To start validating, you need to have x TNT tokens for Tangle Network. It is highly recommended that you make your controller
-and stash accounts two separate accounts. For this, you will create two accounts and make sure each of them has at least
-enough funds to pay the fees for making transactions. Keep most of your funds in the stash account since it is meant to be
-the custodian of your staking funds.
-
-Make sure not to bond all your TNT balance since you will be unable to pay transaction fees from your bonded balance.
-
-It is now time to set up our validator. We will do the following:
-
-- Bond the TNT of the Stash account. These TNT tokens will be put at stake for the security of the network and can be slashed.
-- Select the Controller. This is the account that will decide when to start or stop validating.
-
-First, go to the Staking section. Click on "Account Actions", and then the "+ Stash" button. It should look something
-like the image below.
-
-![bond](../../../../components/images/bond.png)
-
-Once everything is filled in properly, click Bond and sign the transaction with your Stash account.
-
-## Session Keys
-
-Operators need to set their `Author` session keys. Run the following command to generate your session keys.
-**Note:** You may need to change `http://localhost:9933` to your correct address.
-
-```sh filename="Generate session key" copy
-curl -H "Content-Type: application/json" -d '{"id":1, "jsonrpc":"2.0", "method": "author_rotateKeys", "params":[]}' http://localhost:9933
-```
-
-The result will look like this; copy the key:
-
-```
-{"jsonrpc":"2.0","result":"0x400e3cef43bdessab331e4g03115c4bcecws3cxff608fa3b8sh6b07y369386570","id":1}
-```
-
-### Set session keys
-
-1. Go to the Polkadot.js portal: `Developer > Extrinsic`.
-2. Select your account and extrinsic type: session / setKeys.
-3. Enter the session keys and set proof to `0x00`.
-4. Submit the transaction.
-
-### Setting identity
-
-Operators need to set their identity.
-
-1. Go to the Polkadot.js portal: `Accounts`
-2. Open the 3 dots next to your address: `Set on-chain Identity`
-3. Enter all fields you want to set.
-4. Send the transaction.
-
-### Request judgment
-
-1. Go to the Polkadot.js portal: `Developer > Extrinsic`
-2. Select your account and extrinsic type: `identity / requestJudgment`
-3. Send the transaction.
-
-### Production blocks
-
-Once your node is active, you will see your name inside the Network tab every time you produce a block!
diff --git a/pages/docs/ecosystem-roles/validator/requirements.mdx b/pages/docs/ecosystem-roles/validator/requirements.mdx
deleted file mode 100644
index 0650ca60..00000000
--- a/pages/docs/ecosystem-roles/validator/requirements.mdx
+++ /dev/null
@@ -1,189 +0,0 @@
----
-title: Requirements
-description: An overview of Webb Tangle node requirements.
----
-
-import { Tabs, Tab } from "../../../../components/Tabs";
-
-# Requirements
-
-The current Tangle testnet is a standalone network, meaning that it is not connected to the Polkadot or Kusama relay chain.
-Since Tangle is not a parachain, the node is quite a small build: it only contains the code needed to run the standalone Tangle network, without syncing
-the relay chain or communicating between the two. As such, the build is smaller and does not have the same minimum spec requirements as a parachain node.
-
-The following specifications are ideal or recommended, but nodes can be run with less. Testnet nodes have also been run using AWS t3.Large instances.
-
-| Component | Requirements |
-| --------- | ------------------------------------------------------------------------------------------------------ |
-| CPU | Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz |
-| Storage | An NVMe solid state drive of 500 GB (As it should be reasonably sized to deal with blockchain growth). |
-| Memory | 32GB ECC |
-| Firewall | P2P port must be open to incoming traffic: - Source: Any - Destination: 30333, 30334 TCP |
-
-## Running Ports
-
-As stated before, the standalone nodes will listen on multiple ports. The default Substrate ports are used in the standalone,
-while the relay chain will listen on the next higher port.
-
-The only ports that need to be open for incoming traffic are those designated for P2P.
-
-**Default Ports for a Tangle Full-Node:**
-
-| Description | Port |
-| ----------- | ----------- |
-| P2P | 30333 (TCP) |
-| RPC | 9933 |
-| WS | 9944 |
-| Prometheus | 9615 |
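-
-For example, on a host managed with `ufw`, opening only the P2P ports for incoming traffic might look like the following sketch (adapt to your own firewall tooling):
-
-```sh filename="ufw" copy
-sudo ufw allow 30333/tcp
-sudo ufw allow 30334/tcp
-```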
-
-## Dependencies
-
-In order to build a Tangle node from source, your machine must have specific dependencies installed. This guide
-outlines those requirements.
-
-This guide uses the [https://rustup.rs](https://rustup.rs) installer and the `rustup` tool to manage the Rust toolchain. Rust is required to
-compile a Tangle node.
-
-First install and configure `rustup`:
-
-```sh filename="Install Rust" copy
-# Install
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-
-# Configure
-source ~/.cargo/env
-```
-
-Configure the Rust toolchain to default to the latest stable version, add nightly and the nightly wasm target:
-
-```sh filename="Configure Rust" copy
-rustup default nightly
-rustup update
-rustup update nightly
-rustup target add wasm32-unknown-unknown --toolchain nightly
-```
-
-Great! Now your Rust environment is ready! 🚀🚀
-
-### Substrate Dependencies
-
-
-
-
- Debian version:
- ```sh filename=" Debian" copy
- sudo apt install --assume-yes git clang curl libssl-dev llvm libudev-dev make protobuf-compiler
- ```
- Arch version:
- ```sh filename="Arch" copy
- pacman -Syu --needed --noconfirm curl git clang make protobuf
- ```
- Fedora version:
- ```sh filename="Fedora" copy
- sudo dnf update
- sudo dnf install clang curl git openssl-devel make protobuf-compiler
- ```
- Opensuse version:
- ```sh filename="Opensuse" copy
- sudo zypper install clang curl git openssl-devel llvm-devel libudev-devel make protobuf
- ```
-
- Remember that different distributions might use different package managers and bundle packages in different ways.
- For example, depending on your installation selections, Ubuntu Desktop and Ubuntu Server might have different packages
- and different requirements. However, the packages listed in the command-line examples are applicable for many common Linux
- distributions, including Debian, Linux Mint, MX Linux, and Elementary OS.
-
-
-
-
- Assumes the user already has Homebrew installed.
-
- ```sh filename="Brew" copy
- brew update
- brew install openssl gmp protobuf cmake
- ```
-
-
-
-
- For Windows users please refer to the official Substrate documentation:
- [Windows](https://docs.substrate.io/install/windows/)
-
-
-
-
-### Build from Source 💻
-
-Once the development environment is set up, you can build the Tangle node from source.
-
-```sh filename="Clone repo" copy
-git clone https://github.com/webb-tools/tangle.git
-```
-
-```sh filename="Build" copy
-cargo build --release
-```
-
-> NOTE: You _must_ use the release builds! The optimizations here are required
-> as in debug mode, it is expected that nodes are not able to run fast enough to produce blocks.
-
-You will now have the `tangle-standalone` binary built in the `target/release/` directory.
-
-#### Feature Flags
-
-Some features of the tangle node are gated behind feature flags. To enable these features, you will have to build the binary with those flags enabled.
-
-1. **txpool**
-
-This feature flag is useful to help trace and debug EVM transactions on the chain. You should build the node with this flag if you intend to use the node for any EVM transaction tracing.
-
-```sh filename="Build txpool" copy
-cargo build --release --features txpool
-```
-
-2. **relayer**
-
-This feature flag is used to start the embedded tx relayer with the tangle node. You should build the node with this flag if you intend to run a node with a relayer, which can be used for transaction relaying or data querying.
-
-```sh filename="Build relayer" copy
-cargo build --release --features relayer
-```
-
-3. **light-client**
-
-This feature flag is used to start the embedded light client with the tangle node. You should build the node with this flag if you intend to run a node with a light-client relayer to sync EVM data on Tangle.
-
-```sh filename="Build light" copy
-cargo build --release --features light-client
-```
-
-### Use Precompiled binary 💻
-
-Every release of the tangle node includes a precompiled binary. It's currently limited to the amd64 architecture, but we plan to
-support more soon. You can view all releases [here](https://github.com/webb-tools/tangle/releases).
-
-In the below commands, substitute `LATEST_RELEASE` with the version you want to use; the current latest version is `0.4.6`.
-
-### Get tangle binary
-
-```sh filename="Get binary" copy
-wget https://github.com/webb-tools/tangle/releases/download/LATEST_RELEASE/tangle-standalone-linux-amd64
-```
-
-### Get tangle binary with txpool feature
-
-```sh filename="Get binary txpool" copy
-wget https://github.com/webb-tools/tangle/releases/download/LATEST_RELEASE/tangle-standalone-txpool-linux-amd64
-```
-
-### Get tangle binary with relayer feature
-
-```sh filename="Get binary relayer" copy
-wget https://github.com/webb-tools/tangle/releases/download/LATEST_RELEASE/tangle-standalone-relayer-linux-amd64
-```
-
-### Get tangle binary with light-client feature
-
-```sh filename="Get binary light" copy
-wget https://github.com/webb-tools/tangle/releases/download/LATEST_RELEASE/tangle-standalone-light-client-linux-amd64
-```
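-
-After downloading any of the above, remember to make the binary executable before running it (a standard step, shown here for the plain binary; adjust the filename for the feature variants):
-
-```sh filename="chmod" copy
-chmod +x tangle-standalone-linux-amd64
-./tangle-standalone-linux-amd64 --version
-```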
diff --git a/pages/docs/ecosystem-roles/validator/systemd/_meta.json b/pages/docs/ecosystem-roles/validator/systemd/_meta.json
deleted file mode 100644
index a3cff2b9..00000000
--- a/pages/docs/ecosystem-roles/validator/systemd/_meta.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "full-node": "Full Node",
- "validator-node": "Validator Node"
-}
diff --git a/pages/docs/ecosystem-roles/validator/systemd/full-node.mdx b/pages/docs/ecosystem-roles/validator/systemd/full-node.mdx
deleted file mode 100644
index e6db89c0..00000000
--- a/pages/docs/ecosystem-roles/validator/systemd/full-node.mdx
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Running with Systemd
-description: Run a Tangle full node using systemd.
----
-
-# Running with Systemd
-
-You can run your full node as a systemd process so that it will automatically restart on server reboots
-or crashes (which helps you avoid getting slashed!).
-
-Before following this guide you should have already set up your machine's environment, installed the dependencies, and
-compiled the Tangle binary. If you have not done so, please refer to the [Hardware](/docs/tangle-network/node/hardware) and [Software](/docs/tangle-network/node/node-software) pages.
-
-## System service setup
-
-First, move the binary into place with the following command; we will then create the service configuration file:
-
-```sh filename="mv" copy
-# Move the tangle-standalone binary to the bin directory (assumes you are in repo root directory)
-sudo mv ./target/release/tangle-standalone /usr/bin/
-```
-
-Add the following contents to the service configuration file. Make sure to replace the **USERNAME** with the username you created in the previous step, add your own node name, and update
-any paths or ports to your own preference.
-
-**Note:** The below configuration assumes you are targeting the Tangle Network chainspec.
-
-**Full Node**
-
-```sh filename="full.service" copy
-sudo tee /etc/systemd/system/full.service > /dev/null << EOF
-[Unit]
-Description=Tangle Full Node
-After=network-online.target
-StartLimitIntervalSec=0
-
-[Service]
-User=<USERNAME>
-Restart=always
-RestartSec=3
-ExecStart=/usr/bin/tangle-standalone \
- --base-path /data/full-node \
- --name <NODE-NAME> \
- --chain tangle-testnet \
- --node-key-file "/home/<USERNAME>/node-key" \
- --rpc-cors all \
- --port 9946 \
- --no-mdns \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0"
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-**Full Node with evm trace**
-
-**Note:** To run with evm trace, you should use a binary built with the `txpool` flag; refer to the [requirements](../requirements.mdx) page for more details.
-
-```sh filename="full.service" copy
-sudo tee /etc/systemd/system/full.service > /dev/null << EOF
-[Unit]
-Description=Tangle Full Node
-After=network-online.target
-StartLimitIntervalSec=0
-
-[Service]
-User=<USERNAME>
-Restart=always
-RestartSec=3
-ExecStart=/usr/bin/tangle-standalone \
- --base-path /data/full-node \
- --name <NODE-NAME> \
- --chain tangle-testnet \
- --node-key-file "/home/<USERNAME>/node-key" \
- --rpc-cors all \
- --port 9946 \
- --no-mdns --ethapi trace,debug,txpool
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-### Enable the services
-
-Double check that the config has been written to `/etc/systemd/system/full.service` correctly.
-If so, enable the service so it runs on startup, and then try to start it now:
-
-```sh filename="enable service" copy
-sudo systemctl daemon-reload
-sudo systemctl enable full
-sudo systemctl start full
-```
-
-Check the status of the service:
-
-```sh filename="status" copy
-sudo systemctl status full
-```
-
-You should see the node connecting to the network and syncing the latest blocks.
-If you need to tail the latest output, you can use:
-
-```sh filename="logs" copy
-sudo journalctl -u full.service -f
-```
-
-Congratulations! You have officially set up a Tangle Network node using Systemd. If you are interested
-in learning how to set up monitoring for your node, please refer to the [monitoring](../monitoring/quickstart.mdx) page.
diff --git a/pages/docs/ecosystem-roles/validator/systemd/quick-node.mdx b/pages/docs/ecosystem-roles/validator/systemd/quick-node.mdx
deleted file mode 100644
index c8f26935..00000000
--- a/pages/docs/ecosystem-roles/validator/systemd/quick-node.mdx
+++ /dev/null
@@ -1,90 +0,0 @@
----
-title: Quickstart
-description: Run a Tangle Validator node using systemd.
----
-
-# Tangle Validator Quickstart
-
-**Caution:** The following guide is only meant as a quickstart for anyone looking to run a tangle node with minimal
-config. This guide uses automated keys, and it is not recommended to run a validator using this setup long term; refer to the [advanced](/docs/ecosystem-roles/validator/systemd/validator-node/) guide
-for a more secure long-term setup.
-
-Before following this guide you should have already set up your machine's environment, installed the dependencies, and
-compiled the Tangle binary. If you have not done so, please refer to the [Hardware](/docs/tangle-network/node/validator/hardware) page.
-
-## Standalone Testnet
-
-### 1. Fetch the tangle binary
-
-Use the latest release version in the URL in place of `LATEST_RELEASE`; you can visit the [releases](https://github.com/webb-tools/tangle/releases) page to view the latest info.
-
-```
-wget https://github.com/webb-tools/tangle/releases/download/LATEST_RELEASE/tangle-standalone-linux-amd64
-```
-
-For example, at the time of writing this document, the latest release is v0.4.7, and the link would be as follows:
-
-```
-wget https://github.com/webb-tools/tangle/releases/download/v0.4.7/tangle-standalone-linux-amd64
-```
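-
-Depending on how the file was downloaded, you may need to mark the binary as executable before starting it (a standard step):
-
-```
-chmod +x tangle-standalone-linux-amd64
-```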
-
-### 2. Start the node binary
-
-To start the binary, run the following command (ensure you are in the same folder where tangle-standalone is downloaded).
-
-Make sure to change the following params before executing the command:
-
-1. `<BASE-PATH>`: This is the path where your chain DB will live.
-2. `<NODE-NAME>`: This is a unique name for your node; use it to help identify your node to other validators and telemetry data.
-
-```
-./tangle-standalone-linux-amd64 \
- --base-path <BASE-PATH> \
- --name <NODE-NAME> \
- --chain tangle-testnet \
- --port 9944 \
- --validator \
- --auto-insert-keys \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0"
-```
-
-If the node is running correctly, you should see an output similar to below:
-
-```
-2023-03-22 14:55:51 Tangle Standalone Node
-2023-03-22 14:55:51 ✌️ version 0.1.15-54624e3-aarch64-macos
-2023-03-22 14:55:51 ❤️ by Webb Technologies Inc., 2017-2023
-2023-03-22 14:55:51 📋 Chain specification: Tangle Testnet
-2023-03-22 14:55:51 🏷 Node name: cooing-morning-2891
-2023-03-22 14:55:51 👤 Role: FULL
-2023-03-22 14:55:51 💾 Database: RocksDb at /Users/local/Library/Application Support/tangle-standalone/chains/local_testnet/db/full
-2023-03-22 14:55:51 ⛓ Native runtime: tangle-standalone-115 (tangle-standalone-1.tx1.au1)
-2023-03-22 14:55:51 Bn254 x5 w3 params
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 new validator set of size 5 has been processed for era 1
-2023-03-22 14:55:52 🔨 Initializing Genesis block/state (state: 0xfd16…aefd, header-hash: 0x7c05…a27d)
-2023-03-22 14:55:52 👴 Loading GRANDPA authority set from genesis on what appears to be first startup.
-2023-03-22 14:55:53 Using default protocol ID "sup" because none is configured in the chain specs
-2023-03-22 14:55:53 🏷 Local node identity is: 12D3KooWDaeXbqokqvEMqpJsKBvjt9BUz41uP9tzRkYuky1Wat7Z
-2023-03-22 14:55:53 💻 Operating system: macos
-2023-03-22 14:55:53 💻 CPU architecture: aarch64
-2023-03-22 14:55:53 📦 Highest known block at #0
-2023-03-22 14:55:53 〽️ Prometheus exporter started at 127.0.0.1:9615
-2023-03-22 14:55:53 Running JSON-RPC HTTP server: addr=127.0.0.1:9933, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 Running JSON-RPC WS server: addr=127.0.0.1:9944, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.0.125/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.0.125/tcp/30305
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.88.12/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.88.12/tcp/30305
-```
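-
-As a quick sanity check, you can query the node's local JSON-RPC endpoint once it is running. This is a sketch that assumes the default HTTP RPC address `127.0.0.1:9933` shown in the logs above, using the standard Substrate `system_health` method:
-
-```sh filename="health check" copy
-curl -H "Content-Type: application/json" \
-  -d '{"id":1, "jsonrpc":"2.0", "method": "system_health", "params":[]}' \
-  http://127.0.0.1:9933
-# Expect a JSON response containing "peers" and "isSyncing" fields.
-```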
-
-**Note:** Since the `--auto-insert-keys` flag was used, the logs will print out the keys that were automatically generated for you.
-Make sure to note them down and keep them safe; these keys are essential in case you need to migrate or restart your node.
-
-Congratulations! You have officially set up a Tangle Network node. Keep in mind that this quickstart uses automatically generated keys and is
-not recommended for running a validator long term; refer to the [advanced](/docs/ecosystem-roles/validator/systemd/validator-node/) guide
-for a more secure long-term setup. If you are interested
-in learning how to set up monitoring for your node, please refer to the [monitoring](../monitoring/quickstart.mdx) page.
diff --git a/pages/docs/ecosystem-roles/validator/systemd/validator-node.mdx b/pages/docs/ecosystem-roles/validator/systemd/validator-node.mdx
deleted file mode 100644
index 78d572be..00000000
--- a/pages/docs/ecosystem-roles/validator/systemd/validator-node.mdx
+++ /dev/null
@@ -1,216 +0,0 @@
----
-title: Running with Systemd
-description: Run a Tangle Validator node using systemd.
----
-
-# Running with Systemd
-
-You can run your validator node as a Systemd process so that it will automatically restart on server reboots
-or crashes (which helps you avoid getting slashed!).
-
-Before following this guide you should have already set up your machine's environment, installed the dependencies, and
-compiled the Tangle binary. If you have not done so, please refer to the [Hardware](/docs/tangle-network/node/validator/requirements) page.
-
-## Standalone Testnet
-
-### Generate and store keys
-
-We need to generate the required keys for our node. For more information on these keys, please see the [Required Keys](/docs/ecosystem-roles/validator/required-keys/) section.
-The keys we need to generate include the following:
-
-- DKG key (Ecdsa)
-- Aura key (Sr25519)
-- Account key (Sr25519)
-- Grandpa key (Ed25519)
-- ImOnline key (Sr25519)
-
-Let's now insert our required secret keys. You can pass your SURI directly via the `--suri` flag as shown below, or omit that
-line, in which case the command is interactive and will prompt you to paste your SURI when it asks for it.
-
-**Account Keys**
-
-```sh filename="Acco" copy
-# it will ask for your suri, enter it.
-./target/release/tangle-standalone key insert --base-path /data/validator/ \
---chain ./chainspecs/tangle-standalone.json \
---scheme Sr25519 \
---suri <"12-MNEMONIC-PHARSE"> \
---key-type acco
-```
-
-**Aura Keys**
-
-```sh filename="Aura" copy
-# it will ask for your suri, enter it.
-./target/release/tangle-standalone key insert --base-path /data/validator/ \
---chain ./chainspecs/tangle-standalone.json \
---scheme Sr25519 \
---suri <"12-MNEMONIC-PHARSE"> \
---key-type aura
-```
-
-**Im-online Keys** - **these keys are optional**
-
-```sh filename="Imonline" copy
-# it will ask for your suri, enter it.
-./target/release/tangle-standalone key insert --base-path /data/validator/ \
---chain ./chainspecs/tangle-standalone.json \
---scheme Sr25519 \
---suri <"12-MNEMONIC-PHARSE"> \
---key-type imon
-```
-
-**DKG Keys**
-
-```sh filename="DKG" copy
-# it will ask for your suri, enter it.
-./target/release/tangle-standalone key insert --base-path /data/validator/ \
---chain ./chainspecs/tangle-standalone.json \
---scheme Ecdsa \
---suri <"12-MNEMONIC-PHARSE"> \
---key-type wdkg
-```
-
-**Grandpa Keys**
-
-```sh filename="Grandpa" copy
-# it will ask for your suri, enter it.
-./target/release/tangle-standalone key insert --base-path /data/validator/ \
---chain ./chainspecs/tangle-standalone.json \
---scheme Ed25519 \
---suri <"12-MNEMONIC-PHARSE"> \
---key-type gran
-```
-
-To ensure you have generated the keys correctly, run:
-
-```sh filename="ls" copy
-ls ~/data/validator//keystore
-# You should see some files there; these are the keys.
-```
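-
-Substrate keystores name each file as the hex-encoded key type followed by the hex-encoded public key, which lets you verify that every key type made it in. A sketch, assuming the keystore path from the check above:
-
-```sh filename="inspect keystore" copy
-# Key-type prefixes (hex): "acco" = 6163636f, "aura" = 61757261,
-# "imon" = 696d6f6e, "wdkg" = 77646b67, "gran" = 6772616e
-# Adjust the path below to your keystore location.
-ls ~/data/validator//keystore | cut -c1-8
-# Each line printed should be one of the prefixes above.
-```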
-
-## System service setup
-
-First, move the binary into place; we will then create the service configuration file:
-
-```sh filename="mv" copy
-# Move the tangle-standalone binary to the bin directory (assumes you are in repo root directory)
-sudo mv ./target/release/tangle-standalone /usr/bin/
-```
-
-Add the following contents to the service configuration file. Make sure to replace the **USERNAME** with the username you created in the previous step, add your own node name, and update any paths or ports to your own preference.
-
-**Note:** The below configuration assumes you are targeting the Tangle Network chainspec.
-
-**Caution:** Ensure you insert the keys using the instructions at [generate keys](#generate-and-store-keys),
-if you want the node to auto generate the keys, add the `--auto-insert-keys` flag.
-
-**Validator Node**
-
-```sh filename="validator.service" copy
-sudo tee /etc/systemd/system/validator.service > /dev/null << EOF
-[Unit]
-Description=Tangle Validator Node
-After=network-online.target
-StartLimitIntervalSec=0
-
-[Service]
-User=<USERNAME>
-Restart=always
-RestartSec=3
-ExecStart=/usr/bin/tangle-standalone \
- --base-path /data/validator/ \
- --name <NODE-NAME> \
- --chain tangle-testnet \
- --node-key-file "/home//node-key" \
- --port 30333 \
- --validator \
- --no-mdns \
- --telemetry-url "wss://telemetry.polkadot.io/submit/ 0" --name
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
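-
-Before enabling the service, you can optionally ask Systemd to lint the unit file for syntax problems (a quick sketch using the standard `systemd-analyze` tool):
-
-```sh filename="verify" copy
-systemd-analyze verify /etc/systemd/system/validator.service
-```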
-
-### Enable the services
-
-Double check that the config has been written to `/etc/systemd/system/validator.service` correctly.
-If so, enable the service so it runs on startup, and then try to start it now:
-
-```sh filename="enable service" copy
-sudo systemctl daemon-reload
-sudo systemctl enable validator
-sudo systemctl start validator
-```
-
-Check the status of the service:
-
-```sh filename="status" copy
-sudo systemctl status validator
-```
-
-You should see the node connecting to the network and syncing the latest blocks.
-If you need to tail the latest output, you can use:
-
-```sh filename="logs" copy
-sudo journalctl -u validator.service -f
-```
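-
-`journalctl` can also narrow the log window when debugging, for example (standard `journalctl` flags):
-
-```sh filename="recent logs" copy
-# Show only the last hour of validator logs
-sudo journalctl -u validator.service --since "1 hour ago"
-# Or just the last 100 lines
-sudo journalctl -u validator.service -n 100
-```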
-
-If the node is running correctly, you should see an output similar to below:
-
-```sh filename="output"
-2023-03-22 14:55:51 Tangle Standalone Node
-2023-03-22 14:55:51 ✌️ version 0.1.15-54624e3-aarch64-macos
-2023-03-22 14:55:51 ❤️ by Webb Technologies Inc., 2017-2023
-2023-03-22 14:55:51 📋 Chain specification: Tangle Testnet
-2023-03-22 14:55:51 🏷 Node name: cooing-morning-2891
-2023-03-22 14:55:51 👤 Role: FULL
-2023-03-22 14:55:51 💾 Database: RocksDb at /Users/local/Library/Application Support/tangle-standalone/chains/local_testnet/db/full
-2023-03-22 14:55:51 ⛓ Native runtime: tangle-standalone-115 (tangle-standalone-1.tx1.au1)
-2023-03-22 14:55:51 Bn254 x5 w3 params
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 generated 5 npos voters, 5 from validators and 0 nominators
-2023-03-22 14:55:51 [0] 💸 generated 5 npos targets
-2023-03-22 14:55:51 [0] 💸 new validator set of size 5 has been processed for era 1
-2023-03-22 14:55:52 🔨 Initializing Genesis block/state (state: 0xfd16…aefd, header-hash: 0x7c05…a27d)
-2023-03-22 14:55:52 👴 Loading GRANDPA authority set from genesis on what appears to be first startup.
-2023-03-22 14:55:53 Using default protocol ID "sup" because none is configured in the chain specs
-2023-03-22 14:55:53 🏷 Local node identity is: 12D3KooWDaeXbqokqvEMqpJsKBvjt9BUz41uP9tzRkYuky1Wat7Z
-2023-03-22 14:55:53 💻 Operating system: macos
-2023-03-22 14:55:53 💻 CPU architecture: aarch64
-2023-03-22 14:55:53 📦 Highest known block at #0
-2023-03-22 14:55:53 〽️ Prometheus exporter started at 127.0.0.1:9615
-2023-03-22 14:55:53 Running JSON-RPC HTTP server: addr=127.0.0.1:9933, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 Running JSON-RPC WS server: addr=127.0.0.1:9944, allowed origins=["http://localhost:*", "http://127.0.0.1:*", "https://localhost:*", "https://127.0.0.1:*", "https://polkadot.js.org"]
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.0.125/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.0.125/tcp/30305
-2023-03-22 14:55:53 discovered: 12D3KooWMr4L3Dun4BUyp23HZtLfxoQjR56dDp9eH42Va5X6Hfgi /ip4/192.168.88.12/tcp/30304
-2023-03-22 14:55:53 discovered: 12D3KooWNHhcCUsZTdTkADmDJbSK9YjbtscHHA8R4jvrbGwjPVez /ip4/192.168.88.12/tcp/30305
-```
-
-### Network sync
-
-After a validator node is started, it will begin syncing with the current chain state. Depending on the size of the chain, this step may take anywhere from a few minutes to a few hours.
-
-Example of node sync:
-
-```sh filename="output after synced" copy
-2021-06-17 03:07:39 🔍 Discovered new external address for our node: /ip4/10.26.16.1/tcp/30333/ws/p2p/12D3KooWLtXFWf1oGrnxMGmPKPW54xWCHAXHbFh4Eap6KXmxoi9u
-2021-06-17 03:07:40 ⚙️ Syncing 218.8 bps, target=#5553764 (17 peers), best: #24034 (0x08af…dcf5), finalized #23552 (0xd4f0…2642), ⬇ 173.5kiB/s ⬆ 12.7kiB/s
-2021-06-17 03:07:45 ⚙️ Syncing 214.8 bps, target=#5553765 (20 peers), best: #25108 (0xb272…e800), finalized #25088 (0x94e6…8a9f), ⬇ 134.3kiB/s ⬆ 7.4kiB/s
-2021-06-17 03:07:50 ⚙️ Syncing 214.8 bps, target=#5553766 (21 peers), best: #26182 (0xe7a5…01a2), finalized #26112 (0xcc29…b1a9), ⬇ 5.0kiB/s ⬆ 1.1kiB/s
-2021-06-17 03:07:55 ⚙️ Syncing 138.4 bps, target=#5553767 (21 peers), best: #26874 (0xcf4b…6553), finalized #26624 (0x9dd9…27f8), ⬇ 18.9kiB/s ⬆ 2.0kiB/s
-2021-06-17 03:08:00 ⚙️ Syncing 37.0 bps, target=#5553768 (22 peers), best: #27059 (0x5b73…6fc9), finalized #26624 (0x9dd9…27f8), ⬇ 14.3kiB/s ⬆ 4.4kiB/s
-```
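-
-You can also poll sync progress programmatically via the standard Substrate `system_syncState` RPC method. A sketch, assuming the default HTTP RPC address `127.0.0.1:9933`:
-
-```sh filename="sync state" copy
-curl -H "Content-Type: application/json" \
-  -d '{"id":1, "jsonrpc":"2.0", "method": "system_syncState", "params":[]}' \
-  http://127.0.0.1:9933
-# Returns startingBlock, currentBlock and highestBlock; the node is
-# synced once currentBlock catches up to highestBlock.
-```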
-
-### Bond TNT and set up your validator account
-
-After your node is synced, you are ready to set up keys and onboard as a validator. Make sure to complete the steps
-at [required keys](../required-keys.mdx) to start validating.
-
----
-
-Congratulations! You have officially set up a Tangle Network node using Systemd. If you are interested
-in learning how to set up monitoring for your node, please refer to the [monitoring](../monitoring/quickstart.mdx) page.
diff --git a/pages/docs/ecosystem-roles/validator/troubleshooting.mdx b/pages/docs/ecosystem-roles/validator/troubleshooting.mdx
deleted file mode 100644
index 5ebbeeac..00000000
--- a/pages/docs/ecosystem-roles/validator/troubleshooting.mdx
+++ /dev/null
@@ -1,108 +0,0 @@
----
-title: Troubleshooting
-description: Provides a series of suggested fixes for common issues when starting a Tangle node.
----
-
-# Logs
-
-If you would like to run the node with verbose logs, you may add the following arguments during initial setup. You may change the target to include `debug | error | info | trace | warn`.
-
-```
--ldkg=debug \
--ldkg_metadata=debug \
--lruntime::offchain=debug \
--ldkg_proposal_handler=debug \
--ldkg_proposals=debug
-```
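-
-These targets are appended to your usual start-up flags. For instance, a sketch assuming the `tangle-standalone` binary and testnet chain used elsewhere in these docs:
-
-```sh filename="verbose" copy
-./target/release/tangle-standalone \
-  --chain tangle-testnet \
-  --validator \
-  -ldkg=debug \
-  -lruntime::offchain=debug
-```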
-
-# Troubleshooting
-
-## P2P Ports Not Open
-
-If you don't see an Imported message (without the [Relaychain] tag), you need to check the P2P port configuration. The P2P port must be open to incoming traffic.
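-
-A quick local check (a sketch assuming the P2P port 30333 used in the Systemd guide, with standard `ss`/`nc` tooling):
-
-```sh filename="port check" copy
-# Confirm the node is listening on the P2P port
-sudo ss -tlnp | grep 30333
-# From another machine, confirm the port is reachable
-nc -vz <YOUR-SERVER-IP> 30333
-```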
-
-## In Sync
-
-Both chains must be in sync at all times, and you should see either Imported or Idle messages and have connected peers.
-
-## Genesis Mismatching
-
-If you notice similar log messages as below:
-
-```
-DATE [Relaychain] Bootnode with peer id `ID` is on a different
-chain (our genesis: 0x3f5... theirs: 0x45j...)
-```
-
-This typically means that you are running an older version and will need to upgrade.
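-
-After upgrading the binary, you can resync from scratch if needed. Substrate-based nodes ship a `purge-chain` subcommand that wipes the local chain database; a sketch, assuming the binary and chain used in these guides:
-
-```sh filename="purge" copy
-# You will be asked to confirm before the database is removed;
-# the keystore directory is typically left untouched.
-./target/release/tangle-standalone purge-chain --chain tangle-testnet
-```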
-
-## Troubleshooting for Apple Silicon users
-
-Install Homebrew if you have not already. You can check if you have it installed with the following command:
-
-```sh filename="brew" copy
-brew help
-```
-
-If you do not have it installed open the Terminal application and execute the following commands:
-
-```sh filename="install brew" copy
-# Install Homebrew if necessary https://brew.sh/
-/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
-
-# Make sure Homebrew is up-to-date, install openssl
-brew update
-brew install openssl
-```
-
-❗ **Note:** Native ARM Homebrew installations are only going to be supported at `/opt/homebrew`. After Homebrew installs, make sure to add `/opt/homebrew/bin` to your PATH.
-
-```sh filename="add PATH" copy
-echo 'export PATH=/opt/homebrew/bin:$PATH' >> ~/.bash_profile
-```
-
-An example `bash_profile` for reference may look like the following:
-
-```sh filename="export PATH" copy
-export PATH=/opt/homebrew/bin:$PATH
-export PATH=/opt/homebrew/opt/llvm/bin:$PATH
-export CC=/opt/homebrew/opt/llvm/bin/clang
-export AR=/opt/homebrew/opt/llvm/bin/llvm-ar
-export LDFLAGS=-L/opt/homebrew/opt/llvm/lib
-export CPPFLAGS=-I/opt/homebrew/opt/llvm/include
-export RUSTFLAGS='-L /opt/homebrew/lib'
-```
-
-In order to build **dkg-substrate** in `--release` mode using the `aarch64-apple-darwin` Rust toolchain, you need to set the following environment variables:
-
-```sh filename="export" copy
-echo 'export RUSTFLAGS="-L /opt/homebrew/lib"' >> ~/.bash_profile
-```
-
-Ensure the `gmp` dependency is installed correctly.
-
-```sh filename="install gmp" copy
-brew install gmp
-```
-
-If you are still receiving an issue with `gmp`, you may need to adjust your path to the `gmp` lib. Below is a suggested fix, but paths are machine / environment specific.
-
-Run:
-
-```sh filename="clean" copy
-cargo clean
-```
-
-Then:
-
-```sh filename="export" copy
-export LIBRARY_PATH=$LIBRARY_PATH:$(brew --prefix)/lib:$(brew --prefix)/opt/gmp/lib
-```
-
-This should be added to your `bash_profile` as well.
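-
-For example, mirroring the earlier `echo` pattern:
-
-```sh filename="export" copy
-echo 'export LIBRARY_PATH=$LIBRARY_PATH:$(brew --prefix)/lib:$(brew --prefix)/opt/gmp/lib' >> ~/.bash_profile
-```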
-
-Ensure the `protobuf` dependency is installed correctly.
-
-```sh filename="install protobuf" copy
-brew install protobuf
-```
diff --git a/pages/docs/ecosystem-roles/validator/validation.mdx b/pages/docs/ecosystem-roles/validator/validation.mdx
deleted file mode 100644
index 992f4571..00000000
--- a/pages/docs/ecosystem-roles/validator/validation.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-# Validation
-
-In a blockchain context, validating usually refers to the process performed by nodes (often called validators) in the network to ensure that transactions and blocks meet the necessary rules and protocols of the network. This can involve verifying that transactions are correctly signed, that they don't double-spend coins, and that newly created blocks are formatted correctly and include valid transactions. By validating data, transactions, or blocks, we ensure that the systems or networks in question operate as intended and maintain their integrity. In Proof-of-Stake systems, validators are often incentivized and rewarded with a portion of newly generated tokens, through inflation or otherwise.
-
-## Stepping into Responsibility
-
-Embarking on the journey to becoming a blockchain validator comes with considerable responsibility. As a validator, you are entrusted not only with your own stake but also the stake of those who nominate you. Any errors or breaches can lead to penalties known as slashing, impacting both your token balance and your standing within the network. However, being a validator can be immensely rewarding, offering you the opportunity to actively contribute to the security of a decentralized network and grow your digital assets.
-
-## Proceed with Caution
-
-We strongly advise that you possess substantial system administration experience before choosing to run your own validator. The role goes beyond merely running a blockchain binary; it requires the ability to address and resolve technical issues and anomalies independently. Running a validator is as much about knowledge as it is about problem-solving skills.
-
-## Security: Your Priority
-
-Security is paramount when running a successful validator. You should thoroughly familiarize yourself with the secure validator guidelines to understand the considerations when setting up your infrastructure. As you grow and evolve as a validator, these guidelines can serve as a foundation upon which you build your modifications and customizations.
-
-## Your Support Network
-
-Remember, you are not alone in this journey. We encourage you to connect with the [Webb community](https://webb.tools/community). These communities are teeming with experienced team members and fellow validators who are more than willing to answer questions, provide insights, and share valuable experiences. Additionally, you will want to make community members aware of your validator services, so they can nominate their stake to you.
-
-Embarking on the validator journey is both challenging and rewarding. With careful preparation, a strong understanding of the associated responsibilities and risks, and the support of the community, you can make significant contributions to the Webb ecosystem.
diff --git a/pages/docs/tangle-network/governance/_meta.json b/pages/docs/governance/_meta.json
similarity index 87%
rename from pages/docs/tangle-network/governance/_meta.json
rename to pages/docs/governance/_meta.json
index 37887bcf..1ac29b09 100644
--- a/pages/docs/tangle-network/governance/_meta.json
+++ b/pages/docs/governance/_meta.json
@@ -4,6 +4,5 @@
"governance-interfaces": "Governance Interfaces",
"governance-parameters": "On-chain Governance Parameters",
"proposal-creation": "Create a Proposal",
- "how-to-vote-on-tangle": "How to Vote",
"governance-procedures": "Other Procedures"
}
diff --git a/pages/docs/tangle-network/governance/democracy-voting.mdx b/pages/docs/governance/democracy-voting.mdx
similarity index 100%
rename from pages/docs/tangle-network/governance/democracy-voting.mdx
rename to pages/docs/governance/democracy-voting.mdx
diff --git a/pages/docs/tangle-network/governance/governance-interfaces.mdx b/pages/docs/governance/governance-interfaces.mdx
similarity index 100%
rename from pages/docs/tangle-network/governance/governance-interfaces.mdx
rename to pages/docs/governance/governance-interfaces.mdx
diff --git a/pages/docs/tangle-network/governance/governance-parameters.mdx b/pages/docs/governance/governance-parameters.mdx
similarity index 100%
rename from pages/docs/tangle-network/governance/governance-parameters.mdx
rename to pages/docs/governance/governance-parameters.mdx
diff --git a/pages/docs/tangle-network/governance/governance-procedures.mdx b/pages/docs/governance/governance-procedures.mdx
similarity index 100%
rename from pages/docs/tangle-network/governance/governance-procedures.mdx
rename to pages/docs/governance/governance-procedures.mdx
diff --git a/pages/docs/tangle-network/governance/overview.mdx b/pages/docs/governance/overview.mdx
similarity index 100%
rename from pages/docs/tangle-network/governance/overview.mdx
rename to pages/docs/governance/overview.mdx
diff --git a/pages/docs/tangle-network/governance/proposal-creation.mdx b/pages/docs/governance/proposal-creation.mdx
similarity index 100%
rename from pages/docs/tangle-network/governance/proposal-creation.mdx
rename to pages/docs/governance/proposal-creation.mdx
diff --git a/pages/docs/index.mdx b/pages/docs/index.mdx
index cecb7f41..077c836b 100644
--- a/pages/docs/index.mdx
+++ b/pages/docs/index.mdx
@@ -1,33 +1,146 @@
---
-title: Webb Quickstart
-description: Explore the Webb ecosystem with a few easy steps!
+title: Overview, Use Cases and Actions
+description: Explore the Tangle Network ecosystem at a glance, including use cases such as interoperable shielded pools, identity-based systems, decentralized private finance, and secure multi-party computation, as well as features and roadmap.
---
import { QuickStartArea, DappsArea, DeployArea } from "../../components/QuickStart";
import { RepoArea } from "../../components/RepoArea";
import FullWebbCTA from "../../components/FullWebbCTA";
-import Callout from "../../components/Callout";
+import { UseCasesArea, ParticipateArea, TestNetworkArea } from "../../components/UseCasesTangle";
-# Webb Quickstart
+# Tangle Network Overview
-Webb is an ecosystem of **privacy protocols and products for a multi-chain world**.
+## Introduction
-In this site, you can find information about the protocols we are actively building, the open-source repos we maintain, and
-other privacy related research and ideas we aim to integrate into our community. Interested in joining our community? Visit
-the below links to join!
+The Tangle Network is a specialized platform engineered to support and govern cross-chain Zero-Knowledge (ZK) applications. Leveraging the robust Substrate blockchain framework, the Tangle Network forms the core infrastructure to facilitate a new generation of privacy-enhancing and governance-decentralized applications.
-
+The Tangle Network's unique offering centers around the intersection of cross-chain functionality, familiar EVM tooling and compatibility, advanced governance systems, and the world of ZK applications. Our network seamlessly merges these distinct elements, creating an ecosystem that amplifies the strengths of each component.
+
+## Key Features
+
+**Cross-Chain Functionality and EVM Compatibility**
+
+The Tangle Network breaks traditional chain boundaries, allowing seamless interaction, data exchange, and operation of ZK applications across different blockchain networks. Leveraging the power of the Ethereum Virtual Machine (EVM) on Substrate, developers can utilize familiar EVM tooling and compatibility to build decentralized applications (DApps), create Non-Fungible Tokens (NFTs), and utilize ERC20 tokens across multiple networks.
+
+**Advanced Governance**
+
+The Tangle Network implements an innovative governance model based on a Distributed Key Generation (DKG) protocol. The DKG protocol serves as a security instrument for the Tangle Network's cross-chain Anchor zkApps, ensuring the validity of bridge updates with crypto-economically secured threshold signatures.
+
+DKG is a cryptographic method where multiple entities collaboratively produce a shared public and private key. In the Tangle Network, DKG fortifies the governance of distributed applications, particularly the Anchor System. It bolsters the security of the network by ensuring the integrity of signed messages via threshold signatures. This not only provides resistance against potential threats but also amplifies the Tangle Network's credibility.
+
+**Privacy-Enhancing ZK Applications**
+
+Privacy is a paramount concern in the Tangle Network. By providing an infrastructure for Zero-Knowledge (ZK) applications, we enable users to experience a new generation of privacy-enhancing functionalities. ZK applications empower users to transact, communicate, and interact privately while maintaining the security and immutability of blockchain technology.
+
+**Built on Substrate**
+
+The Tangle Network is built on Substrate, an advanced blockchain framework renowned for its flexibility, scalability, and cutting-edge features. This strategic choice ensures that our platform remains at the forefront of speed, security, and scalability, serving as a reliable backbone for the Tangle Network. Leveraging Substrate's modular architecture, we enable seamless interaction and interoperability with other blockchain networks.
+
+---
+
+## Use Cases
+
+The Tangle Network and Webb Protocol have been designed to serve a variety of use cases. A majority of these applications revolve around enhancing privacy and improving cross-chain interoperability. Here are some key proposed applications for the Tangle Network and Webb Protocol:
+
+**Interoperable Shielded Pools**
+
+The most immediate application is the creation of private bridges for assets, or "interoperable shielded pools." The decentralized, updatable Tangle Network is ideal for maintaining the state of a set of bridge anchors, making it possible to design the data inserted into these anchors around an asset system. Unspent transaction outputs (UTXOs) are inserted into anchors, and users can spend these UTXOs privately across the chains the bridge exists over.
+
+**Interoperable Membership Groups**
+
+The Tangle Network and Webb Protocol can support interoperable membership groups. These are communities that exist across chains and leverage privacy. A natural implementation might look like an interoperable Semaphore system, where anyone in the Semaphore membership group can relay a vote or response to a poll from any chain privately, and potentially without even needing a wallet on that chain.
+
+**Interoperable Badge System**
+
+An identity-based application, an interoperable badge system, could use expressive data blobs for arbitrary proofs of ownership, participation, and identity. Using Webb's technology stack, these badges can be proven to exist from anywhere, enabling new types of composable application development due to the zero-knowledge and private nature of data disclosure.
+
+**Variable Asset Anchor System**
+
+This system allows for interoperable shielded pools, wherein users can transfer arbitrary amounts of assets privately between blockchains. The Variable Asset Anchor System uses zero-knowledge proofs and is similar to a shielded UTXO system, but with cross-chain capabilities.
+
+**Semaphore Anchor System**
+
+Semaphore is a popular zero-knowledge protocol that enables members of an on-chain community to create anonymous signals using zero-knowledge proofs of membership in the community’s identity set. This concept can be extended to a cross-chain identity set, allowing any member of a set of non-fungible token (NFT) communities to register.
+
+**Identity-Based Variable Asset Anchor System**
+
+By combining the Semaphore Identity protocol and the Variable Asset protocol, a cross-chain shielded pool application over a restricted identity set can be designed. This creates a private transaction system where only users with proofs of membership in a cross-chain identity system can transact. This opens up possibilities for even more diverse use cases in the realm of zero-knowledge applications.
+
+In addition, with the integration of threshold Elliptic Curve Digital Signature Algorithm (ECDSA), the Tangle Network can sign arbitrary Bitcoin transactions, Ethereum transactions, and more. It can also be extended to include a Bitcoin bridge into the system with a proper custody rotation, or to sign messages for other smart contracts across chains for governance purposes.
+
+**Decentralized Private Finance (Private DeFi)**
-## dApps
+Decentralized Finance, or DeFi, has experienced significant growth over the past few years, facilitating peer-to-peer financial transactions without intermediaries. However, DeFi transactions on public blockchains are often not private, posing privacy concerns for users. Leveraging the Tangle Network's and Webb Protocol's privacy-preserving capabilities, we can establish decentralized private DeFi applications.
-We are actively building amazing products for our community. Check out the below dApps to start. Have feedback to share about a Webb
-product? We want to hear from you, share your thoughts [here](https://github.com/webb-tools/feedback/discussions/categories/webb-dapp-feedback).
+These allow users to make transactions while keeping their financial activities private. They can engage in yield farming, liquidity provision, lending, borrowing, and other DeFi operations while remaining anonymous. The added privacy benefits could attract users that prefer to keep their financial activities private due to security concerns, thus expanding the overall user base of DeFi.
-
- Please keep in mind that these dApps are in active development, and may not be fully functional. If you find a bug, please report it [here](https://github.com/webb-tools/webb-dapp/issues/new?assignees=&labels=&template=bug_report.md&title=).
-
+**Secure Multi-Party Computation (SMPC)**
-
+Secure Multi-Party Computation (SMPC) is an area of cryptography concerned with enabling multiple parties to jointly compute a function over their inputs while keeping those inputs private. With its cryptographic properties, Tangle Network can be extended to facilitate SMPC.
+
+For example, consider a consortium of companies willing to collaboratively train a machine learning model using their data without exposing sensitive information to each other. By leveraging SMPC on the Tangle Network, these companies can jointly compute the machine learning model without revealing their individual datasets.
+
+This not only preserves privacy but also fosters collaboration between different entities that would otherwise be reluctant to share sensitive data.
+
+**Cross-chain Atomic Swaps**
+
+In the current state of the blockchain ecosystem, transferring assets between different blockchains (cross-chain) often involves centralized exchanges or trusted intermediaries. With the Tangle Network and Webb Protocol, we can enable cross-chain atomic swaps with enhanced privacy.
+
+An atomic swap is a smart contract technology that enables the exchange of one cryptocurrency for another without using centralized intermediaries. Users will be able to privately and securely exchange tokens between different blockchains directly. For instance, a user can exchange Bitcoin for Ethereum directly from their wallets without an exchange, ensuring privacy and reducing the reliance on intermediaries.
+
+**Private and Secure Messaging Systems**
+
+In today's digital age, privacy and security in communication are paramount. With the Tangle Network's zero-knowledge proofs and privacy-oriented architecture, we can develop a private and secure messaging system.
+
+In this system, all communications would be encrypted and can be securely stored across multiple blockchains. This would ensure that only the intended recipients can read the messages. Additionally, the decentralized nature of the system would make it resistant to censorship and control by any single entity.
+
+**Privacy-Preserving Data Marketplace**
+
+Data is often referred to as the "new oil." However, data transactions can be challenging due to privacy and trust concerns. By leveraging the Tangle Network, we can establish a privacy-preserving data marketplace.
+
+In this marketplace, data sellers can list their datasets without revealing the actual data. Using zero-knowledge proofs, they can provide evidence of the data's authenticity and other characteristics. Buyers, on the other hand, can verify these proofs and make purchases without exposing their identities. The entire transaction can be managed on-chain, ensuring fairness and transparency while preserving privacy.
+
+**Decentralized Identity Systems (DID)**
+
+Identity is fundamental in both the physical and digital worlds. However, traditional identity systems are often centralized and vulnerable to attacks. The Tangle Network can support Decentralized Identity Systems (DID), offering privacy, control, and cross-chain compatibility.
+
+In a DID system on the Tangle Network, each user can generate a self-sovereign identity that could be used across different blockchains. With zero-knowledge proofs, users can prove certain attributes of their identity without revealing any unnecessary personal information. This would not only enhance privacy but also give users full control over their identities, avoiding reliance on any single authority.
+
+These use cases showcase the versatility and potential of the Tangle Network and Webb Protocol in various sectors, underscoring its ability to drive forward both privacy and interoperability in the blockchain space.
+
+## Roadmap
+
+The following is subject to change as DAO governance supports different initiatives.
+
+**Phase 1**
+
+- Test Runtime Upgrade
+- Finalize Tangle Token Distribution
+- Launch Incentivized testnet
+
+**Phase 2**
+
+- Update Tangle Genesis for Launch
+- Distribute TNT Tokens
+- Launch Democracy Governance
+- Launch OFAC VAnchor Bridges
+
+**Phase 3**
+
+- Launch Cross-chain Transfers
+- Validator Staking Upgrades
+- Launch Semaphore VAnchor bridges
+
+**Phase 4**
+
+- Remove Sudo
+- Improve Relayer & Proposer Security
+
+## Participate
+
+<UseCasesArea />
+<ParticipateArea />
+<TestNetworkArea />
+
## Repositories
diff --git a/pages/docs/learn/_meta.json b/pages/docs/learn/_meta.json
new file mode 100644
index 00000000..05039e0c
--- /dev/null
+++ b/pages/docs/learn/_meta.json
@@ -0,0 +1,7 @@
+{
+ "webb-protocol": "Webb Protocol on Tangle",
+ "incentives": "Incentives, Staking and Slashing",
+ "understanding-dkg-tangle": "Intro to Distributed Key Generation (DKG)",
+ "distributed-key-gen": "DKG In-Depth",
+ "tss-governance": "TSS In-Depth"
+}
diff --git a/pages/docs/concepts/distributed-key-gen.mdx b/pages/docs/learn/distributed-key-gen.mdx
similarity index 98%
rename from pages/docs/concepts/distributed-key-gen.mdx
rename to pages/docs/learn/distributed-key-gen.mdx
index 6a86443b..ce4548e5 100644
--- a/pages/docs/concepts/distributed-key-gen.mdx
+++ b/pages/docs/learn/distributed-key-gen.mdx
@@ -5,7 +5,6 @@ description: Overview of a distributed key generation protocol for governing Web
import DKGKeygenImages from '../../../components/images/DKGKeygenProtocol'
import DKGSigningImages from '../../../components/images/DKGSigningProtocol'
-import { DKGImpl } from "../../../components/RepoArea";
import Callout from "../../../components/Callout";
# Distributed Key Generation Protocol
@@ -71,5 +70,3 @@ relayer? We want to hear from you, share your thoughts [here](https://github.com
Please keep in mind that this repo is in active development, and may not be fully functional. If you find a bug, please report it [here](https://github.com/webb-tools/dkg-substrate/issues/new/choose).
-
-
diff --git a/pages/docs/tangle-network/learn/incentives.mdx b/pages/docs/learn/incentives.mdx
similarity index 100%
rename from pages/docs/tangle-network/learn/incentives.mdx
rename to pages/docs/learn/incentives.mdx
diff --git a/pages/docs/concepts/tss-governance.mdx b/pages/docs/learn/tss-governance.mdx
similarity index 100%
rename from pages/docs/concepts/tss-governance.mdx
rename to pages/docs/learn/tss-governance.mdx
diff --git a/pages/docs/tangle-network/learn/understanding-dkg-tangle.mdx b/pages/docs/learn/understanding-dkg-tangle.mdx
similarity index 100%
rename from pages/docs/tangle-network/learn/understanding-dkg-tangle.mdx
rename to pages/docs/learn/understanding-dkg-tangle.mdx
diff --git a/pages/docs/tangle-network/learn/webb-protocol.mdx b/pages/docs/learn/webb-protocol.mdx
similarity index 100%
rename from pages/docs/tangle-network/learn/webb-protocol.mdx
rename to pages/docs/learn/webb-protocol.mdx
diff --git a/pages/docs/tangle-network/node/_meta.json b/pages/docs/node/_meta.json
similarity index 100%
rename from pages/docs/tangle-network/node/_meta.json
rename to pages/docs/node/_meta.json
diff --git a/pages/docs/tangle-network/node/docker-node.mdx b/pages/docs/node/docker-node.mdx
similarity index 99%
rename from pages/docs/tangle-network/node/docker-node.mdx
rename to pages/docs/node/docker-node.mdx
index 52744cd4..3fdd9a5b 100644
--- a/pages/docs/tangle-network/node/docker-node.mdx
+++ b/pages/docs/node/docker-node.mdx
@@ -3,7 +3,7 @@ title: Deploying with Docker
description: Deploy a Tangle node with only a few steps using Docker.
---
-import Callout from "../../../../components/Callout";
+import Callout from "../../../components/Callout";
import { Tabs } from 'nextra/components';
# Deploying a Tangle Network Node with Docker
diff --git a/pages/docs/tangle-network/node/flags.mdx b/pages/docs/node/flags.mdx
similarity index 100%
rename from pages/docs/tangle-network/node/flags.mdx
rename to pages/docs/node/flags.mdx
diff --git a/pages/docs/tangle-network/node/hardware.mdx b/pages/docs/node/hardware.mdx
similarity index 100%
rename from pages/docs/tangle-network/node/hardware.mdx
rename to pages/docs/node/hardware.mdx
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/_meta.json b/pages/docs/node/monitoring/_meta.json
similarity index 100%
rename from pages/docs/ecosystem-roles/validator/monitoring/_meta.json
rename to pages/docs/node/monitoring/_meta.json
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/alert-manager.mdx b/pages/docs/node/monitoring/alert-manager.mdx
similarity index 98%
rename from pages/docs/ecosystem-roles/validator/monitoring/alert-manager.mdx
rename to pages/docs/node/monitoring/alert-manager.mdx
index a6b4664a..26f849ae 100644
--- a/pages/docs/ecosystem-roles/validator/monitoring/alert-manager.mdx
+++ b/pages/docs/node/monitoring/alert-manager.mdx
@@ -3,8 +3,8 @@ title: Alert Manager Setup
description: Create alerts to notify the team when issues arise.
---
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
+import { Tabs, Tab } from "../../../../components/Tabs";
+import Callout from "../../../../components/Callout";
# Alert Manager Setup
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/grafana.mdx b/pages/docs/node/monitoring/grafana.mdx
similarity index 98%
rename from pages/docs/ecosystem-roles/validator/monitoring/grafana.mdx
rename to pages/docs/node/monitoring/grafana.mdx
index 916cb9ac..8d69c3e0 100644
--- a/pages/docs/ecosystem-roles/validator/monitoring/grafana.mdx
+++ b/pages/docs/node/monitoring/grafana.mdx
@@ -3,8 +3,8 @@ title: Grafana Dashboard Setup
description: Create visual dashboards for the metrics captured by Prometheus.
---
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
+import { Tabs, Tab } from "../../../../components/Tabs";
+import Callout from "../../../../components/Callout";
# Grafana Setup
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/loki.mdx b/pages/docs/node/monitoring/loki.mdx
similarity index 98%
rename from pages/docs/ecosystem-roles/validator/monitoring/loki.mdx
rename to pages/docs/node/monitoring/loki.mdx
index 31d92fa6..9180b220 100644
--- a/pages/docs/ecosystem-roles/validator/monitoring/loki.mdx
+++ b/pages/docs/node/monitoring/loki.mdx
@@ -3,8 +3,8 @@ title: Loki Log Management
description: A service dedicated to aggregating and querying system logs.
---
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
+import { Tabs, Tab } from "../../../../components/Tabs";
+import Callout from "../../../../components/Callout";
# Loki Log Management
diff --git a/pages/docs/ecosystem-roles/validator/monitoring/prometheus.mdx b/pages/docs/node/monitoring/prometheus.mdx
similarity index 99%
rename from pages/docs/ecosystem-roles/validator/monitoring/prometheus.mdx
rename to pages/docs/node/monitoring/prometheus.mdx
index bbcb3f74..21f9eacc 100644
--- a/pages/docs/ecosystem-roles/validator/monitoring/prometheus.mdx
+++ b/pages/docs/node/monitoring/prometheus.mdx
@@ -3,8 +3,8 @@ title: Prometheus Setup
description: Setup Prometheus for scraping node metrics and more.
---
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
+import { Tabs, Tab } from "../../../../components/Tabs";
+import Callout from "../../../../components/Callout";
# Prometheus Setup
diff --git a/pages/docs/tangle-network/node/monitoring/quickstart.mdx b/pages/docs/node/monitoring/quickstart.mdx
similarity index 93%
rename from pages/docs/tangle-network/node/monitoring/quickstart.mdx
rename to pages/docs/node/monitoring/quickstart.mdx
index a39eae5b..eed71663 100644
--- a/pages/docs/tangle-network/node/monitoring/quickstart.mdx
+++ b/pages/docs/node/monitoring/quickstart.mdx
@@ -3,8 +3,8 @@ title: Quickstart
description: Creating monitoring stack for Tangle node.
---
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
+import { Tabs, Tab } from "../../../../components/Tabs";
+import Callout from "../../../../components/Callout";
# Monitoring Tangle Node
@@ -56,4 +56,4 @@ docker compose up -d
You can then navigate to `http://localhost:3000` to access the Grafana dashboard!
-![Tangle Dashboard](../../../../../components/images/tangle-metrics.png)
+![Tangle Dashboard](../../../../components/images/tangle-metrics.png)
diff --git a/pages/docs/tangle-network/node/node-software.mdx b/pages/docs/node/node-software.mdx
similarity index 100%
rename from pages/docs/tangle-network/node/node-software.mdx
rename to pages/docs/node/node-software.mdx
diff --git a/pages/docs/tangle-network/node/quicknode.mdx b/pages/docs/node/quicknode.mdx
similarity index 100%
rename from pages/docs/tangle-network/node/quicknode.mdx
rename to pages/docs/node/quicknode.mdx
diff --git a/pages/docs/tangle-network/node/quickstart.mdx b/pages/docs/node/quickstart.mdx
similarity index 86%
rename from pages/docs/tangle-network/node/quickstart.mdx
rename to pages/docs/node/quickstart.mdx
index 1d290a43..83f85c21 100644
--- a/pages/docs/tangle-network/node/quickstart.mdx
+++ b/pages/docs/node/quickstart.mdx
@@ -3,9 +3,9 @@ title: Node Operator Quickstart
description: Participate in the Webb ecosystem by deploying a Tangle node to validate, serve data, and more.
---
-import { QuickDeployArea, DeployArea, SupportArea, MonitoringArea } from "../../../../components/TangleQuickstart"
-import { RepoArea } from "../../../../components/RepoArea";
-import FullWebbCTA from "../../../../components/FullWebbCTA";
+import { QuickDeployArea, DeployArea, SupportArea, MonitoringArea } from "../../../components/TangleQuickstart"
+import { RepoArea } from "../../../components/RepoArea";
+import FullWebbCTA from "../../../components/FullWebbCTA";
# Node Operator Quickstart
diff --git a/pages/docs/tangle-network/node/systemd.mdx b/pages/docs/node/systemd.mdx
similarity index 100%
rename from pages/docs/tangle-network/node/systemd.mdx
rename to pages/docs/node/systemd.mdx
diff --git a/pages/docs/tangle-network/node/troubleshooting.mdx b/pages/docs/node/troubleshooting.mdx
similarity index 100%
rename from pages/docs/tangle-network/node/troubleshooting.mdx
rename to pages/docs/node/troubleshooting.mdx
diff --git a/pages/docs/tangle-network/node/validator/_meta.json b/pages/docs/node/validator/_meta.json
similarity index 100%
rename from pages/docs/tangle-network/node/validator/_meta.json
rename to pages/docs/node/validator/_meta.json
diff --git a/pages/docs/tangle-network/node/validator/proxyaccount.mdx b/pages/docs/node/validator/proxyaccount.mdx
similarity index 100%
rename from pages/docs/tangle-network/node/validator/proxyaccount.mdx
rename to pages/docs/node/validator/proxyaccount.mdx
diff --git a/pages/docs/tangle-network/node/validator/requirements.mdx b/pages/docs/node/validator/requirements.mdx
similarity index 100%
rename from pages/docs/tangle-network/node/validator/requirements.mdx
rename to pages/docs/node/validator/requirements.mdx
diff --git a/pages/docs/ecosystem-roles/validator/validator-rewards.mdx b/pages/docs/node/validator/validator-rewards.mdx
similarity index 100%
rename from pages/docs/ecosystem-roles/validator/validator-rewards.mdx
rename to pages/docs/node/validator/validator-rewards.mdx
diff --git a/pages/docs/overview/_meta.json b/pages/docs/overview/_meta.json
deleted file mode 100644
index 5c255e48..00000000
--- a/pages/docs/overview/_meta.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "intro-to-webb": "Intro to the Webb",
- "privacy-manifesto": "Privacy Manifesto",
- "webb-protocol-technical-introduction": "Technical Overview of Webb Protocol",
- "understanding-webb": "Understanding Webb for Newbies"
-}
diff --git a/pages/docs/overview/intro-to-webb.mdx b/pages/docs/overview/intro-to-webb.mdx
deleted file mode 100644
index 6d2d7a35..00000000
--- a/pages/docs/overview/intro-to-webb.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
----
-title: Overview
-description: An outline describing Webb's motivations.
----
-
-import { ProtocolImpl } from "../../../components/RepoArea";
-
-# Introduction to Webb
-
-Welcome to Webb, an ecosystem of privacy protocols and products designed to extend privacy to the blockchain space. Webb is committed to a future where privacy is accessible to everyone in the multi-chain world.
-
-## Ethos
-
-At Webb, we regard privacy as a fundamental human right. As we increasingly participate in the multi-chain world, from transferring assets to buying NFTs, it becomes crucial that our activities are not exposed to just **anyone with an internet connection,** just as our day-to-day activities are private to the world. Webb is dedicated to changing this status quo by building and contributing to privacy tools for blockchains.
-
-## Mission
-
-Webb's primary goal is to improve privacy, particularly focusing on cross-chain activities. The majority of our efforts are directed toward developing our private bridge protocol, the Webb Anchor System, and the Tangle Network, a decentrally-governed platform for cross-chain zero-knowledge Applications, as well as several other protocols and tools that are entered into the open-source ecosystem for the use and research of all.
diff --git a/pages/docs/overview/privacy-manifesto.mdx b/pages/docs/overview/privacy-manifesto.mdx
deleted file mode 100644
index d44d2b05..00000000
--- a/pages/docs/overview/privacy-manifesto.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: Privacy Manifesto
-description: A manifesto outlining Webb's stance on privacy.
----
-
-# Privacy Manifesto
-
-> Privacy is necessary for an open society in the electronic age. Privacy is not secrecy. A private matter is something one doesn't want the whole world to know, but a secret matter is something one doesn't want anybody to know. Privacy is the power to selectively reveal oneself to the world. — Eric Hughes
-
-The future of privacy on the Internet is a [collective action problem](https://en.wikipedia.org/wiki/Collective_action_problem). When more of us care about our data privacy — how our data is used, viewed, and tracked — the greater we collectively benefit. Collaborating towards a private future is essential to increasing our odds of success in creating a more fair and open Internet.
-
-Privacy is crucial for a fair and healthy internet. The ability to selectively disclose and control our data directly protects our own interests and the interests of our peers. It leads to open spaces for sharing ideas, collaborating on work, and voicing our opinions on controversial issues. It also creates better closed spaces for new ideas that require separation to emerge. We need more networked tools to reclaim our privacy; tools that grow privacy through positive-sum behavior. Networked tools for privacy create network effects, which are key for sustainable privacy creation.
-
-Privacy on the internet is traditionally seen through a pessimistic lens involving large technology companies. These monolithic technology companies collect troves of our data and lock them behind insecure doors. The incessant cycle of data breaches and hacks can only be seen as a sign that these companies do not value our privacy no matter what they say. This can change within the Web3 ecosystem. We can set an example of how to build systems to value users’ privacy and how to do so sustainably and with balanced incentives.
-
-Privacy in Web3 is also currently fragmented. Each privacy project commands its own, independent privacy sets. Each privacy project competes for roughly the same set of users, liquidity, and data. This independence of privacy sets creates a zero-sum environment for privacy creation.
-
-We can create a new narrative of privacy on the Internet and in Web3. We can create a future of privacy that is not only secure, but also empowering and connected. We can create a future that enables us to express our true selves, without fear of retribution or exploitation, using anonymous identities. We can create a future that enables us to protect our data, and to make it work for us through the use of zero-knowledge proofs. We can create a future that makes privacy a priority, and encourages all of us to be a part of the solution through collaborative incentives. Together, we can create a future of privacy that is truly revolutionary.
-
-We must take collective action to make our privacy a priority. We must advocate for laws that protect our data and that provide us with control over who has access to it. We must build tools and services that prioritize privacy and provide users with clear, understandable options for how their data is used. We must create incentives for projects, companies, and organizations to build privacy into their products and do so in a connected way to benefit the maximum number of people. We must create a community with open and honest dialogue around privacy, and we must create culture that values privacy as an essential part of our digital lives. We must take collective action to create a future of privacy, and we must do it together.
-
-That is why we believe in building connected private applications that span all possible communities. Using interoperable tools that connect and grow privacy collaboratively, we can unite ourselves together towards a common and beneficial goal. We can realize one of the core visions of cryptocurrency technology; to reclaim power over our financial lives and identities. That is why Webb exists; to build the privacy tools that bridge us together.
-
-We believe the future of privacy is connected. We believe the future of privacy is anonymous and accountable. We are here to build systems that connect blockchains together with collaborative privacy technology. Technology that anyone from anywhere can participate in and, most importantly, benefit from.
-
-Here's how we realize our vision:
-
-1. **Zero-knowledge / Private bridges** - We build bridges between privacy sets to grow privacy for any user on any compatible blockchain within the same system. Your data lives on our bridges in an anonymous, attributed and secure representation that only you can prove properties of whenever you want.
-2. **Fully open source** - All our work is fully open source so that it's clear what goes on underneath the hood. We are fully committed to this vision and will continue to innovate in public. Check out our [Github](https://github.com/webb-tools) and [Documentation Site](https://docs.webb.tools/) for up-to-date work.
-3. **Collaborative incentives** - We believe that privacy creation is needed for a fair and sustainable Internet. We are designing our protocols and tools with collaborative incentives across development, deployment, community creation, and privacy creation. Anyone who deploys and integrates with us will not only increase the maximum achievable privacy for their community but will also become a key participant in our collective mission to increase our privacy together.
diff --git a/pages/docs/overview/understanding-webb.mdx b/pages/docs/overview/understanding-webb.mdx
deleted file mode 100644
index 700c39aa..00000000
--- a/pages/docs/overview/understanding-webb.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
-# Webb Protocol Overview
-
-Webb Protocol can be thought of as a vast, interconnected city - "Webb City." This city is not isolated but has bridges (Webb Bridges) connecting it to other cities (blockchains).
-
-The **Tangle Network** acts as the city's administration - a group of trusted individuals who are elected and ensure that every transaction is approved and validated, keeping the city secure and organized. They provide the security and validation for the **Webb Bridges** and the **Anchor System**, ensuring the integrity of cross-chain transactions and the privacy of user data.
-
-**Distributed Key Generation (DKG)** is like our city's council selection process. Instead of an election, we have a cryptographic lottery that picks a group from many potential council members to form the Tangle Network. This process is integral to maintaining the decentralization and fairness of the system. The DKG process is the core governance framework for the Tangle Network, ensuring the network remains decentralized and secure.
-
-These selected members, using a protocol called **Threshold Secret Sharing (TSS)**, share the responsibility for a 'master key' that governs critical operations within the network. The key is never whole unless a specific number (threshold) of these council members agree to combine their parts. This prevents any single member from wielding full control and ensures our city's operations are secure and democratic. TSS plays a pivotal role in securing **Webb Shielded Pools**, ensuring that withdrawal credentials are secure.
-
-The **Anchor System**, like the city's post office, records every incoming and outgoing package (transaction) but only in hashed form, making it impossible to know the sender or receiver's real identity. It relies on the Tangle Network to verify and validate these transactions, with TSS and DKG vital for maintaining its decentralized and secure nature.
-
-The **Oracle and Relayer Networks** act as the postmen of Webb City. The Oracle postmen listen for new packages (transactions) and relay the information to the post office (Anchor System). The Relayer postmen ensure that the citizens can send and receive packages without ever revealing their identity. These networks operate under the governance of the Tangle Network, with the shared responsibility of TSS enabling their trustless operation.
-
-Lastly, **Stats DApp** is the control room of the city. Here, the administrators can monitor the city's operations, including the number of packages coming in and going out, the performance of the postmen, and other important statistics.
-
-## Component Breakdown
-
-- **Webb Bridges:** Webb Bridges connect different blockchains, allowing for interoperability across multiple networks.
-- **Tangle Network:** The Tangle Network is a decentralized network of nodes that validate and approve transactions within the Webb ecosystem.
-- **Distributed Key Generation (DKG):** The DKG is a method of generating a shared secret among a group of participants, each of which holds a unique piece of the secret.
-- **Anchor System:** The Anchor System connects merkle trees on different blockchains and maintains the latest state of each anchor in the network.
-- **Oracle Network:** The Oracle Network listens for new transactions in the Webb ecosystem, relaying this information to the appropriate entities.
-- **Relayer Network:** The Relayer Network facilitates transaction execution on behalf of users, maintaining user privacy.
-- **Stats DApp:** The Stats DApp is a dashboard that presents various statistics about the Webb ecosystem, including transaction volumes, governance proposals, and network health metrics.
-
-## Conclusion
-
-Together, all of these components work in harmony to ensure privacy, interoperability, and efficiency in the Webb Protocol. This intricate interconnection of components, with DKG and TSS at its core, is what makes the Webb Protocol a powerful tool for privacy in the blockchain space. It is the concerted effort of all these parts, working under the governance of the Tangle Network and the security of TSS, that brings the dream of a truly private, secure, and interoperable digital world into reality.
-
-The power of the Webb Protocol lies not in its individual components, but in their orchestration. Each component relies on and enhances the others, like gears in a well-oiled machine. From the democratic and secure key generation of DKG, to the shared responsibility of TSS, and the vigilant monitoring of the Stats DApp, all systems are intertwined to build an environment where privacy is not an option but a fundamental right.
-
-Webb Bridges leverage the inherent security of this system to connect different blockchains securely and privately, and the Anchor System employs the same principles to provide a private and reliable cross-chain transaction record. The Oracle and Relayer Networks are key cogs in this machine, maintaining the integrity of transactions and user privacy.
-
-This is the essence of the Webb Protocol – a collective system designed with decentralization, privacy, and security at its heart, each component working together in a harmonious dance that ensures a seamless, private, and secure digital experience. Welcome to Webb City, where your privacy is respected, your transactions are secure, and interoperability is as natural as the sunrise.
diff --git a/pages/docs/overview/webb-protocol-technical-introduction.mdx b/pages/docs/overview/webb-protocol-technical-introduction.mdx
deleted file mode 100644
index 831f5323..00000000
--- a/pages/docs/overview/webb-protocol-technical-introduction.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
-# Technical Introduction to Webb Protocol
-
-The Webb Protocol is a transformative solution to existing privacy and scalability issues in the realm of decentralized applications (dApps). By enabling enhanced privacy, interoperability, and scalability, the Webb Protocol offers an efficient, privacy-focused framework for a wide range of decentralized applications on a global scale.
-
-In pursuit of creating a scalable, privacy-centric system, the Webb Protocol is crafted to interlink various blockchains through two core components: the Anchor System and the Distributed Key Generation (DKG) protocol. The Anchor System, based on smart contracts and a graph-like framework, maintains a network of interconnected blockchains, each referred to as an anchor. Each anchor possesses an on-chain Merkle tree and an edge list for storing and updating linked metadata, thus facilitating interoperable and potentially private cross-chain applications.
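-
-Conceptually, an anchor's state can be pictured as a Merkle root plus an edge list. The following is a minimal illustrative sketch; the field names are assumptions for exposition, not the actual protocol-solidity layout.
-
-```
-// Illustrative data shape only: each anchor maintains its own Merkle
-// tree and one "edge" of linked metadata per connected chain.
-interface Edge {
-  chainId: bigint;          // identifier of the linked chain
-  root: bigint;             // latest known Merkle root of the linked anchor
-  latestLeafIndex: number;  // progress marker for that anchor's tree
-}
-
-interface AnchorState {
-  merkleRoot: bigint; // root of this anchor's on-chain Merkle tree
-  edges: Edge[];      // edge list storing linked metadata
-}
-```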
-
-Simultaneously, the DKG protocol acts as the security backbone of the Webb Protocol, validating updates in the Anchor System using a shared key pair. This protocol employs a cryptographic multi-party mechanism to generate a shared public and private key used for threshold signatures and threshold encryptions. Its architecture involves two significant independent protocols: the threshold-key-generation protocol and the threshold-signature-scheme (distributed signing) protocol. Through these, the DKG provides a secure framework for generating and handling signed messages and ensuring the absence of malicious proposals.
-
-The Anchor System also showcases a sophisticated design with two primary properties, Liveness and Safety, guaranteeing the continuous update of all anchors and validating all anchor updates. The system connects different blockchains in a partially-synchronous environment using Oracle and Relayer Networks. These networks ensure the relay of changes across all anchors and preserve user privacy by delegating proof submission through an overlay network.
-
-Working within a Proof of Stake (PoS) blockchain protocol, the Webb Protocol intelligently adapts to changing validator set sizes and maintains security with the execution of the Key Rotation Protocol and Misbehaviour Protocol during each session. The system operates through an array of standardized messages facilitating seamless communication within the system.
-
-A distinguishing feature of the Webb Protocol is its utilization of a hybrid threshold and light-client validation protocol, allowing for threshold-signed messages to be considered valid while enforcing message signing only when they can be verified as true against a light client.
-
-Overall, the Webb Protocol provides a comprehensive, resilient, and privacy-preserving framework for connecting multiple blockchains, maintaining security, validating updates, and facilitating efficient interaction between various decentralized applications. The implementations of the blockchain and Anchor Systems are open-source, fostering collaboration and continuous improvement of this groundbreaking protocol.
diff --git a/pages/docs/projects/_meta.json b/pages/docs/projects/_meta.json
deleted file mode 100644
index 36c0a9be..00000000
--- a/pages/docs/projects/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "hubble-bridge": "Hubble Bridge",
- "stats-dapp": "Stats Dapp",
- "compliance-statement": "Compliance and Security"
-}
diff --git a/pages/docs/projects/compliance-statement.mdx b/pages/docs/projects/compliance-statement.mdx
deleted file mode 100644
index 5b04bfbb..00000000
--- a/pages/docs/projects/compliance-statement.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
-# Compliance and Security
-
-### User Restrictions
-
-Tangle Network and its associated cross-chain privacy bridge are designed for legitimate and lawful use only, and are not to be exploited for criminal or illicit activities. Users are obliged to adhere to all laws and regulations of their respective jurisdiction. Usage by politically exposed persons or their associates is strictly prohibited as per prevailing laws.
-
-Before using Tangle Network and its services, users must confirm and accept the following terms:
-
-- If acting individually, you are of legal age as specified in your jurisdiction.
-- You are not a politically exposed person currently in or recently retired from a prominent public role.
-- You are not an immediate family member or closely associated with a politically exposed person.
-- You are not involved in money laundering or financing of terrorism.
-- Your access to the network doesn't violate any rules, laws, regulations, or directives in your country of residence or jurisdiction.
-- You have never been arrested or convicted of any offence or crime.
-- You are prepared to verify your identity upon request.
-
-### User Responsibility
-
-Use of Tangle Network and its cross-chain privacy bridge is solely at the risk of the user. The codebase, autonomous protocol, and decentralized networks are all open-source and are not controlled by any specific team. Contributors to Tangle Network are not liable for any loss or damage resulting from inherent risks associated with cryptography, blockchain, and digital wallet usage.
-
-The user agrees not to bypass location-based restrictions through the use of any technology. Users bear the responsibility of reporting and paying any relevant taxes. Any addresses suspected of malpractice can be blocked.
-
-### Geographical Restrictions
-
-While Tangle Network and Hubble Bridge are in their Alpha stage with minimal restrictions, users must comply with their local, state, and federal laws. We strongly advise checking this before using the network. Certain jurisdictions may be programmatically restricted due to local regulatory authorities' policies that prevent anonymous transactions.
-
-### Anonymity
-
-The Tangle token is not an anonymity-enhanced cryptocurrency in itself; however, when used with our cross-chain privacy bridge, privacy is enabled. For the safety of all users, those from certain jurisdictions may be prohibited from accessing the protocol. Post-alpha, restrictions may be implemented for additional countries based on legal considerations.
-
-### Wallet Compliance
-
-We may in the future pursue integrations to ensure that wallets interacting with the protocol do not contain funds from illicit sources. In that future case, suspicious wallets may be barred from depositing into the protocol, and funds may not be withdrawn to such wallets.
diff --git a/pages/docs/projects/hubble-bridge/_meta.json b/pages/docs/projects/hubble-bridge/_meta.json
deleted file mode 100644
index c8f85e43..00000000
--- a/pages/docs/projects/hubble-bridge/_meta.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "overview": "Overview",
- "usage-guide": "How to Use Hubble Bridge"
-}
diff --git a/pages/docs/projects/hubble-bridge/overview.mdx b/pages/docs/projects/hubble-bridge/overview.mdx
deleted file mode 100644
index d00641f7..00000000
--- a/pages/docs/projects/hubble-bridge/overview.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: Overview
-description: An overview of the Webb Hubble bridge.
----
-
-import { DappsAreaBridge } from "../../../../components/QuickStart";
-
-# Overview
-
-The Webb Hubble Bridge provides an easy-to-use, friendly interface for users to privately move assets cross-chain. Underneath the interface we
-leverage Webb's Shielded Asset Protocol, a cross-chain private transaction system for privately moving and transferring assets between blockchains
-that obfuscates a user's transaction history through the use of cross-chain zero-knowledge proofs. To learn more about Webb's Shielded Asset Protocol,
-please refer to this [page](/docs/protocols/single-asset-shielded-pool/overview/).
-
-The interface provided makes it incredibly easy for users to deposit into shielded pools, transfer shielded assets, and withdraw assets from the pool, while also
-preserving privacy for the user. Check out our usage guides for an in-depth look at how to use the bridge.
-
-
-
-## Tips to stay anonymous
-
-It is important to note that Webb's implementation only provides obfuscation and privacy for on-chain actions. However, it is the user
-who should take the necessary precautions to protect their privacy. The below list contains only suggestions and is not exhaustive.
-
-- Use a VPN or Tor to hide your IP for all actions within the dApp
-- Store your note in a safe place
-- Make sure you clear cookies for dApps before using your new address
-- Wait some time before withdrawing your deposit
-- Use multiple addresses
-- If making multiple deposits / withdrawals, spread them out accordingly
-- Spread deposits and withdrawals across a 24hr period
-
-The items above are merely suggestions and are not required to interact with Webb's Hubble bridge.
diff --git a/pages/docs/projects/hubble-bridge/usage-guide/_meta.json b/pages/docs/projects/hubble-bridge/usage-guide/_meta.json
deleted file mode 100644
index f4f83576..00000000
--- a/pages/docs/projects/hubble-bridge/usage-guide/_meta.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "account": "Account",
- "deposit": "Deposit",
- "manage-notes": "Manage Notes",
- "withdraw": "Withdraw",
- "transfer": "Transfer"
-}
diff --git a/pages/docs/projects/hubble-bridge/usage-guide/account.mdx b/pages/docs/projects/hubble-bridge/usage-guide/account.mdx
deleted file mode 100644
index 8f50722a..00000000
--- a/pages/docs/projects/hubble-bridge/usage-guide/account.mdx
+++ /dev/null
@@ -1,72 +0,0 @@
----
-title: Accounts on Hubble Bridge
-description: An overview of accounts on Hubble Bridge
----
-
-import { ConnectWallet, ConnectedWallet, SelectSourceChain, WalletComponent, CreateNoteAccount, CreateNoteAccountPrompt, SignatureRequestPrompt, AccountCongrats } from '../../../../../components/images/bridge/usage-guide/account/Account';
-
-# Accounts on Hubble Bridge
-
-The Hubble Bridge dApp allows users to manage their notes with an easy-to-use interface. Prior to depositing
-into the bridge, users are required to create a `NoteAccount`. The `NoteAccount` is easily created by having the user
-sign a message using their MetaMask account. More precisely, the MetaMask `NoteAccount` uses the first 32 bytes of the signed message
-from the MetaMask account as the secret seed. It is with this keypair that users are able to fetch, spend, and manage their
-notes right from the interface!
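-
-As a rough sketch of this derivation (hypothetical, not the dApp's actual implementation; it assumes ethers v5 with an injected MetaMask provider):
-
-```
-// Hypothetical sketch: derive a 32-byte NoteAccount seed from a signed
-// MetaMask message, as described above. Assumes ethers v5 in a browser.
-import { ethers } from 'ethers';
-
-async function deriveNoteAccountSeed(message: string): Promise<Uint8Array> {
-  const provider = new ethers.providers.Web3Provider((window as any).ethereum);
-  const signer = provider.getSigner();
-  const signature = await signer.signMessage(message); // prompts MetaMask
-  // The first 32 bytes of the signed message become the secret seed.
-  return ethers.utils.arrayify(signature).slice(0, 32);
-}
-```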
-
-## Creating a `NoteAccount` with MetaMask
-
-The first step to interacting with the bridge dApp is to connect a MetaMask wallet and create a `NoteAccount` using your preferred
-MetaMask account. The below steps outline how to go about creating a `NoteAccount`.
-
-### **1. Connect MetaMask Wallet**
-
-When you enter the bridge you will be greeted with the following screen. The button on the header indicates that you are required to
-connect a wallet.
-
-
-
-
-
-### **2. Select Wallet to Connect**
-
-After selecting the 'Connect wallet', you will be prompted to connect to your desired wallet provider.
-For the purposes of this walkthrough we will utilize MetaMask. Select MetaMask and wait to be prompted
-by your MetaMask extension. If you do not have MetaMask installed, you will need to install it prior to
-depositing into the bridge.
-
-
-
-
-
-### **3. Creating Note Account**
-
-Once connected to your MetaMask account, you will need to create a NoteAccount using the connected MetaMask account.
-To do so, select the 'Create Note Account' button as shown below.
-
-
-
-
-
-Once selected, you will be prompted to agree to Webb's terms and conditions. Select the confirm checkbox, followed by
-'Create Note Account'.
-
-
-
-
-
-### **4. Sign MetaMask Message**
-
-Upon selecting to 'Create Note Account', users will be prompted by MetaMask to sign a message from Webb. As mentioned above,
-we use the first 32 bytes of the signed message from the MetaMask account as the secret seed to create the `NoteAccount` keypair.
-
-
-
-
-
-Once signed, you will have successfully created your Webb Note Account!
-
-
-
-
-
-Now that you have created a `NoteAccount` you can make a deposit into the bridge, view, and manage all your notes from the interface!
diff --git a/pages/docs/projects/hubble-bridge/usage-guide/deposit.mdx b/pages/docs/projects/hubble-bridge/usage-guide/deposit.mdx
deleted file mode 100644
index 85a5834e..00000000
--- a/pages/docs/projects/hubble-bridge/usage-guide/deposit.mdx
+++ /dev/null
@@ -1,124 +0,0 @@
----
-title: Deposits on Hubble Bridge
-description: An overview of deposits on Hubble Bridge
----
-
-import { SelectSourceChain, TokenSelection, WrapDeposit, Deposit, DepositAmount, ConfirmDeposit, InprogessDeposit, CopiedNote, SuccessDeposit, SelectDestinationChain } from '../../../../../components/images/bridge/usage-guide/deposits/Deposits';
-
-# Deposit on Hubble Bridge
-
-This guide will provide a step-by-step process to assist in making a deposit on the Hubble Bridge.
-
-## Deposit Inputs
-
-You have created your `Note Account` and are now ready to make a deposit into the Hubble Bridge. You
-will notice the **Deposit** button to be disabled initially. We must provide the details of our transaction
-before we can deposit.
-
-
-
-
-
-### **1. Select Source Chain**
-
-To get started first select a source chain. Ensure that you have an available balance on the chain that you
-select.
-
-
-
-
-
-### **2. Select Token**
-
-Once you have selected the source chain we will need to select what token to deposit into the bridge. Clicking
-on the token selection component will display a list of tokens.
-
-
-
-
-
-### **3. Wrap and Deposit**
-
-Be advised that deposits into the bridge are wrapped into a Webb-based asset. For example, if you select `WETH` to be
-deposited, it will be wrapped in a Webb wrapped asset (e.g. `webbETH`). This is required to facilitate bridging of
-assets across different chains. It's important to note that you can unwrap and withdraw back into `WETH` on the destination
-chain as long as there is liquidity available on that destination chain.
-
-
-
-
-
-### **4. Select Destination Chain**
-
-You must specify a destination chain during the deposit flow. Be advised that you cannot change the destination chain
-after the deposit transaction is made, and it is only from this destination chain that you can withdraw. Select
-the desired destination chain by clicking the destination chain component.
-
-
-
-
-
-### **5. Input Amount**
-
-With the Hubble bridge you can deposit variable amounts. Input the amount you would like to deposit into the
-Hubble bridge. If you want to deposit the entire available balance simply click **Max** for a quick input.
-
-
-
-
-
-We now have all the inputs necessary to make a deposit. Click the **Deposit** button to proceed in confirming the
-details of the transaction.
-
-## Confirming Deposit Details
-
-Upon clicking the **Deposit** button you will be presented with the details of your transaction. It is important to
-review these transaction details to ensure they're accurate.
-
-### **1. Verify Deposit Details**
-
-Review the transaction details of the deposit. Pay close attention to the destination chain and amount displayed.
-
-
-
-
-
-### **2. Confirm Copied Note**
-
-Within the deposit confirmation view, you will need to confirm that you have copied the spend note. The spend note should
-be kept in confidence as anyone who gains access to your spend note will be able to withdraw your funds from the
-bridge. Your notes are stored locally as you transact through this application as well as encrypted on-chain for persistent storage.
-
-
-
-
-
-Once you have confirmed that you have copied the spend note you may proceed to click **Wrap And Deposit** to initiate the transaction.
-
-### **3. Deposit In-Progress**
-
-Before the deposit transaction can be sent to the selected source chain, it transitions through the following stages:
-
-- Fetch transaction leaves
-- Generate zero-knowledge proof
-- Confirm transaction with connected wallet
-- Send transaction to selected source chain
-
-You may observe the progress of each stage within the deposit in-progress view. If you would like to proceed with
-an additional transaction you can click **New Transaction** and begin on your next transaction. The Hubble dApp
-conveniently keeps a notification card with transaction details and progress on the right side to keep you informed
-when the transaction is successful.
-
-
-
-
-
-### **4. Successful Deposit**
-
-When the deposit transaction is completed, you can observe a successful confetti-filled message in the notification card!
-If you want to see the transaction in a block explorer, click on the **Successfully Deposited** link within the
-notification card.
-
-
-
-
diff --git a/pages/docs/projects/hubble-bridge/usage-guide/manage-notes.mdx b/pages/docs/projects/hubble-bridge/usage-guide/manage-notes.mdx
deleted file mode 100644
index 8707deba..00000000
--- a/pages/docs/projects/hubble-bridge/usage-guide/manage-notes.mdx
+++ /dev/null
@@ -1,85 +0,0 @@
----
-title: Managing Notes on Hubble Bridge
-description: An overview of how to manage your notes on Hubble Bridge
----
-
-import { AvailableNotesManage, AvailableNotesQuick, AvailableSpendNotes, ShieldedAssets } from '../../../../../components/images/bridge/usage-guide/manage-notes/Notes';
-
-# Managing Notes
-
-For each deposit, transfer, and withdrawal a note is generated to represent a user balance. For Webb's Hubble Bridge
-we make use of the UTXO model for handling user balances. Each note contains the following attributes:
-
-- the protocol name (e.g. `vanchor`),
-- private key,
-- blinding value,
-- merkle path index,
-- chainID,
-- public key,
-- commitment,
-- nullifier,
-- a value that represents the amount of funds that can be spent and effectively transferred to another user
-
-An example note may look similar to the following:
-
-```
-webb://v1:vanchor/1099522782887:1099522782887/0x965c94a6e1b713e751164d2ba09aa0306f48ee74:0x965c94a6e1b713e751164d2ba09aa0306f48ee74/0000010000aa36a7:0000000000000000003c6568f12e8000:1d81f339efc951a8d19133c4a050159662b28d4021e75c66af29cb93f427b331:003feb82b9da3e335d6312f94670ea0317035d34c4addb31fad674af1f4c8059/?curve=Bn254&width=5&exp=5&hf=Poseidon&backend=Circom&token=webbETH&denom=18&amount=17000000000000000
-```
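-
-As an illustration, a note in this serialization can be split into its parts with a small parser. The sketch below is hypothetical and only mirrors the layout of the example above; the Webb SDK ships its own note (de)serialization.
-
-```
-// Hypothetical parser for the note layout shown above (illustrative only).
-interface ParsedNote {
-  scheme: string;                  // e.g. "webb"
-  protocol: string;                // e.g. "v1:vanchor"
-  chainIds: string[];              // [sourceChainId, targetChainId]
-  identifyingData: string[];       // [source, target] identifying data
-  secrets: string[];               // colon-separated secret parts
-  params: Record<string, string>;  // curve, width, amount, ...
-}
-
-function parseNote(serialized: string): ParsedNote {
-  const [scheme, rest] = serialized.split('://');
-  const [body, query = ''] = rest.split('/?');
-  const [protocol, chains, ids, secretString] = body.split('/');
-  const params: Record<string, string> = {};
-  for (const [key, value] of new URLSearchParams(query)) params[key] = value;
-  return {
-    scheme,
-    protocol,
-    chainIds: chains.split(':'),
-    identifyingData: ids.split(':'),
-    secrets: secretString.split(':'),
-    params,
-  };
-}
-```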
-
-These notes are intended to be kept in confidence, anyone who gains access to your notes can withdraw your funds from
-the Hubble Bridge. Your notes are stored locally as you transact through this application as well as encrypted on-chain
-for persistent storage. The below guide provides a walkthrough of how to manage your notes.
-
-## Shielded Assets Table
-
-Once you have made deposit(s) into the Hubble Bridge we conveniently populate the Shielded Assets table with the
-relevant data points of all your notes.
-
-
-
-
-
-The columns of the above pictured table represent the following:
-
-- **Chain:** the destination chain that the shielded assets may be withdrawn from
-- **Shielded Asset:** the name of the shielded asset that you have claim to in the pool
-- **Composition:** the configuration of tokens that make up the pool in which your shielded assets exist in
-- **Available Balance:** the total sum of available balance to be withdrawn across all your notes for that given shielded asset
-- **Note Found:** the sum of all notes for a given shielded asset
-- **Action:** quick action shortcuts user can make use of for deposit, transfer and withdrawal
-
-The shielded asset table provides you with a holistic view of your various shielded assets that exist within the Hubble Bridge.
-
-## Available Spend Notes Table
-
-For every transaction you have made with the Hubble Bridge, a note is generated. The Available Spend Notes table provides a record of each of
-your notes so you can conveniently access and view them along with their relevant data. Simply put, the table provides a list
-of your own available spend notes.
-
-
-
-
-
-The columns of the above pictured table represent the following:
-
-- **Chain:** the destination chain that the shielded assets may be withdrawn from
-- **Shielded Asset:** the name of the shielded asset that you have claim to in the pool
-- **Composition:** the configuration of tokens that make up the pool in which your shielded assets exist in
-- **Available Balance:** the available balance to be withdrawn for the given shielded asset
-- **Subsequent Deposits:** the number of deposits that occurred after your own deposit. The more deposits that occur after your own, the greater the privacy achieved.
-- **Note:** the actual note representing your claim to your deposited amount
-
-The available spend table provides you with a holistic view of each of your available spend notes.
-
-Selecting the chevron icon next to your note will display a dropdown menu with quick action shortcuts users can make use of for transfer and withdrawal.
-
-
-
-
-
-To upload or download individual notes, you can simply click on the **Manage** button to display a dropdown menu to
-**Upload** or **Download** a given note.
-
-
-
-The Hubble Bridge makes it incredibly easy to manage and review all your deposits through these two tables.
diff --git a/pages/docs/projects/hubble-bridge/usage-guide/refund.mdx b/pages/docs/projects/hubble-bridge/usage-guide/refund.mdx
deleted file mode 100644
index c1e6e325..00000000
--- a/pages/docs/projects/hubble-bridge/usage-guide/refund.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
-# Refunds
-
-## Introduction to Refunds
-
-Refund is a feature on the Hubble Bridge that makes it easier to use your withdrawn tokens immediately while preserving your privacy.
-
-When users are in the shielded pool with shielded balances and they want to withdraw shielded ERC-20 tokens to a clean address, they encounter a challenge. The clean address they wish to withdraw to will have zero native tokens (ETH). Consequently, if they want to use the native tokens for gas to perform transactions on the blockchain, they would need to make another separate withdrawal from the bridge to obtain the necessary gas.
-
-## Solution - The Refund Feature:
-
-The Refund feature addresses this problem by allowing users to negotiate with the relayer of the bridge. Users can propose to pay a higher fee to the relayer in exchange for receiving gas tokens. This enables them to perform an atomic swap with the relayer, obtaining the necessary gas for their transactions.
-
-The Refund feature provides flexibility and convenience to users, giving them two options:
-
-1. **Getting a Refund:**
- Users can choose to receive a refund instead of withdrawing the shielded ERC-20 tokens to the clean address. By opting for a refund, they can negotiate with the relayer to pay a higher fee and receive gas tokens in return. This allows them to have the necessary native tokens (ETH) for performing transactions directly from the shielded pool, without the need for an additional withdrawal from the bridge.
-2. **Withdrawing Native Tokens:**
- Alternatively, users can choose to proceed with the withdrawal of shielded ERC-20 tokens to the clean address. In this case, they would need to perform _another separate withdrawal_ from the bridge to obtain the native tokens required for gas. **This option may be more suitable if users prefer to have the shielded ERC-20 tokens in a separate wallet or address from their native tokens.**
-
-The Refund feature thus offers users the flexibility to choose between obtaining a refund with gas tokens or performing a separate withdrawal for native tokens, depending on their specific requirements and preferences.
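-
-To make the trade-off concrete, here is a back-of-the-envelope sketch of the refund accounting. All numbers and names are hypothetical; actual fees and exchange rates are quoted by the relayer.
-
-```
-// Hypothetical refund accounting (illustrative numbers only): the relayer
-// deducts its base fee plus the refunded native amount (priced in the
-// wrapped asset) from the shielded withdrawal, and sends that native
-// amount to the recipient address for gas.
-const withdrawAmount = 1.0;    // shielded webbETH being withdrawn
-const baseRelayerFee = 0.005;  // relayer's normal fee, in webbETH
-const refundAmount = 0.02;     // native ETH requested for gas
-const rate = 1.0;              // webbETH per ETH, quoted by the relayer
-
-const totalFee = baseRelayerFee + refundAmount * rate;
-const received = withdrawAmount - totalFee; // tokens sent to the clean address
-console.log({ received, refundedNative: refundAmount });
-```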
diff --git a/pages/docs/projects/hubble-bridge/usage-guide/transfer.mdx b/pages/docs/projects/hubble-bridge/usage-guide/transfer.mdx
deleted file mode 100644
index 0fd353ca..00000000
--- a/pages/docs/projects/hubble-bridge/usage-guide/transfer.mdx
+++ /dev/null
@@ -1,133 +0,0 @@
----
-title: Transfers on Hubble Bridge
-description: An overview of Transfers on Hubble Bridge
----
-
-import { TransferToken, TransferDestination, TransferAmount, TransferRelayer, TransferRecipient, TransferConfirm, TransferChangeNoteCopied, TransferInprogress, TransferSuccess, TransferAddress } from '../../../../../components/images/bridge/usage-guide/transfer/Transfer';
-
-import { Transfer, SelectSourceChain, SelectShieldedPool, Amount, SelectDestinationChain, SelectRelayer, NoRelayer, PublicKey, Recipient, Refund, Confirm, InProgress, Success } from '../../../../../components/images/bridge/usage-guide/transfer/Transfer'
-
-# Transfers on Hubble Bridge
-
-This guide will provide a step-by-step process to assist in making a transfer on the Hubble Bridge.
-
-## Transfer Inputs
-
-You have made a deposit on the bridge and would like to transfer your shielded assets to another recipient. It is important to note that
-transferring shielded assets requires the recipient to have a shielded address, and that assets never leave the shielded
-pool during transfer operations.
-
-You will notice the **Transfer** button to be disabled initially. We must provide the details of our transaction
-before we can transfer.
-
-
-
-
-### **1. Select Source Chain**
-
-To get started we want to make sure we are connected to the network of the source chain of the shielded assets we're looking to transfer.
-
-
-
-
-### **2. Select Asset to Transfer**
-
-Once you have selected the desired chain to transfer from we will need to select what shielded asset to transfer. Clicking on the token selection component will display the available assets to transfer from within the shielded pools. These are assets that you have initiated in the deposit flow.
-
-_Note: The amount indicated next to the shield icon displays your `shielded balance` of the indicated token on the connected chain._
-
-
-
-
-### **3. Input Amount**
-
-Input the amount you would like to transfer to the recipient. The amount must be less than or equal to the amount of the note. If you want to transfer the entire available balance simply click `Max` for a quick input.
-
-
-
-
-### **4. Select Destination Chain**
-
-You must specify a destination chain for the transfer operation. Be advised that you cannot change the destination chain after the transfer transaction is made, and it is only from this destination chain that the recipient will be able to withdraw. Select the desired destination chain by clicking the destination chain component.
-
-
-
-
-### **5. Select Relayer _(Optional)_**
-
-Using a `relayer` by default for transfers maximizes privacy. Relayers essentially act as intermediaries that broadcast transactions to the network. By doing so, relayers hide the original IP address, which could otherwise potentially be used to link identity to blockchain transactions.
-
-
-
-
-The bridge has preselected a default relayer for simplicity but also allows users to select their own relayer if they prefer _(or not use a relayer at all if they choose)_.
-
-
-
-
-### **6. Enter Recipient Shielded Address [public key]**
-
-The recipient shielded address is the public key for the note account you wish to transfer your shielded assets to. You can view your own shielded address by clicking on the wallet connection at the top. See below image for details.
-
-
-
-
-Once you have obtained the recipient's shielded address, input it into the **Recipient Address** component. Once you have input the address, you will notice that the **Transfer** button will be enabled.
-
-
-
-
-### **7. Enable Refund + Input Recipient _(Optional)_**
-
-The refund feature enables native tokens to be sent to the provided recipient address so that they maintain their privacy and are able to execute subsequent transactions (e.g. pay for gas).
-
-In our ecosystem we call this mechanism a refund: when a transaction uses a relayer, the user will also be given the option to receive native tokens in exchange for the wrapped asset. The relayer will have a configured option indicating the max refund amount for any given supported chain.
-
-
-
-
-## Confirming Transfer Details
-
-Upon clicking the **Transfer** button you will be presented with the details of your transaction. It is important to
-review these transaction details to ensure they're accurate.
-
-### **1. Verify Transfer Details**
-
-Review the transaction details of the transfer. Pay close attention to the recipient address you have provided and ensure its accuracy.
-
-
-
-
-### **2. Confirm Change Note is Copied**
-
-Within the transfer in-progress view, you will need to confirm that you have copied the change note. A change note is a new note generated from your remaining balance. The change note should
-be kept in confidence as anyone who gains access to the note will be able to withdraw your funds from the
-bridge. Your notes are stored locally as you transact through this application as well as encrypted on-chain for persistent storage.
-
-
-
-
-### **3. Transfer In-Progress**
-
-Before the transfer transaction can be completed, it transitions through the following stages:
-
-- Fetch transaction leaves
-- Generate zero-knowledge proof
-- Send transaction to recipient address
-
-You may observe the progress of each stage within the transfer in-progress view. If you would like to proceed with
-an additional transaction you can click **New Transaction** and begin on your next transaction. The Hubble dApp
-conveniently keeps a notification card with transaction details and progress on the right side to keep you informed
-when the transaction is successful.
-
-
-
-
-### **4. Successful Transfer**
-
-When the transfer transaction is completed, you can observe a successful confetti-filled message in the notification card!
-If you want to see the transaction in a block explorer, click on the **Successfully Transfer** link within the
-notification card.
-
-
-
diff --git a/pages/docs/projects/hubble-bridge/usage-guide/withdraw.mdx b/pages/docs/projects/hubble-bridge/usage-guide/withdraw.mdx
deleted file mode 100644
index 8b0769da..00000000
--- a/pages/docs/projects/hubble-bridge/usage-guide/withdraw.mdx
+++ /dev/null
@@ -1,148 +0,0 @@
----
-title: Withdrawals on Hubble Bridge
-description: An overview of Withdrawals on Hubble Bridge
----
-
-import { Withdraw, SelectSourceChain, SelectShieldedPool, FixedAmount, CustomAmount, SelectToken, SelectRelayer, CustomRelayer, NoRelayer, Recipient, Refund, Confirm, Confirmed, InProgress, Success } from '../../../../../components/images/bridge/usage-guide/withdraw/Withdraw';
-
-# Withdrawals on Hubble Bridge
-
-This guide will provide a step-by-step process to assist in making a withdrawal on the Hubble Bridge.
-
-## Withdrawal Inputs
-
-You have made a deposit on the bridge and are now ready to withdraw from the shielded pool. It is important to note that,
-to protect and preserve your privacy, you should wait some time for subsequent deposits to be made prior to withdrawing
-your funds. This will ensure that your withdrawal transaction is not linked to your deposit transaction.
-
-You will notice the **Withdraw** button to be disabled initially. We must provide the details of our transaction
-before we can withdraw.
-
-
-
-
-### **1. Select Source Chain**
-
-To get started we want to make sure we are connected to the network of the source chain of the shielded assets we're looking to withdraw from. Note that you will be withdrawing to the connected network. In this case, Polygon Mumbai.
-
-
-
-
-### **2. Select Shielded Pool to Withdraw From**
-
-Once we have selected the network we wish to withdraw from, we will need to select what token to withdraw from the shielded pool. In this example, the user has 1.45 webbETH on Polygon Mumbai.
-
-
-
-
-_Note: The amount displays your availability on the connected network. If you switch network, the amount availability will update accordingly._
-
-### **3. Select Amount**
-
-#### _3.1. Fixed Amount_
-
-To maximize privacy, the default state for the withdrawal amount is set to fixed amounts: it is harder for an outside observer to link deposits to withdrawals when many transactions have the same amount. This technique is often referred to as "denominations" in privacy protocols.
-
-
-
-
-#### _3.2. Custom Amount_
-
-To withdraw a custom amount, the user will need to toggle the switch component. This enables arbitrary amount input for withdrawal.
-
-
-
-
-_Note: while arbitrary amounts provide flexibility and convenience, they can make it easier for an outside observer to link the withdrawal to the deposit, as a unique withdrawal amount could serve as a fingerprint that makes the transaction traceable._
-
-### **4. Select Withdrawal Token**
-
-When we make a deposit on the Hubble bridge, the deposited asset is wrapped into a Webb-based asset. Thus, during the withdrawal process we can either unwrap the asset into a supported underlying asset of the specified shielded pool or we can withdraw the Webb wrapped asset as is (e.g. `webbETH`).
-
-
-
-
-_Note: in order to unwrap and withdraw the underlying asset there must be sufficient liquidity on the destination chain. Otherwise, you will be required to withdraw the Webb wrapped asset as is or wait for sufficient liquidity on that particular chain. That does **not** mean you are unable to unwrap and withdraw the underlying asset completely, only that you will need to withdraw the Webb wrapped asset and then perform a separate transaction to unwrap the asset on a destination chain that has sufficient liquidity if you want to withdraw the underlying asset (e.g. `ETH`)._
-
-### **5. Update Relayer _(Optional)_**
-
-By default, the withdrawal method uses a preselected relayer, as relayers act as intermediaries that broadcast transactions to the network on behalf of users, maximizing privacy.
-
-The bridge has preselected a default relayer for simplicity but also allows users to select their own relayer if they prefer.
-
-
-
-
-We can also use a custom relayer by inputting a custom relayer URL in the search component. Please ensure the custom relayer is compatible with the connected network (the network you intend to transact on) for the system to return search result(s).
-
-
-
-
-If you are not using a relayer, and are instead using your wallet as the preferred withdrawal method by enabling the `No Relayer` option, make sure that you have sufficient tokens to pay the gas fee. The anonymity of the withdrawal will be compromised without the use of a relayer. It is recommended to use a relayer.
-
-
-
-
-_Note: please ensure you have sufficient balance in native tokens to pay for gas fees if you are not using a relayer._
-
-### **6. Input Recipient Wallet Address**
-
-To maximize the privacy of your transaction it is recommended to use a new address (clean address) for withdrawal. A **"clean address"** refers to a new or unused address that has no transaction history associated with it. This means that it cannot be linked to any previous transactions, making it more private.
-
-
-
-
-### **7. Enable Refund + Input Recipient _(Optional)_**
-
-The refund feature enables native tokens to be sent to the provided recipient address so that they maintain their privacy and are able to execute subsequent transactions (e.g. pay for gas).
-
-In our ecosystem we call this mechanism a refund: when users select a relayer, they will also be given the option to receive native tokens in exchange for the wrapped asset. The relayer will have a configured option indicating the max refund amount for any given supported chain.
-
-
-
-
-## Confirming Withdraw Details
-
-Upon clicking the **Withdraw** button you will be presented with the details of your transaction. It is important to
-review these transaction details to ensure they're accurate.
-
-### **1. Verify Withdraw Details**
-
-Review the transaction details of the withdrawal. Pay close attention to the recipient address you have provided and ensure its accuracy.
-
-
-
-
-### **2. Confirm Change Note is Copied**
-
-Within the withdraw in-progress view, you will need to confirm that you have copied the change note. A change note is a new note generated from your remaining balance. The change note should
-be kept in confidence as anyone who gains access to the note will be able to withdraw your funds from the
-bridge. Your notes are stored locally as you transact through this application as well as encrypted on-chain for persistent storage.
-
-
-
-
-### **3. Withdrawal In-Progress**
-
-Before the withdrawal transaction can be completed, it transitions through the following stages:
-
-- Fetch transaction leaves
-- Generate zero-knowledge proof
-- Send transaction to recipient address
-
-You may observe the progress of each stage within the withdraw in-progress view. If you would like to proceed with
-an additional transaction you can click **New Transaction** and begin on your next transaction. The Hubble dApp
-conveniently keeps a notification card with transaction details and progress on the right side to keep you informed
-when the transaction is successful.
-
-
-
-
-### **4. Successful Withdrawal**
-
-When the withdrawal transaction is completed, you can observe a successful confetti-filled message in the notification card!
-If you want to see the transaction in a block explorer, click on the **Successfully Withdraw** link within the
-notification card.
-
-
-
diff --git a/pages/docs/projects/stats-dapp/_meta.json b/pages/docs/projects/stats-dapp/_meta.json
deleted file mode 100644
index cc11c7e7..00000000
--- a/pages/docs/projects/stats-dapp/_meta.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "overview": "Overview",
- "usage-guide": "How to Use the Stats Dapp"
-}
diff --git a/pages/docs/projects/stats-dapp/overview.mdx b/pages/docs/projects/stats-dapp/overview.mdx
deleted file mode 100644
index c4746f1b..00000000
--- a/pages/docs/projects/stats-dapp/overview.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: Overview
-description: An overview of the Webb Stats dApp.
----
-
-import { RepoArea, StatsdApp } from "../../../../components/RepoArea";
-
-# Overview
-
-The Webb Stats dApp provides a simple interface to view the latest data from Webb networks; more specifically, it provides insight into the
-Distributed Key Generation (DKG) protocol that runs within Webb-based chains such as Tangle Network and the Tangle Network Test Network. The dApp provides visibility into the current
-state of the DKG protocol, the active key present, the current authorities, signed proposals, and more.
-
-The dApp frontend is built using React, and the backend is built using [SubQuery](https://subquery.network/). You can view both implementations within the following repos:
-
-
-
-## Stats dApp Pages
-
-The current dApp implementation comprises three distinct pages:
-
-- Keys
-- Authorities
-- Proposals
-
-Within each of those pages you may drill in to view more detailed data. For example, on the Keys page you can view the current active key, the compressed and
-uncompressed key and the authorities used to generate the key. You can also view the previous keys and the authorities used to generate them.
-
-The Webb team intends to iteratively add additional insights to this dApp as the protocol matures and as the community requests additional features. If you have
-a suggested feature please post your feature request [here](https://github.com/webb-tools/feedback/discussions/categories/webb-dapp-feedback).
diff --git a/pages/docs/projects/stats-dapp/usage-guide/_meta.json b/pages/docs/projects/stats-dapp/usage-guide/_meta.json
deleted file mode 100644
index 01d94b59..00000000
--- a/pages/docs/projects/stats-dapp/usage-guide/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "keys": "Keys",
- "authorities": "Authorities",
- "proposals": "Proposals"
-}
diff --git a/pages/docs/projects/stats-dapp/usage-guide/authorities.mdx b/pages/docs/projects/stats-dapp/usage-guide/authorities.mdx
deleted file mode 100644
index 5a0e8918..00000000
--- a/pages/docs/projects/stats-dapp/usage-guide/authorities.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: Authorities
-description: An overview of the Webb Stats dApp Authorities page.
----
-
-import { AuthPage, AuthDetailsPage, AuthTable, Thresholds, UpcomingThresholds } from '../../../../../components/images/stats/authorities/Authorities';
-
-# Overview
-
-The Authorities page visualizes the network threshold parameters and the DKG authorities that have participated in the network. If you are interested
-in running a DKG Authority please refer to the node operators section of the docs [here](/docs/ecosystem-roles/validator/quickstart/).
-
-
-
-
-
-## Network Thresholds
-
-The network thresholds are displayed indicating the current DKG parameters for the threshold-key-generation protocol, and
-the threshold-signature-scheme protocol. More specifically these parameters indicate:
-
-- `Keygen Threshold`: The minimum number of authorities required to produce a valid signature.
-- `Signature Threshold`: The 't' in t-out-of-n threshold signatures used in the DKG signing system; at least this many DKG authorities are required to generate a signature.
-
-These parameters will change over time; to view the history of keygen and signature thresholds from the network, you simply need to
-select **View History**. A line chart will be displayed showing the history of the keygen and signature thresholds.
-
-
-
-
-## Upcoming Thresholds
-
-The upcoming thresholds table displays the next keygen and signature thresholds that will be used in the upcoming session. It also displays the next
-authority set that will be used in the upcoming session.
-
-
-
-
-## DKG Authorities
-
-The DKG authorities table displays all the participating authorities in previous sessions. The table provides some metadata relevant to
-each authority. If the authority node has set its location it will appear; the authority's uptime and reputation score are also displayed.
-
-
-
-
-## Authority Details
-
-Selecting the **Details** button on an authority will display the authority details page. This lists all the keygens the authority has participated in,
-and identifying information if it is set. This may include a link to their website, Twitter profile, or email address.
-
-
-
diff --git a/pages/docs/projects/stats-dapp/usage-guide/keys.mdx b/pages/docs/projects/stats-dapp/usage-guide/keys.mdx
deleted file mode 100644
index 368de097..00000000
--- a/pages/docs/projects/stats-dapp/usage-guide/keys.mdx
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: Keys
-description: An overview of the Webb Stats dApp Keys page.
----
-
-import { KeysPage, KeysDetailsPage, ActiveKey, KeygenTable } from '../../../../../components/images/stats/keys/Keys';
-
-# Overview
-
-The Keys page visualizes the keys generated during the DKG's threshold-key-generation protocol that are being used to govern distributed applications,
-namely the Anchor System.
-
-
-
-
-
-## Active Key
-
-The active key section displays the **current** active key. The active key is the key that is currently being used to sign proposals into existence. The dApp
-visualizes the start (when the key became active) and the end (when the next key is rotated in). The component provides insight into what session the
-current key became active in, and you may copy the compressed key to your clipboard.
-
-
-
-## Keygen List
-
-The keygen list displays all the keys that have been generated during the DKG's threshold-key-generation protocol, the participating authorities used to generate the
-key, the block height and session the key was generated in. In addition it displays the keygen and signature thresholds during the key's active session. More precisely,
-the table headers can be defined as the following:
-
-- `Height`: The block height the key was generated in.
-- `Session`: The session the key was generated in.
-- `Keygen Threshold`: The keygen threshold set during the keys generation which is the minimum number of authorities required to produce a valid signature.
-- `Signature Threshold`: The 't' in t-out-of-n threshold signatures used in the DKG signing system; at least this many DKG authorities are required to generate a signature.
-- `Authorities`: The authorities that participated in the key generation.
-
-
-
-
-
-## Key Details
-
-To view additional data about any given key you may click on the key's row in the keygen list. This will display the key's details page. The key details page illustrates
-the compressed and uncompressed key, the key's history details, and the authorities that were active participants.
-
-
-
-
diff --git a/pages/docs/projects/stats-dapp/usage-guide/proposals.mdx b/pages/docs/projects/stats-dapp/usage-guide/proposals.mdx
deleted file mode 100644
index 5d81591b..00000000
--- a/pages/docs/projects/stats-dapp/usage-guide/proposals.mdx
+++ /dev/null
@@ -1,56 +0,0 @@
----
-title: Proposals
-description: An overview of the Webb Stats dApp Proposals page.
----
-
-import { ProposalsPage, ProposalsDetailsPage, ProposalsTable, ProposalsChart, ProposalStatus } from '../../../../../components/images/stats/proposals/Proposals';
-
-# Overview
-
-The Proposal page visualizes the proposer threshold parameters and all the proposals that have been submitted to the DKG to be signed. Proposals are
-essentially messages that have been proposed to enact a change within the protocol. Proposals are submitted by proposers and must pass a vote threshold in
-order to be signed by the DKG.
-
-
-
-
-## Proposal Status
-
-The Proposal Status section displays the current state of the proposal threshold parameters. The parameters are as follows:
-
-- Proposal threshold: Active proposers vote on proposals, and if the vote threshold is met, the proposal is passed on to the DKG to be signed.
-- Proposers: Vote on proposals to be signed by the DKG. The valid proposer set is a superset of the current DKG authorities. Active DKG authorities are continuously rotated into the proposer set.
-
-
-
-
-## Proposals Over Time
-
-The stacked area graph displays the number of proposals that have been proposed over time. The graph is broken down by proposal type. For an in-depth review of
-all proposal types, please refer to the application-specific proposals and global proposals [here](/docs/protocols/asset-transfer/governance-proposals), and [here](/docs/protocols/mpc-protocols/proposals/).
-
-
-
-
-## Proposals Table
-
-The proposal table displays all the proposals that have been submitted to the network, the status of those proposals, the type of
-proposal, and the chain from which the proposal was derived. More precisely,
-the table headers can be defined as the following:
-
-- `Status`: The status of the proposal (e.g. signed, opened, rejected).
-- `Height`: The block height the proposal was submitted in.
-- `Type`: The type of proposal (e.g. AnchorUpdate, AddToken).
-- `Proposers`: The proposers that voted on the proposal.
-- `Chain`: The chain the proposal was submitted from.
-
-
-
-
-## Proposal Details
-
-Selecting the **Details** button on a proposal will display the proposal details page. Here you can view the decoded
-data of the proposal, the proposers that voted on the proposal, and the distribution of votes.
-
-
-
diff --git a/pages/docs/protocols/_meta.json b/pages/docs/protocols/_meta.json
deleted file mode 100644
index be5b4d1d..00000000
--- a/pages/docs/protocols/_meta.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "identity": "Identity Protocol",
- "masp": "Multi-Asset Shielded Pools",
- "mpc-protocols": "Multi-party Computation (MPC) Protocols",
- "single-asset-shielded-pool": "Single Asset Shielded Pool",
- "ZK Protocols": "Zero Knowledge (ZK) Protocols"
-}
diff --git a/pages/docs/protocols/identity.mdx b/pages/docs/protocols/identity.mdx
deleted file mode 100644
index 0248b471..00000000
--- a/pages/docs/protocols/identity.mdx
+++ /dev/null
@@ -1,141 +0,0 @@
----
-title: Identity
-description: Describes the identity protocol.
----
-
-import IdentityImages from '../../../components/images/IdentityProtocol'
-
-# Identity
-
-The identity protocol we describe here is inspired by and based on the Semaphore Protocol, an identity protocol that allows for private arbitrary signaling (voting, for example) on the blockchain while proving that the user is part of a group in the Semaphore set.
-
-In our version of the identity protocol we will leverage Webb’s technology to design an interoperable Semaphore protocol that allows for signaling from one-of-many connected Semaphore sets on potentially different blockchains. A benefit of using Webb's interoperable identity system is the ability to create and connect multiple identity sets together from potentially many distinct blockchains. Using this, we can create applications that enable participation from users in multiple communities and blockchain ecosystems at the same time.
-
-## Identity sets and identities
-
-The Semaphore protocol allows us to create arbitrary merkle-trees to represent our identity sets. Each leaf in the merkle tree is a 32-byte identity commitment. A user can prove membership in the Semaphore identity set by proving that they know the secret preimage to the leaf commitment and its corresponding merkle path. In Webb's extension protocol, a user can be a member of one-of-many merkle trees on potentially distinct blockchains. In this setting, the user can prove, on any of the chains, their membership in one of them, enabling cross-chain proofs of identities and interactions on top.
-
-Below we document the Identity data structure.
-
-- Each identity is composed of a pair of random field elements: (`identityNullifier`, `identityTrapdoor`).
-- The `identitySecret` is the `Poseidon(identityNullifier, identityTrapdoor)`.
-- The `identityCommitment` is the `Poseidon(identitySecret)`.
-- The `identityCommitment` is the leaf data that we will insert into an identity set's merkle tree.
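-
-As a sketch of this derivation (assuming circomlibjs' Poseidon implementation; illustrative only, not the protocol's actual code):
-
-```
-// Sketch: derive an identityCommitment from the two secret field
-// elements, following the structure described above.
-import { buildPoseidon } from 'circomlibjs';
-
-async function identityCommitment(
-  identityNullifier: bigint,
-  identityTrapdoor: bigint
-): Promise<bigint> {
-  const poseidon = await buildPoseidon();
-  const F = poseidon.F;
-  const identitySecret = F.toObject(poseidon([identityNullifier, identityTrapdoor]));
-  return F.toObject(poseidon([identitySecret])); // leaf inserted into the identity set
-}
-```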
-
-For a detailed diagram on how this process is integrated into Semaphore: [linked here](https://semaphore.appliedzkp.org/docs/technical-reference/circuits)
-![image](https://user-images.githubusercontent.com/13153687/199113862-756548ae-5e57-4499-b35f-c290169b7262.png)
-
-### Controlling identity sets
-
-Core to having cross-chain zero-knowledge proofs of identity and group membership is actually creating and maintaining those groups. We may envision having cross-chain groups for NFT holders of specific NFT communities on Ethereum, Arbitrum, and Polygon such as a group for a certain number of Cryptopunk derivative communities. We may also envision having cross-chain groups for users of a specific social forum like Commonwealth, where communities do already live on different chains. We may now be able to leverage the power of the masses to grow the privacy these users have together and provide additional products for them. To that end, we must construct these groups at the very least.
-
-The functionality for modifying identity sets is managed by a group administrator. As currently implemented, this is an arbitrary address whose signatures control modification of the underlying smart contract using a `modifier onlyAdmin()` style interface. This is geared towards being an EOA or user-controlled smart contract that manually approves and selects users to join their community identity set. Nonetheless, this can be abstracted to provide a rule-based system for administering membership into the identity set, such as a rule proving ownership of an NFT or some number of tokens.
-
-### Preventing double signaling
-
-Whether double signaling is bad depends on the application. If we're deploying a forum, for example, a user posting twice is expected behaviour.
-
-Because of this, double-signaling prevention is dependent on the application. For example, for the voting system, the ballotID is the nullifier.
-
-In the base contract (`Semaphore.sol`) and interface (`Semaphore.ts`), the nullifier being used for verification can be any uint256, so there's no attempt to prevent any kind of double signaling since the user can send the same signal multiple times by just generating another random nullifier each time.
-
-On the extensions, there is already-implemented logic for preventing double signaling. In the `SemaphoreVoting.sol` example, the pollID is the same as the nullifierHash, so each user can only vote one time per poll.
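-
-A minimal sketch of this per-application logic, assuming the usual Semaphore construction where the nullifierHash is Poseidon(externalNullifier, identityNullifier) and the pollID plays the role of the external nullifier:
-
-```
-// Sketch only: track nullifier hashes per poll so an identity can signal
-// at most once per pollID. On-chain this would be a contract mapping.
-import { buildPoseidon } from 'circomlibjs';
-
-const seenNullifierHashes = new Set<bigint>();
-
-async function recordSignal(pollId: bigint, identityNullifier: bigint) {
-  const poseidon = await buildPoseidon();
-  const nullifierHash = poseidon.F.toObject(poseidon([pollId, identityNullifier]));
-  if (seenNullifierHashes.has(nullifierHash)) {
-    throw new Error('double signal: this identity already voted in this poll');
-  }
-  seenNullifierHashes.add(nullifierHash);
-}
-```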
-
-## Circuit
-
-The zero-knowledge circuit encodes a variety of constraints to ensure that the identity system works properly. This includes, among other things, the constraints necessary to prove that a user is part of a valid merkle-tree.
-
-The constraints required are:
-
-- To verify the correctness of `identity nullifiers`
-- To verify the uniqueness of `external nullifier`
-- To verify the existence of input `identityCommitments`
-- To verify correctness of merkle-path and merkle-root
-
-## Developer Usage: Using semaphore as external contract
-
-One interesting possibility is to use the Semaphore contract just to manage groups, while managing the circuit and double-spending prevention logic in another repo. Example: `https://github.com/webb-tools/protocol-solidity/pull/182`
-
-We have a Semaphore contract deployed at the following addresses.
-
-```
-Ethereum testnet: TBD
-Arbitrum testnet: TBD
-...
-
-```
-
-Integration should proceed as follows:
-
-1. Add the Semaphore contract interface to your contracts.
-2. Point the interface to the corresponding address to the chain you're deploying on.
-3. Add a groupId parameter to your smart-contract. You may consider making it immutable.
-4. Define your group on the Semaphore contract using the Semaphore interface by pointing to the contract address as follows:
-
-```
-import path from 'path';
-import { Semaphore } from '@webb-tools/semaphore';
-// Assumption: fetchComponentsFromFilePaths loads the circuit's wasm,
-// witness calculator, and zkey artifacts (exported by @webb-tools/utils).
-import { fetchComponentsFromFilePaths } from '@webb-tools/utils';
-
-// The artifact paths below are placeholders; point them at your own build output.
-const wasmPath = path.resolve(__dirname, './artifacts/semaphore_2_2.wasm');
-const witnessCalcPath = path.resolve(__dirname, './artifacts/witness_calculator.js');
-const zkeyPath = path.resolve(__dirname, './artifacts/circuit_final.zkey');
-
-const zkComponents2_2 = await fetchComponentsFromFilePaths(
-  wasmPath,
-  witnessCalcPath,
-  zkeyPath
-);
-// zkComponents2_2 is duplicated. This is not a good interface for a one (fixed-size) or more than two possible validator sets.
-const semaphore = Semaphore.connect("", zkComponents2_2, zkComponents2_2, signer); // "" = deployed Semaphore address from the table above
-
-const levels = 30;   // merkle tree depth, between 1 and 32
-const groupId = 1;   // any group ID, just make sure it isn't taken
-const maxEdges = 1;  // currently supported values: 1 and 7
-
-await semaphore.createGroup(groupId, levels, signer.address, maxEdges);
-
-// Create members using the same logic as in the circuit being used.
-const members: bigint[] = [];
-await semaphore.addMembers(groupId, members);
-```
-
-Your group is ready on the semaphore contract. Now you should just point its interface to the correct address and group ID.
-
-## Developer Usage: Developing semaphore extensions
-
-Development of semaphore extensions should be done under `packages/semaphore/contracts/extensions`.
-
-The main contracts whose interfaces you should understand in order to develop extensions are `packages/semaphore/contracts/base/SemaphoreCore.sol` and `packages/semaphore/contracts/base/SemaphoreGroups.sol`.
-
-`SemaphoreCore.sol` deals with proof verification. `SemaphoreGroups.sol` deals with the different linked merkle trees for each group.
-
-Note also that the extension contracts and `Semaphore.sol` are not interchangeable. The current implementation does not allow for deploying a `Voting/Whistleblowing` contract or a `Semaphore/Voting` contract that deals with both applications and unifies anonymity sets.
-
-For API documentation on the contracts, see `semaphore-anchor/packages/contracts/interfaces/`.
-
-### Formal description
-
-
-
-## Research
-
-### New nullifier schemes
-
-There are new nullifier schemes that may be more attractive for bootstrapping identity sets on a given chain. For example, the VUF nullifier scheme described in [https://eprint.iacr.org/2022/1255](https://eprint.iacr.org/2022/1255) details a nullifier scheme based on ECDSA signatures. This would open up the possibility of creating cross-chain identity sets using only the underlying user's EVM address. These new schemes can be integrated in a modular manner without affecting the bridging mechanism that bridges identity sets together.
-
-### New protocol integrations
-
-Integrating with existing identity protocols is usually straightforward. We simply take the core zero-knowledge circuit and make it a one-of-many Merkle tree membership proof instead of a single Merkle tree membership proof. Protocols we are interested in integrating with include:
-
-- **Sismo** for cross-chain zkBadges.
-- **Interrep** for cross-chain reputation.
-
-## References
-
-- [https://semaphore.appliedzkp.org/](https://semaphore.appliedzkp.org/)
diff --git a/pages/docs/protocols/masp/_meta.json b/pages/docs/protocols/masp/_meta.json
deleted file mode 100644
index bccaa499..00000000
--- a/pages/docs/protocols/masp/_meta.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "overview": "Overview",
- "swap": "Swap Circuit",
- "rollup": "Rollup",
- "proof-generation": "Delegatable Proof Generation",
- "note-encryption": "Note Encryption"
-}
diff --git a/pages/docs/protocols/masp/note-encryption.mdx b/pages/docs/protocols/masp/note-encryption.mdx
deleted file mode 100644
index 7836da84..00000000
--- a/pages/docs/protocols/masp/note-encryption.mdx
+++ /dev/null
@@ -1,64 +0,0 @@
----
-title: MASP Note Encryption
-description: Describes the MASP Note Encryption.
----
-
-import Callout from "../../../../components/Callout";
-
-# MASP Note Encryption (without FMD)
-
-## Note Structure
-
-Say Alice `transact`s via the `Transaction.circom` circuit and the recipient of both output commitments is Bob. We have the following desiderata:
-
-1. If I possess Alice’s viewing key, I should be able to see the internal data of the two output commitments.
-2. If I possess Bob’s viewing key, I should be able to see all the internal data of the output commitments.
-
-This allows:
-
-1. Recipients to spend MASP commitments transferred to them.
-2. Regulators and outside observers to view BOTH sent and received funds of any party they hold the viewing keys of.
-
-<Callout>
-  Note that due to our MASP key design, even if regulators know the blinding of a commitment, they still cannot spend the output commitment, because they do not have the proof authorizing key, `ak`, which is needed to create a valid ZKP.
-</Callout>
-
-### Note Data
-
-Each note corresponds to an output commitment. The data contained in a note is simply the internal data of the output commitment:
-
-1. AssetID
-2. TokenID
-3. Amount
-4. DestinationChainID
-5. PublicKey_X
-6. PublicKey_Y
-7. blinding
-
-## Encrypting the Note
-
-Each note is encrypted twice, once with Alice's public key and once with Bob's public key. More specifically, the encryption process works as follows. We will use a similar design to [zkopru](https://github.com/zkopru-network/zkopru/blob/340d5f3f0b4c5112e767bef122c42cdd6f0ab89c/packages/transaction/src/utxo.ts#L53-L80).
-
-### Encrypting with Alice’s Public Key
-
-1. Alice generates an ephemeral babyjubjub keypair `(esk, epk)`.
-2. Alice computes `esk[pk_A]` and uses this as the secret key in the `chacha20` symmetric encryption scheme to encrypt the note.
-3. Alice posts the encryption along with `epk` on the blockchain.
-
-### Encrypting with Bob’s Public Key
-
-1. Alice generates another ephemeral babyjubjub keypair `(esk, epk)`.
-2. Alice computes `esk[pk_B]` and uses this as the secret key in the `chacha20` symmetric encryption scheme to encrypt the note.
-3. Alice posts the encryption along with `epk` on the blockchain.
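-
-As a concrete reference, here is a minimal TypeScript sketch of one such encryption, assuming circomlibjs's babyjubjub helpers (`buildBabyjub`, `mulPointEscalar`) and Node's built-in `chacha20-poly1305` cipher. The key-derivation step (hashing the shared point's x-coordinate) and the authenticated cipher variant are illustrative choices, not a specification.
-
-```typescript
-import { createCipheriv, createHash, randomBytes } from "crypto";
-import { buildBabyjub } from "circomlibjs";
-
-// Sketch: encrypt a serialized note to a recipient babyjubjub public key.
-async function encryptNote(note: Buffer, recipientPk: [Uint8Array, Uint8Array]) {
-  const babyJub = await buildBabyjub();
-  const F = babyJub.F;
-
-  // 1. Ephemeral keypair: esk is a random scalar, epk = esk * Base8.
-  const esk = BigInt("0x" + randomBytes(31).toString("hex"));
-  const epk = babyJub.mulPointEscalar(babyJub.Base8, esk);
-
-  // 2. Shared point esk[pk]; hash its x-coordinate down to a 32-byte key.
-  const shared = babyJub.mulPointEscalar(recipientPk, esk);
-  const key = createHash("sha256")
-    .update(F.toObject(shared[0]).toString(16))
-    .digest();
-
-  // 3. Encrypt the note; post (ciphertext, tag, nonce, epk) on the blockchain.
-  const nonce = randomBytes(12);
-  const cipher = createCipheriv("chacha20-poly1305", key, nonce, { authTagLength: 16 });
-  const ciphertext = Buffer.concat([cipher.update(note), cipher.final()]);
-  return { ciphertext, tag: cipher.getAuthTag(), nonce, epk };
-}
-```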
-
-## Decrypting Notes
-
-We will use a similar design to zkopru:
-
-[zkopru-network/zkopru](https://github.com/zkopru-network/zkopru/blob/340d5f3f0b4c5112e767bef122c42cdd6f0ab89c/packages/transaction/src/utxo.ts#L112-L171)
-
-<Callout>
-  Note that `esk[pk] = vk[epk]`, so anyone with the viewing key can compute `vk[epk]` and use `chacha20` to decrypt notes.
-</Callout>
-
-Potential recipients can decrypt new encrypted notes on the blockchain, compute the `Record` commitment from the internal data, and see if it matches the output commitment posted on the blockchain. If so, this note/commitment is meant for the recipient. Eventually, we will have relayers take care of a lot of the note encryption and decryption work so each individual does not have to do it.
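-
-The trial-decryption loop might look like the following sketch (same assumed primitives as above; `deserializeNote` is a hypothetical application-defined parser, and the commitment is recomputed with circomlibjs's Poseidon per the `Record` formulas in the MASP overview):
-
-```typescript
-import { createDecipheriv, createHash } from "crypto";
-import { buildBabyjub, buildPoseidon } from "circomlibjs";
-
-// Hypothetical, application-defined note parser.
-declare function deserializeNote(note: Buffer): {
-  assetId: bigint; tokenId: bigint; amount: bigint; partialRecord: bigint;
-};
-
-// Sketch: attempt to decrypt an on-chain note with our viewing key vk and
-// return the note fields if the recomputed Record matches the posted commitment.
-async function tryDecrypt(
-  vk: bigint,
-  epk: [Uint8Array, Uint8Array], // ephemeral public key posted with the ciphertext
-  ciphertext: Buffer,
-  tag: Buffer,
-  nonce: Buffer,
-  postedCommitment: bigint
-) {
-  const babyJub = await buildBabyjub();
-  const poseidon = await buildPoseidon();
-  const F = babyJub.F;
-
-  // esk[pk] = vk[epk]: recover the shared point and re-derive the symmetric key.
-  const shared = babyJub.mulPointEscalar(epk, vk);
-  const key = createHash("sha256").update(F.toObject(shared[0]).toString(16)).digest();
-
-  const decipher = createDecipheriv("chacha20-poly1305", key, nonce, { authTagLength: 16 });
-  decipher.setAuthTag(tag);
-  let note: Buffer;
-  try {
-    note = Buffer.concat([decipher.update(ciphertext), decipher.final()]);
-  } catch {
-    return null; // authentication failed: this note is not encrypted to us
-  }
-
-  // Recompute Record = Poseidon(AssetId, TokenId, Amount, PartialRecord) and compare.
-  const { assetId, tokenId, amount, partialRecord } = deserializeNote(note);
-  const record = poseidon.F.toObject(poseidon([assetId, tokenId, amount, partialRecord]));
-  return record === postedCommitment ? { assetId, tokenId, amount } : null;
-}
-```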
diff --git a/pages/docs/protocols/masp/overview.mdx b/pages/docs/protocols/masp/overview.mdx
deleted file mode 100644
index 312e7861..00000000
--- a/pages/docs/protocols/masp/overview.mdx
+++ /dev/null
@@ -1,141 +0,0 @@
----
-title: Multi-Asset Shielded Pool (MASP)
-description: A description of Webb's Asset Multi-Asset Shielded Pool (MASP).
----
-
-import { MASPPrivateTransferImage } from '../../../../components/images/masp/Masp.tsx'
-
-# Multi-Asset Shielded Pool (MASP)
-
-The MASP is a multi-asset interoperable shielded pool. It allows users to transfer multiple types of assets, such as fungible and non-fungible tokens, within a single pool. The MASP is more featureful than existing Webb ZK applications in this way and supports a variety of other features such as:
-
-- An updated key structure system supporting delegatable proof generation.
-- An incentive mechanism that rewards users for depositing and staying within the shielded pool.
-- A batch deposit system that allows cheap and efficient deposits into the pool.
-- A swap circuit that allows users to execute atomic swap transactions within the pool.
-
-## What are shielded transactions and pools?
-
-A shielded transaction is a transfer of assets between two parties, Alice and Bob, that conceals the sender, receiver, and amount from outside observers. Shielded pools are one way to implement a shielded transfer system. At a high level, Alice deposits assets into a shielded pool, and Bob withdraws from it. Since all depositors contribute to the shielded pool, their assets are combined, making it impossible for an outside observer to link Alice and Bob. The pool's size is often referred to as the anonymity set.
-
-## What is a multi-asset shielded pool (MASP)?
-
-Traditional shielded pool systems only support a single asset type. This means a separate shielded pool smart contract must be deployed for each ERC20 token.
-
-A multi-asset shielded pool (MASP) is a shielded pool that supports shielded transactions for multiple asset types. In addition to hiding the sender, receiver, and transaction amount, shielded transactions can also conceal the asset type being transferred. This increases the anonymity set size and provides a better user experience for the end user.
-
-## Features
-
-### Support for Fungible and Non-Fungible Tokens
-
-Webb’s MASP protocol supports depositing both fungible and non-fungible tokens (NFTs).
-
-### Private Asset Transfers
-
-The core functionality of the MASP is private asset transfers. Every user in the MASP has associated MASP keys of the following form.
-
-
-
-- `g` is a generator for the `babyjubjub` elliptic curve and therefore, `(sk, ak)` and `(vk, pk)` form valid secret/public keypairs, which can be used for EdDSA signatures and encryption/decryption.
-
-When Alice deposits, a `Record` is inserted into a Merkle tree on-chain.
-
-```jsx
-BlindingHash = Poseidon(blinding)
-PartialRecord = Poseidon(DestinationChainId, Pk_X, Pk_Y, BlindingHash)
-Record = Poseidon(AssetId, TokenId, Amount, PartialRecord)
-```
-
-The `Record` components are:
-
-- `blinding`: A randomly generated number only known to Alice.
-- `DestinationChainId`: The `chainId` where Alice intends to spend her deposited funds.
-- `Pk_X, Pk_Y`: The `x` and `y` coordinate of Alice’s MASP public key.
-- `AssetId`: The `AssetId` of the token being deposited.
-- `TokenId`: If the asset being deposited is an NFT, this is the `TokenId` of that NFT. Otherwise, for fungible tokens, this value is 0.
-- `Amount`: The amount of tokens being deposited.
-
-The root of this Merkle tree is then relayed to all the MASPs across the bridge.
-
-When Alice wants to spend her tokens, she submits a zero-knowledge proof via a relayer on the `DestinationChainId`'s MASP. The zero-knowledge proof verifies that she knows the internal values (`blinding`, `MASP key`, `AssetId`, `TokenId`, `Amount`) of a `Record` located on a Merkle tree in one of the MASPs on the bridge. Only Alice can create such a zero-knowledge proof since values like the `blinding` are secret and known only to her.
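-
-For reference, here is a short sketch of recomputing the `Record` commitment off-chain with circomlibjs's Poseidon, following the formulas above (field serialization details are elided):
-
-```typescript
-import { buildPoseidon } from "circomlibjs";
-
-// Recompute Record = Poseidon(AssetId, TokenId, Amount, PartialRecord) as defined above.
-async function computeRecord(
-  blinding: bigint,
-  destinationChainId: bigint,
-  pkX: bigint,
-  pkY: bigint,
-  assetId: bigint,
-  tokenId: bigint,
-  amount: bigint
-): Promise<bigint> {
-  const poseidon = await buildPoseidon();
-  const blindingHash = poseidon([blinding]);
-  const partialRecord = poseidon([destinationChainId, pkX, pkY, blindingHash]);
-  const record = poseidon([assetId, tokenId, amount, partialRecord]);
-  return poseidon.F.toObject(record); // the leaf inserted into the MASP Merkle tree
-}
-```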
-
-### Structure of a MASP Transaction
-
-The MASP supports a more feature-rich set of transaction types than simple deposits and withdrawals. Specifically, a MASP transaction consists of:
-
-- `publicAmount`: This is the amount being deposited into or withdrawn from the MASP. If this value is 0, the transaction is known as an internal shielded transfer and is special because an outside observer does not even know the type of asset that is being transacted.
-- `inputRecord`s: These are the `Record`s that are being spent. They have already been inserted into a Merkle tree on one of the MASPs on the bridge.
-- `outputRecord`s: These are the new `Record`s that are being created. They will be inserted into the MASP’s Merkle tree.
-
-The following invariant must hold so that funds are not created out of thin air:
-
-```markdown
-sum of inputRecords amounts + public Amount = sum of outputRecords amounts
-```
-
-This invariant is checked in the zero-knowledge proof. For valid transactions, the `inputRecord`s are nullified, so they cannot be double spent, and the `outputRecord`s are inserted into the MASP's Merkle tree.
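-
-Off-circuit, the same balance check is a one-liner; a sketch, with amounts as bigints and the sign convention for `publicAmount` (positive for deposits, negative for withdrawals) assumed rather than specified here:
-
-```typescript
-// Check: sum of inputRecords amounts + publicAmount = sum of outputRecords amounts.
-function balanced(inputAmounts: bigint[], outputAmounts: bigint[], publicAmount: bigint): boolean {
-  const sum = (xs: bigint[]) => xs.reduce((a, b) => a + b, 0n);
-  return sum(inputAmounts) + publicAmount === sum(outputAmounts);
-}
-```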
-
-### Yield Generation via Anonymity Mining
-
-The strength of privacy provided by the MASP is strongly tied to the size of the anonymity set. The anonymity set grows the longer users keep their assets locked in the shielded pool. Therefore, Webb's MASP protocol includes an anonymity mining system which tracks, in a privacy-preserving manner, the amount of time users keep their funds locked and proportionally rewards them with tokens. In the current iteration of the MASP protocol, anonymity miners are rewarded in Tangle tokens, the native token of Webb's MPC Substrate blockchain.
-
-### Shielded Swaps
-
-The Webb MASP protocol allows two counterparties, Alice and Bob, to swap assets. For instance, Alice can swap her shielded NFT for Bob's shielded USDC. All an outside observer sees is `Record`s inserted and nullified on the MASP's Merkle tree.
-
-At a high level, the process works as follows:
-
-- Via some communication network Alice advertises that she wants to sell her NFT for USDC.
-- Bob responds to Alice and they agree upon a price.
-- Various details of the swap transaction are sent to a relayer, which then produces a zero-knowledge proof and submits it on-chain. The technical details of this mechanism are presented in this document:
-
-[MASP Swap Circuit](./swap.mdx)
-
-### Rollup Functionalities
-
-Inserting into an on-chain Merkle tree is a computationally expensive operation since many hashes must be computed. One solution to this issue is to store the leaf data that needs to be inserted into the Merkle tree in an on-chain queue. A relayer can then batch-insert these elements into the Merkle tree via a succinct zero-knowledge proof, and the new Merkle tree root is stored on the MASP smart contract.
-
-More technical details about this mechanism are in the following docs:
-
-[MASP Rollup Functionality v1](./rollup.mdx)
-
-### Delegatable Transaction Proof Generation
-
-For many current shielded proof systems, the zero-knowledge proof for transacting is generated in the user’s browser. This is because the data needed to generate a valid zero-knowledge proof includes secret values, such as the MASP secret key, that should not be shared with an external party. The downside of this approach is that proof generation is a computationally intensive task. As such, it would be ideal to delegate proof generation to a computationally powerful relayer.
-
-This is exactly what the proof authorizing key (part of the MASP key) allows for. Particularly, it allows a user to delegate proof generation to a relayer, without exposing its MASP secret key. For the technical details on this process, please see:
-
-[MASP Delegatable Proof Generation](/docs/protocols/masp/proof-generation/)
-
-## Lifecycles
-
-### For yield farmers
-
-1. Deposit tokens into the shielded pool.
-2. Keep funds locked for a long period of time to accumulate anonymity points.
-3. When ready to claim Tangle tokens as a reward, spend funds inside the shielded pool, so that a commitment gets inserted on the `RewardSpentTree`.
-4. Claim anonymity points via a zero-knowledge proof on the Tangle blockchain.
-5. Use the AMM pallet to swap anonymity points into Tangle tokens.
-
-### For transactors
-
-1. Deposit into the shielded pool.
-2. Submit a zero-knowledge proof via a relayer to spend funds.
-
-### For swappers
-
-1. Assume Alice and Bob already have funds inside the shielded pool.
-2. Alice sends a message over a communication network indicating that she wants to swap her token(s).
-3. Bob responds to Alice’s request and they agree on a price.
-4. They submit swap proof inputs to a relayer.
-5. Relayer computes swap proof and submits it on-chain.
-
-## Fees and Incentives for Relayers
-
-- Relayers that submit transaction proofs on-chain are paid fees inside the shielded pool, in one of a number of whitelisted fee tokens. That is, the transactor/user must pay its fees via `Record`s that it owns. These `Record`s are used to form `feeOutputRecord`s owned by the relayer.
-- There are currently no relayer fees for submitting proofs for batch inserting into Merkle trees.
-- There are currently no relayer fees for submitting swap proofs.
-
-## Links
-
-- `masp` branch on `protocol-solidity` repo: [https://github.com/webb-tools/protocol-solidity/](https://github.com/webb-tools/protocol-solidity).
diff --git a/pages/docs/protocols/masp/proof-generation.mdx b/pages/docs/protocols/masp/proof-generation.mdx
deleted file mode 100644
index 9f364a55..00000000
--- a/pages/docs/protocols/masp/proof-generation.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: MASP Delegatable Proof Generation
-description: A description of MASP Delegatable Proof Generation.
----
-
-import { MASPPrivateTransferImage } from '../../../../components/images/masp/Masp.tsx'
-
-# MASP Delegatable Proof Generation
-
-## Overview
-
-In Webb's MASP system, delegatable proof generation offloads the computationally intensive proof generation process from the user's browser to a computationally powerful, semi-trusted prover (which we call a relayer). As we will see, this process is not as trivial as the user sending the relayer its proof inputs, since the proof inputs contain secret data that the relayer could then use to steal the user's funds.
-
-## Technical Details
-
-### MASP key structure:
-
-
-
-### MASP `Record` structure:
-
-```circom
-BlindingHash = Poseidon(blinding)
-PartialRecord = Poseidon(DestinationChainId, Pk_X, Pk_Y, BlindingHash)
-Record = Poseidon(AssetId, TokenId, Amount, PartialRecord)
-```
-
-### Preserving Security
-
-The relayer must not be able to change the contents of the `outputRecord`s. If it could, it would simply change the `outputRecord`s to be under its keys and transfer the user’s funds to itself. To prevent such behavior, we make use of the fact that `(sk, ak)` is a secret/public keypair with which EdDSA signatures can be made. In particular, we do the following:
-
-- User signs `inputRecord`s and `outputRecord`s with `sk`
-- Inside the ZKP, we verify that these `Record`s are correctly signed, by verifying with `ak`.
-
-This begs the question: can't the relayer just generate a new `(sk, ak)` pair and forge the signatures with altered `outputRecord`s? The answer is NO, because the `inputRecord`s are bound to `ak`. That is, `pk_X` and `pk_Y` are derived inside the circuit from `ak`, so if the relayer changes `ak`, the `inputRecord`s will no longer be valid `Record`s on the MASP Merkle trees.
diff --git a/pages/docs/protocols/masp/rollup.mdx b/pages/docs/protocols/masp/rollup.mdx
deleted file mode 100644
index 7cb3715e..00000000
--- a/pages/docs/protocols/masp/rollup.mdx
+++ /dev/null
@@ -1,60 +0,0 @@
----
-title: MASP Rollup
-description: Describes the MASP Rollup functionality.
----
-
-## Overview
-
-Various functionalities in Webb's MASP system require inserting into an on-chain Merkle tree. Due to the amount of hashing required, this is a gas-expensive operation. To save gas, we can instead queue Merkle tree leaves on-chain and then batch insert them via a succinct proof. In this document, we describe queuing and batch inserting into the MASP `Record`, `RewardUnspent`, and `RewardSpent` trees.
-
-### Technical Details
-
-It is easiest to describe the rollup functionality via a step-by-step user flow. Let's call this user Alice. We explain what happens as Alice interacts with the MASP by depositing funds and then transacting via ZKPs.
-
-The main smart contract which contains the queueing and batch inserting functionality is called the `MASPProxy`. A `MASPProxy` can proxy for multiple `MASP`s.
-
-- Alice sends the following `QueueDepositInfo` struct information:
-
-```solidity
-struct QueueDepositInfo {
- address unwrappedToken;
- address wrappedToken;
- uint256 amount;
- uint256 assetID;
- uint256 tokenID;
- bytes32 depositPartialCommitment;
- address proxiedMASP;
-}
-```
-
-to the `queueDeposit` function along with the associated deposit tokens.
-
-- The `queueDeposit` function checks that the deposited tokens match the information in the `QueueDepositInfo` struct. If so, it inserts the `QueueDepositInfo` into `QueueDepositMap`, which is a double mapping from `masp address` → `uint256` → `QueueDepositInfo`.
-- A relayer or any other outside entity can call the `batchDeposit` function which does the following for each queued deposit being inserted:
- - The associated funds have to be transferred to MASP and potentially wrapped.
- - Associated `RewardUnspentTree` commitment has to be computed and queued for insertion.
- - The relayer has to submit a batch update zero-knowledge proof (the details of which are described in the Batch Update Circuit section below) to update the Merkle root of the MASPs Merkle tree. This proof is then verified by the MASP.
-- When Alice transacts on the MASP, for instance, by doing an internal shielded transfer, `inputRecord`s are spent and `outputRecord`s are created. The reward commitments associated with the nullified `inputRecord`s are queued for insertion on the `RewardSpentTree` and the reward commitments associated with the `outputRecord`s are queued for insertion on the `RewardUnspentTree`.
-- A relayer can submit a batch update zero-knowledge proof to update the `RewardUnspentTree` and `RewardSpentTree`. The roots of these trees are then relayed to the Tangle blockchain, where rewards can be claimed in Tangle tokens.
-
-### Batch Update Circuit
-
-[batchMerkleTreeUpdate.circom](https://github.com/webb-tools/protocol-solidity/tree/main/circuits/merkle-tree)
-
-The purpose of a Merkle tree batch update circuit is simple. It takes in as inputs:
-
-```circom
-signal input argsHash; // Public Input
-signal input oldRoot; // Private Input
-signal input newRoot; // Private Input
-signal input pathIndices; // Private Input
-signal input pathElements[height]; // Private Input
-signal input leaves[nLeaves]; // Private Input
-```
-
-The `argsHash` is simply the hash of all the private inputs. This saves gas, since more public inputs mean more gas spent on proof verification. The `oldRoot` is the current root of the on-chain Merkle tree. The `newRoot` is the root after the `leaves[nLeaves]` are inserted into the Merkle tree. The circuit checks that `newRoot` really is the new root after the `leaves[nLeaves]` are inserted. It does this via the following algorithm (note `nLeaves` must be a power of 2):
-
-- Say the entire Merkle tree has height `levels`.
-- We think of the Merkle tree as a Merkle tree of height `levels - batchLevels`, where each leaf is actually the root of a Merkle tree of height `batchLevels`, where `2 ** batchLevels = nLeaves`.
-- Inside the circuit, we first Merkleize the `nLeaves`. This gives us the root of the Merkle tree of height `batchLevels`, which is in turn a leaf of the Merkle tree of height `levels - batchLevels`.
-- It is then checked that inserting this Merkleized value into the Merkle tree of height `levels - batchLevels` results in the Merkle root being `newRoot`. This is done via the `MerkleTreeUpdater` circuit: [merkleTreeUpdater.circom](https://github.com/webb-tools/protocol-solidity/tree/main/circuits/merkle-tree).
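-
-For intuition, here is an off-circuit TypeScript sketch that recomputes `newRoot` the same way the circuit constrains it (Poseidon via circomlibjs; the input shapes are illustrative, not the circuit's actual signal encoding):
-
-```typescript
-import { buildPoseidon } from "circomlibjs";
-
-// Recompute the batch-updated root. Assumes leaves.length = nLeaves is a power of 2,
-// pathElements are the siblings along the (levels - batchLevels)-height path, and
-// pathIndices encodes the batch subtree's position, least-significant bit first.
-async function batchUpdatedRoot(
-  leaves: bigint[],
-  pathElements: bigint[],
-  pathIndices: number
-): Promise<bigint> {
-  const poseidon = await buildPoseidon();
-  const hash2 = (l: bigint, r: bigint) => poseidon.F.toObject(poseidon([l, r]));
-
-  // 1. Merkleize the batch: fold the leaves pairwise up to a single subtree root.
-  let level = leaves;
-  while (level.length > 1) {
-    const next: bigint[] = [];
-    for (let i = 0; i < level.length; i += 2) next.push(hash2(level[i], level[i + 1]));
-    level = next;
-  }
-  let node = level[0]; // root of the height-batchLevels subtree
-
-  // 2. Insert that subtree root as a leaf of the height (levels - batchLevels) tree.
-  for (let i = 0; i < pathElements.length; i++) {
-    const bit = (pathIndices >> i) & 1;
-    node = bit === 0 ? hash2(node, pathElements[i]) : hash2(pathElements[i], node);
-  }
-  return node; // must equal newRoot for the proof to verify
-}
-```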
diff --git a/pages/docs/protocols/masp/swap.mdx b/pages/docs/protocols/masp/swap.mdx
deleted file mode 100644
index 718c3c2d..00000000
--- a/pages/docs/protocols/masp/swap.mdx
+++ /dev/null
@@ -1,314 +0,0 @@
----
-title: MASP Swap Circuit
-description: Describes the MASP Swap Circuit.
----
-
-import { MASPPrivateTransferImage, MASPSwapImage, MASPSwapDiagramImage } from '../../../../components/images/masp/Masp.tsx'
-import Callout from "../../../../components/Callout";
-
-# MASP Swap Circuit
-
-## MASP key structure:
-
-Each MASP participant has a set of keys of the following form:
-
-
-
-<Callout>
-  Note: `(sk, ak)` form a secret/public keypair which can be used to make EdDSA signatures. These EdDSA signatures can then be verified inside a ZKP (`circomlib` provides templates for this).
-</Callout>
-
-## Updated Record Structure
-
-The MASP key is used in the commitment (record) that is inserted into the MASP’s Merkle tree.
-
-```markdown
-InnerPartialRecord = Poseidon(blinding)
-PartialRecord = Poseidon(DestinationChainId, PublicKey_X, PublicKey_Y, InnerPartialRecord)
-Record = Poseidon(AssetId, TokenId, Amount, PartialRecord)
-```
-
-## Swap User Mechanics
-
-
-
-1. Alice wants to exchange 1 webbBTC (`assetId = 1, tokenId = 0`) for Bob’s 10 webbETH (`assetId = 2, tokenId = 0`) as long as the swap occurs between time `t` and `tPrime`.
- **a.** Alice has a `Record` worth 1.5 webbBTC under her public key that she wants to use for the swap. This is known as Alice’s spend `Record`.
- **b.** Alice creates a change `Record` under her public key worth 0.5 webbBTC.
- **c.** Alice creates the `Record` to receive 10 webbETH under her public key. This is known as Alice’s receive `Record`.
- **d.** Bob similarly creates spend, change, and receive records.
-
-
-
-2. They then send the following details/proof inputs about these `Record`s to a semi-trusted relayer. The relayer makes the swap ZKP and submits it on-chain.
-   **a.** These inputs include a signature of the so-called swap message hash over `(aliceChangeRecord, aliceReceiveRecord, bobChangeRecord, bobReceiveRecord, t, t')`. This is precisely the data the relayer could tamper with, so we have Alice and Bob sign it with their `sk`.
-
- ```jsx
-
- // Swap message is (aliceChangeRecord, aliceReceiveRecord, bobChangeRecord, bobReceiveRecord, t, t')
- // We check a Poseidon Hash of message is signed by both parties
- signal input aliceSpendAssetID;
- signal input aliceSpendTokenID;
- signal input aliceSpendAmount;
- signal input aliceSpendInnerPartialRecord;
- signal input bobSpendAssetID;
- signal input bobSpendTokenID;
- signal input bobSpendAmount;
- signal input bobSpendInnerPartialRecord;
- signal input t;
- signal input tPrime;
-
- signal input alice_ak_X;
- signal input alice_ak_Y;
-
- signal input bob_ak_X;
- signal input bob_ak_Y;
-
- signal input alice_R8x;
- signal input alice_R8y;
-
- signal input aliceSig;
-
- signal input bob_R8x;
- signal input bob_R8y;
-
- signal input bobSig;
-
- signal input aliceSpendPathElements[levels];
- signal input aliceSpendPathIndices;
- signal input aliceSpendNullifier; // Public Input
-
- signal input bobSpendPathElements[levels];
- signal input bobSpendPathIndices;
- signal input bobSpendNullifier; // Public Input
-
- signal input swapChainID; // Public Input
- signal input roots[length]; // Public Input
- signal input currentTimestamp; // Public Input
-
- signal input aliceChangeChainID;
- signal input aliceChangeAssetID;
- signal input aliceChangeTokenID;
- signal input aliceChangeAmount;
- signal input aliceChangeInnerPartialRecord;
- signal input aliceChangeRecord; // Public Input
- signal input bobChangeChainID;
- signal input bobChangeAssetID;
- signal input bobChangeTokenID;
- signal input bobChangeAmount;
- signal input bobChangeInnerPartialRecord;
- signal input bobChangeRecord; // Public Input
-
- signal input aliceReceiveChainID;
- signal input aliceReceiveAssetID;
- signal input aliceReceiveTokenID;
- signal input aliceReceiveAmount;
- signal input aliceReceiveInnerPartialRecord;
- signal input aliceReceiveRecord; // Public Input
- signal input bobReceiveChainID;
- signal input bobReceiveAssetID;
- signal input bobReceiveTokenID;
- signal input bobReceiveAmount;
- signal input bobReceiveInnerPartialRecord;
- signal input bobReceiveRecord; // Public Input
- ```
-
-3. The relayer creates the swap ZKP and submits it to the MASP smart contract.
-
-## Swap Circuit Constraints
-
-### Corresponding `AssetID`s and `TokenID`s are equal
-
-```circom
-aliceSpendAssetID === aliceChangeAssetID;
-aliceReceiveAssetID === bobSpendAssetID;
-bobSpendAssetID === bobChangeAssetID;
-bobReceiveAssetID === aliceSpendAssetID;
-aliceSpendTokenID === aliceChangeTokenID;
-aliceReceiveTokenID === bobSpendTokenID;
-bobSpendTokenID === bobChangeTokenID;
-bobReceiveTokenID === aliceSpendTokenID;
-```
-
-### Funds are neither created nor destroyed
-
-```circom
-aliceSpendAmount === aliceChangeAmount + bobReceiveAmount;
-bobSpendAmount === bobChangeAmount + aliceReceiveAmount;
-```
-
-### Swap Message Hash is Signed By Both Alice and Bob
-
-The swap message is:
-
-```circom
-Poseidon(
- aliceChangeRecord,
- aliceReceiveRecord,
- bobChangeRecord,
- bobReceiveRecord,
- t,
- t'
-)
-```
-
-Alice and Bob each sign with their spending keys `sk`, and we verify the signatures in the circuit with their public keys `ak`.
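-
-Off-chain, producing and checking these signatures might look like the following sketch, assuming circomlibjs's EdDSA-over-babyjubjub helpers (`buildEddsa`, `signPoseidon`, `verifyPoseidon`); key and message serialization details are elided:
-
-```typescript
-import { buildBabyjub, buildEddsa, buildPoseidon } from "circomlibjs";
-
-// Sign the swap message hash with a spending key sk; the circuit later verifies
-// the signature against the corresponding public key ak.
-async function signSwapMessage(sk: Buffer, swapMessageFields: bigint[]) {
-  const eddsa = await buildEddsa();
-  const poseidon = await buildPoseidon();
-  const babyJub = await buildBabyjub();
-
-  // Poseidon(aliceChangeRecord, aliceReceiveRecord, bobChangeRecord,
-  //          bobReceiveRecord, t, tPrime), as a babyjubjub base-field element.
-  const msgHash = babyJub.F.e(poseidon.F.toObject(poseidon(swapMessageFields)));
-
-  const ak = eddsa.prv2pub(sk); // the public key bound to the spend Records
-  const sig = eddsa.signPoseidon(sk, msgHash);
-
-  // Sanity check mirroring the in-circuit EdDSA verification.
-  if (!eddsa.verifyPoseidon(msgHash, sig, ak)) throw new Error("bad signature");
-  return { R8: sig.R8, S: sig.S, ak };
-}
-```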
-
-### Alice and Bob `Spend` `Record`s are in some Merkle tree
-
-```circom
-// Check Alice Spend Merkle Proof
- component aliceSpendPartialRecordHasher = PartialRecord();
- aliceSpendPartialRecordHasher.chainID <== swapChainID;
- aliceSpendPartialRecordHasher.pk_X <== alice_pk_X;
- aliceSpendPartialRecordHasher.pk_Y <== alice_pk_Y;
- aliceSpendPartialRecordHasher.innerPartialRecord <== aliceSpendInnerPartialRecord;
- component aliceSpendRecordHasher = Record();
- aliceSpendRecordHasher.assetID <== aliceSpendAssetID;
- aliceSpendRecordHasher.tokenID <== aliceSpendTokenID;
- aliceSpendRecordHasher.amount <== aliceSpendAmount;
- aliceSpendRecordHasher.partialRecord <== aliceSpendPartialRecordHasher.partialRecord;
-
- component aliceMerkleProof = ManyMerkleProof(levels, length);
- aliceMerkleProof.leaf <== aliceSpendRecordHasher.record;
- aliceMerkleProof.pathIndices <== aliceSpendPathIndices;
- for (var i = 0; i < levels; i++) {
- aliceMerkleProof.pathElements[i] <== aliceSpendPathElements[i];
- }
- aliceMerkleProof.isEnabled <== 1;
- for (var i = 0; i < length; i++) {
- aliceMerkleProof.roots[i] <== roots[i];
- }
-
- // Check Bob Spend Merkle Proof
- component bobSpendPartialRecordHasher = PartialRecord();
- bobSpendPartialRecordHasher.chainID <== swapChainID;
- bobSpendPartialRecordHasher.pk_X <== bob_pk_X;
- bobSpendPartialRecordHasher.pk_Y <== bob_pk_Y;
- bobSpendPartialRecordHasher.innerPartialRecord <== bobSpendInnerPartialRecord;
- component bobSpendRecordHasher = Record();
- bobSpendRecordHasher.assetID <== bobSpendAssetID;
- bobSpendRecordHasher.tokenID <== bobSpendTokenID;
- bobSpendRecordHasher.amount <== bobSpendAmount;
- bobSpendRecordHasher.partialRecord <== bobSpendPartialRecordHasher.partialRecord;
-
- component bobMerkleProof = ManyMerkleProof(levels, length);
- bobMerkleProof.leaf <== bobSpendRecordHasher.record;
- bobMerkleProof.pathIndices <== bobSpendPathIndices;
- for (var i = 0; i < levels; i++) {
- bobMerkleProof.pathElements[i] <== bobSpendPathElements[i];
- }
- bobMerkleProof.isEnabled <== 1;
- for (var i = 0; i < length; i++) {
- bobMerkleProof.roots[i] <== roots[i];
- }
-```
-
-### Alice and Bob’s `Change` and `Receive` `Record`s constructed correctly
-
-```circom
-// Check Alice and Bob Change/Receive Records constructed correctly
- component aliceChangePartialRecordHasher = PartialRecord();
- aliceChangePartialRecordHasher.chainID <== aliceChangeChainID;
- aliceChangePartialRecordHasher.pk_X <== alice_pk_X;
- aliceChangePartialRecordHasher.pk_Y <== alice_pk_Y;
- aliceChangePartialRecordHasher.innerPartialRecord <== aliceChangeInnerPartialRecord;
- component aliceChangeRecordHasher = Record();
- aliceChangeRecordHasher.assetID <== aliceChangeAssetID;
- aliceChangeRecordHasher.tokenID <== aliceChangeTokenID;
- aliceChangeRecordHasher.amount <== aliceChangeAmount;
- aliceChangeRecordHasher.partialRecord <== aliceChangePartialRecordHasher.partialRecord;
- aliceChangeRecordHasher.record === aliceChangeRecord;
-
- component aliceReceivePartialRecordHasher = PartialRecord();
- aliceReceivePartialRecordHasher.chainID <== aliceReceiveChainID;
- aliceReceivePartialRecordHasher.pk_X <== alice_pk_X;
- aliceReceivePartialRecordHasher.pk_Y <== alice_pk_Y;
- aliceReceivePartialRecordHasher.innerPartialRecord <== aliceReceiveInnerPartialRecord;
- component aliceReceiveRecordHasher = Record();
- aliceReceiveRecordHasher.assetID <== aliceReceiveAssetID;
- aliceReceiveRecordHasher.tokenID <== aliceReceiveTokenID;
- aliceReceiveRecordHasher.amount <== aliceReceiveAmount;
- aliceReceiveRecordHasher.partialRecord <== aliceReceivePartialRecordHasher.partialRecord;
- aliceReceiveRecordHasher.record === aliceReceiveRecord;
-
- component bobChangePartialRecordHasher = PartialRecord();
- bobChangePartialRecordHasher.chainID <== bobChangeChainID;
- bobChangePartialRecordHasher.pk_X <== bob_pk_X;
- bobChangePartialRecordHasher.pk_Y <== bob_pk_Y;
- bobChangePartialRecordHasher.innerPartialRecord <== bobChangeInnerPartialRecord;
- component bobChangeRecordHasher = Record();
- bobChangeRecordHasher.assetID <== bobChangeAssetID;
- bobChangeRecordHasher.tokenID <== bobChangeTokenID;
- bobChangeRecordHasher.amount <== bobChangeAmount;
- bobChangeRecordHasher.partialRecord <== bobChangePartialRecordHasher.partialRecord;
- bobChangeRecordHasher.record === bobChangeRecord;
-
- component bobReceivePartialRecordHasher = PartialRecord();
- bobReceivePartialRecordHasher.chainID <== bobReceiveChainID;
- bobReceivePartialRecordHasher.pk_X <== bob_pk_X;
- bobReceivePartialRecordHasher.pk_Y <== bob_pk_Y;
- bobReceivePartialRecordHasher.innerPartialRecord <== bobReceiveInnerPartialRecord;
- component bobReceiveRecordHasher = Record();
- bobReceiveRecordHasher.assetID <== bobReceiveAssetID;
- bobReceiveRecordHasher.tokenID <== bobReceiveTokenID;
- bobReceiveRecordHasher.amount <== bobReceiveAmount;
- bobReceiveRecordHasher.partialRecord <== bobReceivePartialRecordHasher.partialRecord;
- bobReceiveRecordHasher.record === bobReceiveRecord;
-```
-
-### Alice and Bob’s `Spend` `Record` Nullifiers Constructed Correctly
-
-```circom
-// Check Alice and Bob Spend Nullifiers constructed correctly
- component aliceNullifierHasher = Nullifier();
- aliceNullifierHasher.record <== aliceSpendRecordHasher.record;
- aliceNullifierHasher.pathIndices <== aliceSpendPathIndices;
- aliceNullifierHasher.nullifier === aliceSpendNullifier;
-
- component bobNullifierHasher = Nullifier();
- bobNullifierHasher.record <== bobSpendRecordHasher.record;
- bobNullifierHasher.pathIndices <== bobSpendPathIndices;
- bobNullifierHasher.nullifier === bobSpendNullifier;
-```
-
-## Swap Smart Contract Implementation
-
-The Multi Asset Variable Anchor is a variable-denominated shielded pool system
-derived from previous pool systems that supports multiple assets in a single pool.
-This system extends the shielded pool system into a bridged system and allows for
-join/split transactions of different assets at the same time.
-
-The system is built on top of the MultiAssetVAnchorBase/AnchorBase/LinkableAnchor system,
-which allows it to be linked to other VAnchor contracts through a simple graph-like
-interface where anchors maintain edges of their neighboring anchors.
-
-Part of the benefit of a MASP is the ability to handle multiple assets in a single pool.
-To support this, the system uses an `assetId` field in the UTXO to identify the asset.
-One thing to remember is that all assets in the pool must be wrapped ERC20 tokens specific
-to the pool. We refer to these tokens as the bridge ERC20 tokens. Part of the challenge of building
-the MASP, then, is dealing with the mapping between bridge ERC20s and their asset IDs.
-
-- [protocol-solidity](https://github.com/webb-tools/protocol-solidity)
-
-## Why the Relayer Cannot Tamper With Data
-
-Note that it is largely impossible for the relayer to tamper with the `Spend` `Record` data: if it does, the proof of membership in one-of-many Merkle trees will fail.
-
-Where the relayer has free rein is in changing the data in the `Change` and `Receive` `Record`s, as well as `t` and `tPrime`. Let's consider a few attacks and see why they don't work.
-
-### Relayer changes data in the `Change/Receive` `Record`s but does NOT change the swap message hash signatures
-
-If the swap message hash signatures are not changed by the relayer, the only way they verify is if the uncorrupted `Change` and `Receive` `Record` commitments are fed into the Poseidon hasher for the swap message hash. In this case, if the relayer changes data in the `Change/Receive` `Record`s, constraints such as:
-
-```circom
-bobReceiveRecordHasher.record === bobReceiveRecord;
-```
-
-will not be satisfied, and the proof will fail to verify.
-
-So now it is clear that to tamper with data, the relayer must change the data inside the swap message. The only way it can do so is by signing this bogus data with its own keys.
-
-### Relayer changes data inside the swap message and signs it with its own keys
-
-In this case, it will feed an incorrect `ak` into the circuit so that the signature verifies. But the `pk` corresponding to this `ak` will form a non-existent `Spend` `Record` and the one-of-many Merkle proof for the `Spend` `Record` will not pass.
diff --git a/pages/docs/protocols/mpc-protocols/anchor-proposal-lifecycle.mdx b/pages/docs/protocols/mpc-protocols/anchor-proposal-lifecycle.mdx
deleted file mode 100644
index 87faa779..00000000
--- a/pages/docs/protocols/mpc-protocols/anchor-proposal-lifecycle.mdx
+++ /dev/null
@@ -1,50 +0,0 @@
----
-title: Anchor Update Proposal Lifecycle
-description: An overview of how proposals are signed and processed.
----
-
-import DKGProposalImages from '../../../../components/images/DKGProposalSigning'
-
-# Proposal Lifecycle
-
-This page outlines the lifecycle of proposals in the context of the Anchor Update Proposal. It describes the process from proposing an anchor update to submitting a signed proposal.
-
-## How proposals are signed
-
-The below sequence diagram illustrates the steps required to successfully submit an **Anchor Update Proposal**
-for signature.
-
-1. **Propose:** Proposers submit an anchor update proposal.
-2. **Vote:** Proposers cast their votes to either reject or acknowledge the proposal.
-3. **Rejection:** If the majority votes to reject, the execution is canceled, and a proposal rejection event is emitted.
-4. **Approval:** If the majority votes in favor of the proposal, the execution to have that proposal signed begins.
-5. **Queue:** The proposal is inserted into an Unsigned Proposal Queue.
-6. **Fetch:** The DKG-gadget, an offchain service worker, fetches the unsigned proposal from the queue.
-7. **Send:** The DKG-gadget sends messages to all connected peers requesting their signatures in the current or next round.
-8. **Sign:** Upon the completion of the round, the now signed proposals are processed and stored in offchain storage.
-9. **Retrieve:** The Proposal-Handlers offchain worker fetches the signed proposals.
-10. **Validate:** The proposal signatures are validated and inserted into pallet storage.
-11. **Event:** A ProposalSigned event is emitted, indicating a successfully submitted proposal.
-
-
-
-## Supported Proposals
-
-The current DKG implementation manages the following proposals. Each specifies a unique change to the system
-that must be signed in order for the requested update to be considered valid.
-
-| Proposals | Description |
-| -------------------------- | ------------------------------------------------------- |
-| Refresh | Proposal to refresh a contract’s governor |
-| AnchorUpdate | Proposal to update merkle roots |
-| SetVerifierProposal | Proposal to set a verifier address |
-| TokenAdd | Proposal to add token to a set |
-| TokenRemove | Proposal to remove token from a set |
-| WrappingFeeUpdate | Proposal to update fee parameter |
-| RescueToken | Proposal to move tokens from a Treasury |
-| MaxDepositLimitUpdate | Proposal to update a maximum deposit limit parameter |
-| MinWithdrawalLimitUpdate | Proposal to update a minimum withdrawal limit parameter |
-| FeeRecipientUpdateProposal | Proposal to update a fee recipient account |
-| SetTreasuryHandlerProposal | Proposal to set a treasury handler address |
-| ResourceIdUpdate | Proposal to add/update a resource ID |
-| ProposalSetUpdate | Proposal to update the latest proposer set state |
diff --git a/pages/docs/protocols/mpc-protocols/dkg-substrate/_meta.json b/pages/docs/protocols/mpc-protocols/dkg-substrate/_meta.json
deleted file mode 100644
index 40b3ec9e..00000000
--- a/pages/docs/protocols/mpc-protocols/dkg-substrate/_meta.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "overview": "Overview",
- "key-manager": "Key Manager"
-}
diff --git a/pages/docs/protocols/mpc-protocols/dkg-substrate/key-manager.mdx b/pages/docs/protocols/mpc-protocols/dkg-substrate/key-manager.mdx
deleted file mode 100644
index 77f549d2..00000000
--- a/pages/docs/protocols/mpc-protocols/dkg-substrate/key-manager.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
-# Keygen Manager
-
-![](../../../../../images/keygen_manager.png)
-
-## Introduction
-
-The Keygen Manager is designed to simplify and manage the spawning and execution of Keygen protocols. Unlike its predecessor, it includes message enqueuing to ensure synchronicity between DKG nodes, as well as simpler-to-follow logic.
-
-[Github Repo](https://github.com/webb-tools/dkg-substrate/blob/5a5642ce3d2465169e08144c4df4e40f4964eac0/dkg-gadget/keygen-manager.md)
-
-## Design
-
-The Keygen Manager primarily consists of two integral components: The Keygen Manager itself and the Job Manager. The former acts as a bridge between the DKG worker and the Job Manager, while the latter takes on the responsibility of enqueueing, executing, handling, and managing stalled jobs.
-
-### The DKG Worker
-
-In relation to the Keygen Manager, the DKG Worker's task is to forward finality notifications from the blockchain and signed DKG messages from the gossip protocol to the Keygen Manager.
-
-### The Keygen Manager
-
-When the DKG worker transmits a finality notification to the Keygen Manager, it first checks whether we are forcing a keygen. If so, it removes any potentially running jobs in the Job Manager and immediately starts a new job.
-
-If a forced keygen is not required, it then checks to see if there are any currently running unstalled jobs. If there are, then we return. Otherwise, we check the current session.
-
-If the current session is 0, we check to see if the local keygen state is uninitialized. If so, we then check to see if the _current_ DKG public key is on-chain. If it is not, as is the case when first running a protocol, we start a new keygen that generates a key for session 0.
-Otherwise, since the current key is on-chain, we then check to see if the next key is on-chain. If it is, we return. Otherwise, we then check to see if the blockchain is ready for a new keygen. If the blockchain is ready, we begin a keygen protocol to generate a key for session 1.
-
-If the current session is not 0, e.g., session=`s`, we check to see if the next key (i.e., for session `s+1`) is on-chain. If so, we return. Otherwise, we check to see if the blockchain is ready for a new keygen. If so, we begin a keygen protocol to generate a key for session `s+1`.
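-
-A compact sketch of this decision flow (names are illustrative, not the actual dkg-gadget API):
-
-```typescript
-// Illustrative model of the Keygen Manager's reaction to a finality notification.
-interface ChainView {
-  forcedKeygen: boolean;
-  currentSession: number;
-  currentKeyOnChain: boolean;
-  nextKeyOnChain: boolean;
-  readyForKeygen: boolean;
-}
-
-function onFinalityNotification(
-  chain: ChainView,
-  state: { hasRunningUnstalledJob: boolean; keygenInitialized: boolean },
-  startKeygen: (session: number) => void,
-  clearJobs: () => void
-): void {
-  if (chain.forcedKeygen) {
-    clearJobs(); // drop any potentially running jobs
-    startKeygen(chain.currentSession);
-    return;
-  }
-  if (state.hasRunningUnstalledJob) return;
-
-  if (chain.currentSession === 0) {
-    if (!state.keygenInitialized && !chain.currentKeyOnChain) return startKeygen(0);
-    if (chain.nextKeyOnChain) return;
-    if (chain.readyForKeygen) startKeygen(1);
-    return;
-  }
-  // Session s > 0: generate the key for s + 1 if it is not yet on-chain.
-  if (!chain.nextKeyOnChain && chain.readyForKeygen) startKeygen(chain.currentSession + 1);
-}
-```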
-
-### The Job Manager (the background worker)
-
-Unlike the Signing Manager's job manager, which runs a background worker, the Keygen Manager opts out of the background worker. Polling is done manually through the Keygen Manager.
-
-### Receiving signed DKG messages
-
-When the Job Manager receives signed DKG messages, it checks to see if it needs to deliver the message. If not, it enqueues the message for potential future keygen protocols.
diff --git a/pages/docs/protocols/mpc-protocols/dkg-substrate/overview.mdx b/pages/docs/protocols/mpc-protocols/dkg-substrate/overview.mdx
deleted file mode 100644
index 8e3c826c..00000000
--- a/pages/docs/protocols/mpc-protocols/dkg-substrate/overview.mdx
+++ /dev/null
@@ -1,98 +0,0 @@
-# Getting Started
-
-[Github Repo](https://github.com/webb-tools/dkg-substrate/blob/master/README.md)
-
-The DKG is a multi-party computation protocol that generates a group public and private key. We aim to use this group keypair to sign arbitrary messages that will govern protocols deployed around the blockchain ecosystem. One primary purpose for the DKG is to govern and facilitate operations of the private signature bridge/anchor system.
-
-For additional information, please refer to the [Webb DKG Rust Docs](https://webb-tools.github.io/dkg-substrate/). Have feedback on how to improve the DKG? Or have a specific question to ask? Check out the [DKG Feedback Discussion](https://github.com/webb-tools/feedback/discussions/categories/dkg-feedback) 💬.
-
-## Prerequisites
-
-This guide uses [https://rustup.rs](https://rustup.rs) installer and the `rustup` tool to manage the Rust toolchain.
-
-First install and configure `rustup`:
-
-```bash
-# Install
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
-# Configure
-source ~/.cargo/env
-```
-
-Configure the Rust toolchain to default to the latest stable version, add nightly and the nightly wasm target:
-
-```bash
-rustup default nightly
-rustup update
-rustup update nightly
-rustup target add wasm32-unknown-unknown
-```
-
-## Installation
-
-```bash
-cargo build --release
-```
-
-NOTE: You must use release builds! The optimizations are required; in debug mode, nodes cannot run fast enough to produce blocks.
-
-## Installation Using Nix
-
-1. Install [Nix](https://nixos.org/download.html)
-2. Enable [Flakes](https://nixos.wiki/wiki/Flakes)
-3. If you have [direnv](https://github.com/nix-community/nix-direnv#installation) installed, everything should work out of the box.
-4. Run `nix develop` in the root of this repo to get a shell with all the dependencies installed.
-
-## Usage
-
-**Standalone Local Testnet**
-
-```bash
-./scripts/run-local-testnet.sh --clean
-```
-
-This should start the local testnet. You can view the logs for all the authorities in the `/tmp` directory and use PolkadotJS to view the running testnet.
-
-### Testing
-
-The following instructions outline how to run dkg-substrate's base test suite and E2E test suite.
-
-To run base tests:
-
-```bash
-cargo test
-```
-
-To run the test orchestrator E2E tests (recommended):
-
-```bash
-# Build the dkg-standalone node
-cargo build --release -p dkg-standalone-node --features=integration-tests,testing
-
-
-# run the orchestrator, making sure to use the proper config
-cargo run --package dkg-test-orchestrator --release --features=testing -- --config /path/to/orchestrator_config.toml
-```
-
-Setting up debugging logs:
-
-If you would like to run the DKG with verbose logs, you may add the following arguments during initial setup. You may change the target to include `debug | error | info | trace | warn`. You may also want to review Substrate runtime debugging.
-
-```bash
-
--ldkg=debug \
--ldkg_metadata=debug \
--lruntime::offchain=debug \
--ldkg_proposal_handler=debug \
--ldkg_proposals=debug
-```
-
-### Contributing
-
-Interested in contributing to the Webb Relayer Network? Thank you so much for your interest! We are always appreciative of contributions from the open-source community!
-
-If you have a contribution in mind, please check out our Contribution Guide for information on how to do so. We are excited for your first contribution!
-
-### License
-
-Licensed under GNU General Public License v3.0.
-
-Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in this crate by you, as defined in the GNU General Public License v3.0 license, shall be licensed as above, without any additional terms or conditions.
diff --git a/pages/docs/protocols/mpc-protocols/proposals.mdx b/pages/docs/protocols/mpc-protocols/proposals.mdx
deleted file mode 100644
index df87bce1..00000000
--- a/pages/docs/protocols/mpc-protocols/proposals.mdx
+++ /dev/null
@@ -1,51 +0,0 @@
----
-title: Global Governance Proposals
-description: An overview of global governance proposals for the DKG protocol.
----
-
-# Global Governance Proposals
-
-This page provides an overview of Webb's global-governance-specific proposals, their structure, and their intended use.
-
-Collectively, there are 4 distinct global governance proposals, as you can see from the table below.
-A proposal is simply a message that is voted on and that suggests a change in the Merkle roots or the system. Proposals can be unsigned or signed.
-Below are all global governance proposals that the DKG protocol handles.
-
-| Proposals | Description |
-| ------------------------------ | ------------------------------------------------ |
-| Refresh (Governance) | Proposal to refresh a contract's governor |
-| AnchorUpdate (Governance) | Proposal to update merkle roots |
-| ResourceIdUpdate (Governance) | Proposal to add/update a resource ID |
-| ProposalSetUpdate (Governance) | Proposal to update the latest proposer set state |
-
-## Proposals
-
-### AnchorUpdateProposal
-
-| Protocol | Doc | Byte Size | Arguments |
-| --------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| EVM | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/evm/struct.AnchorUpdateProposal.html) | Total Size: 104 bytes | 1. ProposalHeader (40 bytes) 2. Merkle root (32 bytes) 3. Source ResourceID (32 bytes) |
-| Substrate | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/substrate/struct.AnchorUpdateProposal.html) | Total Size: ≥ 40 bytes | [Encoded call](https://github.com/webb-tools/protocol-substrate/blob/a8ef0d0fcbf060fd62ab4ad683cb7c810885a3ea/pallets/vanchor-handler/src/lib.rs#L167) 1. ProposalHeader (40 bytes) 2. Merkle root (32 bytes) 3. Source ResourceID (32 bytes) |
-| Cosmwasm | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/cosmwasm/struct.AnchorUpdateProposal.html) | Total Size: ≥ 40 bytes | [Encoded call](https://github.com/webb-tools/protocol-cosmwasm/blob/24cd6668257b4b1938d05d539cc1fcc2944c91bd/contracts/anchor-handler/src/contract.rs#L62) 1. ProposalHeader (40 bytes) 2. Merkle root (32 bytes) 3. Source ResourceID (32 bytes) |
-| ink! | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/ink/struct.AnchorUpdateProposal.html) | Total Size: ≥ 40 bytes | [Encoded call](https://github.com/webb-tools/protocol-ink/blob/6ba2a3c748c757b70f9cfb83154deb3a1062fb5c/contracts/anchor_handler/lib.rs#L187) 1. ProposalHeader (40 bytes) 2. Merkle root (32 bytes) 3. Source ResourceID (32 bytes) |
-
-### ResourceIdUpdateProposal
-
-| Protocol | Doc | Byte Size | Arguments |
-| --------- | -------------------------------------------------------------------------------------------------------------------- | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| EVM | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/evm/struct.ResourceIdUpdateProposal.html) | Total Size: 92 bytes | 1. ProposalHeader (40 bytes) 2. New resource ID (32 bytes) 3. Handler address (20 bytes) |
-| Substrate | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/substrate/struct.ResourceIdUpdateProposal.html) | Total Size: ≥ 40 bytes | Encoded call for a specific handler pallet 1. [VAnchor handler pallet call](https://github.com/webb-tools/protocol-substrate/blob/a8ef0d0fcbf060fd62ab4ad683cb7c810885a3ea/pallets/vanchor-handler/src/lib.rs#L179) |
-| Cosmwasm | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/cosmwasm/struct.ResourceIdUpdateProposal.html) | Total Size: ≥ 40 bytes | [Encoded call](https://github.com/webb-tools/protocol-cosmwasm/blob/24cd6668257b4b1938d05d539cc1fcc2944c91bd/contracts/anchor-handler/src/contract.rs#L85) 1. ProposalHeader (40 bytes) 2. New resource ID (32 bytes) 3. Handler address (20 bytes) |
-| ink! | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/ink/struct.ResourceIdUpdateProposal.html) | Total Size: ≥ 40 bytes | [Encoded call](https://github.com/webb-tools/protocol-ink/blob/6ba2a3c748c757b70f9cfb83154deb3a1062fb5c/contracts/anchor_handler/lib.rs#L153) 1. ProposalHeader (40 bytes) 2. New resource ID (32 bytes) 3. Handler address (20 bytes) |
-
-### Refresh
-
-| Protocol | Doc | Byte Size | Arguments |
-| -------- | ------------------------------------------------------------------------------------------------------ | -------------------- | ---------------------------------------------------- |
-| DKG | [Proposal Docs](https://webb-tools.github.io/dkg-substrate/dkg_primitives/struct.RefreshProposal.html) | Total Size: 68 bytes | 1. Public key (64 bytes) 2. Nonce (4 bytes) |
-
-### ProposerSetUpdate
-
-| Protocol | Doc | Byte Size | Arguments |
-| -------- | --------------------------------------------------------------------------------------------------------------------------------------------- | -------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
-| DKG | [Proposal Docs](https://webb-tools.github.io/dkg-substrate/dkg_primitives/handlers/proposer_set_update/struct.ProposerSetUpdateProposal.html) | Total Size: 48 bytes | 1. Merkle root (32 bytes) 2. Average session length (8 bytes) 3. Number of proposers (4 bytes) 4. Nonce (4 bytes) |
diff --git a/pages/docs/protocols/single-asset-shielded-pool/_meta.json b/pages/docs/protocols/single-asset-shielded-pool/_meta.json
deleted file mode 100644
index 02738680..00000000
--- a/pages/docs/protocols/single-asset-shielded-pool/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "overview": "Overview",
- "deployments": "SASP Deployments",
- "governance-proposals": "Proposal System"
-}
diff --git a/pages/docs/protocols/single-asset-shielded-pool/deployments.mdx b/pages/docs/protocols/single-asset-shielded-pool/deployments.mdx
deleted file mode 100644
index 1503a80a..00000000
--- a/pages/docs/protocols/single-asset-shielded-pool/deployments.mdx
+++ /dev/null
@@ -1,50 +0,0 @@
----
-title: Deployments
-description: Outlines relevant deployments of the Webb infrastructure.
----
-
-# Webb Contract Deployments
-
-Webb-related contracts are deployed to the addresses below. Be extremely careful to confirm the mappings below before use.
-
-| Contract | Arbitrum Testnet |
-| ------------------------- | -------------------------------------------------------------------------------------------------------------------- |
-| Anchor | `0x38e7aa90c77f86747fab355eecaa0c2e4c3a463d` |
-| Signature Bridge | `0xab85034baF6D500b923191FA29962ae7fE67af7a` |
-| $webbAlpha entry | `1099511628196 + 0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01` |
-| $webbAlpha Token Explorer | [0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01](https://arbiscan.io/address/0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01) |
-
-| Contract | ETH Goerli |
-| ------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
-| Anchor | `0x38e7aa90c77f86747fab355eecaa0c2e4c3a463d` |
-| Signature Bridge | `0xab85034baF6D500b923191FA29962ae7fE67af7a` |
-| $webbAlpha entry | `1099511627781 + 0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01` |
-| $webbAlpha Token Explorer | [0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01](https://goerli.etherscan.io/address/0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01) |
-
-| Contract | ETH Sepolia |
-| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
-| Anchor | `0x38e7aa90c77f86747fab355eecaa0c2e4c3a463d` |
-| Signature Bridge | `0xab85034baF6D500b923191FA29962ae7fE67af7a` |
-| $webbAlpha entry | `1099522782887 + 0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01` |
-| $webbAlpha Token Explorer | [0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01](https://sepolia.etherscan.io/address/0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01) |
-
-| Contract | Optimism Goerli |
-| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- |
-| Anchor | `0x38e7aa90c77f86747fab355eecaa0c2e4c3a463d` |
-| Signature Bridge | `0xab85034baF6D500b923191FA29962ae7fE67af7a` |
-| $webbAlpha entry | `1099511628196 + 0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01` |
-| $webbAlpha Token Explorer | [0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01](https://goerli-optimism.etherscan.io/address/0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01) |
-
-| Contract | Polygon Mumbai |
-| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
-| Anchor | `0x38e7aa90c77f86747fab355eecaa0c2e4c3a463d` |
-| Signature Bridge | `0xab85034baF6D500b923191FA29962ae7fE67af7a` |
-| $webbAlpha entry | `1099511707777 + 0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01` |
-| $webbAlpha Token Explorer | [0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01](https://mumbai.polygonscan.com/address/0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01) |
-
-| Contract | Moonbase Alpha |
-| ------------------------- | ----------------------------------------------------------------------------------------------------------------------------- |
-| Anchor | `0x38e7aa90c77f86747fab355eecaa0c2e4c3a463d` |
-| Signature Bridge | `0xab85034baF6D500b923191FA29962ae7fE67af7a` |
-| $webbAlpha entry | `1099511629063 + 0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01` |
-| $webbAlpha Token Explorer | [0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01](https://moonbase.moonscan.io/address/0x01EB50a7c42b525fA7c3BD340aB1f6F81257fE01) |
diff --git a/pages/docs/protocols/single-asset-shielded-pool/governance-proposals.mdx b/pages/docs/protocols/single-asset-shielded-pool/governance-proposals.mdx
deleted file mode 100644
index 8a803db8..00000000
--- a/pages/docs/protocols/single-asset-shielded-pool/governance-proposals.mdx
+++ /dev/null
@@ -1,139 +0,0 @@
----
-title: Application Specific Proposals
-description: Describes the application specific proposals for the Asset Transfer protocol.
----
-
-# Application Specific Proposals
-
-This page provides an overview of Webb's Asset-Transfer-protocol-specific proposals, their structure in the Asset Transfer protocol implementation, and their intended use.
-
-Collectively, there are 9 distinct proposals used within the Asset Transfer protocol, as you can see from the table below.
-A proposal is simply a message that is voted on and that suggests a change in the Merkle roots or the system. Proposals can be unsigned or signed.
-Below are the proposal types included in the Asset Transfer protocol.
-
-| Proposals | Description |
-| -------------------------- | ------------------------------------------------------- |
-| SetVerifierProposal | Proposal to set a verifier address |
-| TokenAdd | Proposal to add token to a set |
-| TokenRemove | Proposal to remove token from a set |
-| WrappingFeeUpdate | Proposal to update fee parameter |
-| RescueToken | Proposal to move tokens from a Treasury |
-| MaxDepositLimitUpdate | Proposal to update a maximum deposit limit parameter |
-| MinWithdrawalLimitUpdate | Proposal to update a minimum withdrawal limit parameter |
-| FeeRecipientUpdateProposal | Proposal to update a fee recipient account |
-| SetTreasuryHandlerProposal | Proposal to set a treasury handler address |
-
-## Definitions
-
-### Typed Chain ID
-
-A unique Webb-specific chain identifier that is composed of a chain / virtual machine type and the respective identifier.
-**Size:** 6 bytes
-
-### Target System
-
-Relevant for identifying the actual resource / system / contract that is being targeted in the proposal lifecycle.
-**Size:** 26 bytes
-
-### Resource ID
-
-Relevant for identifying the system the proposal is being executed on and interacting with. The resource ID contains the target system and the target chain ID to be communicated with.
-**Size:** 32 bytes
-**Structure:** `(TargetSystem, TypedChainId)`
-
-### FunctionSig
-
-Relevant for EVM networks where call data is generic and not directly executable at a specific function.
-**Size:** 4 bytes
-
-### Nonce
-
-Relevant for tracking updates to contracts.
-**Size:** 4 bytes
-
-### Proposal Header
-
-**Size:** 40 bytes
-**Structure:** `(Resource ID, FunctionSig, Nonce)`
-
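-For illustration, here is a minimal TypeScript sketch of packing these three fields into the 40-byte header described above. The Buffer layout and big-endian nonce encoding are assumptions for the sketch, not taken from the spec:
-
-```ts
-// Hypothetical sketch: packing the 40-byte proposal header from the fields above.
-// Big-endian nonce encoding is an assumption, not prescribed on this page.
-function encodeProposalHeader(
-  resourceId: Buffer, // 32 bytes: (TargetSystem, TypedChainId)
-  functionSig: Buffer, // 4 bytes
-  nonce: number // 4 bytes
-): Buffer {
-  if (resourceId.length !== 32 || functionSig.length !== 4) {
-    throw new Error("invalid field length");
-  }
-  const nonceBuf = Buffer.alloc(4);
-  nonceBuf.writeUInt32BE(nonce);
-  return Buffer.concat([resourceId, functionSig, nonceBuf]); // 40 bytes total
-}
-```
-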
-## Application Specific Proposals
-
-### TokenAddProposal
-
-| Protocol | Doc | Byte Size | Arguments |
-| --------- | ------------------------------------------------------------------------------------------------------------ | ---------------------- | --------------------------------------------------------------------------------- |
-| EVM | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/evm/struct.TokenAddProposal.html) | Total Size: 60 bytes | 1. ProposalHeader (40 bytes) 2. Token Address (20 bytes) |
-| Substrate | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/substrate/struct.TokenAddProposal.html) | Total Size: ≥ 40 bytes | Encoded call |
-| CosmWasm | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/cosmwasm/struct.TokenAddProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Token Address (20 bytes) |
-| ink! | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/ink/struct.TokenAddProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Token Address (20 bytes) |
-
-### TokenRemoveProposal
-
-| Protocol | Doc | Byte Size | Arguments |
-| --------- | --------------------------------------------------------------------------------------------------------------- | ---------------------- | --------------------------------------------------------------------------------- |
-| EVM | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/evm/struct.TokenRemoveProposal.html) | Total Size: 60 bytes | 1. ProposalHeader (40 bytes) 2. Token Address (20 bytes) |
-| Substrate | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/substrate/struct.TokenRemoveProposal.html) | Total Size: ≥ 40 bytes | Encoded call |
-| CosmWasm | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/cosmwasm/struct.TokenRemoveProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Token Address (20 bytes) |
-| ink! | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/ink/struct.TokenRemoveProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Token Address (20 bytes) |
-
-### FeeRecipientUpdateProposal
-
-| Protocol | Doc | Byte Size | Arguments |
-| -------- | --------------------------------------------------------------------------------------------------------------------- | ---------------------- | ------------------------------------------------------------------------------------- |
-| EVM | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/evm/struct.FeeRecipientUpdateProposal.html) | Total Size: 60 bytes | 1. ProposalHeader (40 bytes) 2. Recipient Address (20 bytes) |
-| CosmWasm | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/cosmwasm/struct.FeeRecipientUpdateProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Recipient Address (20 bytes) |
-| ink! | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/ink/struct.FeeRecipientUpdateProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Recipient Address (20 bytes) |
-
-### WrappingFeeUpdateProposal
-
-| Protocol | Doc | Byte Size | Arguments |
-| --------- | --------------------------------------------------------------------------------------------------------------------- | ---------------------- | -------------------------------------------------------------------------------- |
-| EVM       | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/evm/struct.WrappingFeeUpdateProposal.html)         | Total Size: 42 bytes   | 1. ProposalHeader (40 bytes) 2. Fee percentage (2 bytes)                          |
-| Substrate | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/substrate/struct.WrappingFeeUpdateProposal.html)   | Total Size: ≥ 40 bytes | Encoded call                                                                      |
-| CosmWasm  | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/cosmwasm/struct.WrappingFeeUpdateProposal.html)    | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Fee percentage (2 bytes)             |
-| ink!      | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/ink/struct.WrappingFeeUpdateProposal.html)         | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Fee percentage (2 bytes)             |
-
-### RescueTokensProposal
-
-| Protocol | Doc | Byte Size | Arguments |
-| --------- | --------------------------------------------------------------------------------------------------------------- | ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
-| EVM | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/evm/struct.RescueTokensProposal.html) | Total Size: 112 bytes | 1. ProposalHeader (40 bytes) 2. Token address (20 bytes) 3. Recipient address (20 bytes) 4. Amount (32 bytes) |
-| CosmWasm | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/cosmwasm/struct.RescueTokensProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Token address (20 bytes) 3. Recipient address (20 bytes) 4. Amount (32 bytes) |
-| ink! | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/ink/struct.RescueTokensProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Token address (20 bytes) 3. Recipient address (20 bytes) 4. Amount (32 bytes) |
-| Substrate | Not documented | Total Size: ≥ 40 bytes | Encoded call |
-
-### MaxDepositLimitProposal
-
-| Protocol | Doc | Byte Size | Arguments |
-| --------- | ------------------------------------------------------------------------------------------------------------------ | ---------------------- | ------------------------------------------------------------------------------------- |
-| EVM | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/evm/struct.MaxDepositLimitProposal.html) | Total Size: 72 bytes | 1. ProposalHeader (40 bytes) 2. Deposit limit (32 bytes) |
-| CosmWasm | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/cosmwasm/struct.MaxDepositLimitProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Deposit limit (32 bytes) |
-| ink! | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/ink/struct.MaxDepositLimitProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Deposit limit (32 bytes) |
-| Substrate | Not documented | | |
-
-### MinWithdrawalLimitProposal
-
-| Protocol | Doc | Byte Size | Arguments |
-| --------- | --------------------------------------------------------------------------------------------------------------------- | ---------------------- | ---------------------------------------------------------------------------------------- |
-| EVM | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/evm/struct.MinWithdrawalLimitProposal.html) | Total Size: 72 bytes | 1. ProposalHeader (40 bytes) 2. Withdrawal limit (32 bytes) |
-| CosmWasm | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/cosmwasm/struct.MinWithdrawalLimitProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Withdrawal limit (32 bytes) |
-| ink! | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/ink/struct.MinWithdrawalLimitProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. Withdrawal limit (32 bytes) |
-| Substrate | Not documented | | |
-
-### SetTreasuryHandlerProposal
-
-**Note:** Not required in the Substrate implementation.
-
-| Protocol | Doc | Byte Size | Arguments |
-| -------- | --------------------------------------------------------------------------------------------------------------------- | ---------------------- | ------------------------------------------------------------------------------------------- |
-| EVM | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/evm/struct.SetTreasuryHandlerProposal.html) | Total Size: 60 bytes | 1. ProposalHeader (40 bytes) 2. New handler address (20 bytes) |
-| CosmWasm | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/cosmwasm/struct.SetTreasuryHandlerProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. New handler address (20 bytes) |
-| ink! | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/ink/struct.SetTreasuryHandlerProposal.html) | Total Size: ≥ 40 bytes | Encoded call 1. ProposalHeader (40 bytes) 2. New handler address (20 bytes) |
-
-### SetVerifierProposal
-
-**Note:** Not required in the Substrate implementation.
-
-| Protocol | Doc | Byte Size | Arguments |
-| -------- | --------------------------------------------------------------------------------------------------------- | -------------------- | --------------------------------------------------------------------- |
-| EVM | [Proposal Docs](https://docs.rs/webb-proposals/latest/webb_proposals/evm/struct.SetVerifierProposal.html) | Total Size: 60 bytes | 1. ProposalHeader (40 bytes) 2. New verifier address (20 bytes) |
diff --git a/pages/docs/protocols/single-asset-shielded-pool/overview.mdx b/pages/docs/protocols/single-asset-shielded-pool/overview.mdx
deleted file mode 100644
index d16c3ec5..00000000
--- a/pages/docs/protocols/single-asset-shielded-pool/overview.mdx
+++ /dev/null
@@ -1,84 +0,0 @@
----
-title: Asset Transfer
-description: A description of Webb's Asset Transfer protocol.
----
-
-import AssetTransferImage from '../../../../components/images/AssetTransferProtocol'
-
-# Asset Transfer
-
-The asset protocol we describe here is inspired by and based on shielded pool protocols that allow for private, arbitrary-valued transactions using a shielded unspent transaction output (UTXO) model.
-
-In our version of the asset protocol, we leverage Webb’s technology to design an interoperable shielded pool that allows shielded transactions on a single chain as well as across chains in a single design.
-
-### UTXO Model
-
-The UTXO model for user balances is predicated on the UTXO data structure. Each UTXO stores, among other things, a value representing the amount of funds that can be spent and transferred to another UTXO. UTXOs can be split by breaking this value into multiple parts, creating many UTXOs from one. We refer to this action as "spending" a UTXO and "splitting" its outputs.
-
-We refer to shielded UTXOs as UTXOs for which we use collision-resistant hash commitments to reference them. Shielded UTXOs are hidden UTXOs because an outside observer will only see the hash commitment of the UTXO and no underlying data. We will describe later on how we "spend" shielded UTXOs using only these hashes.
-
-For a given UTXO system, we will refer to the UTXO set as the set of all unspent transaction outputs. We will maintain the set of all spent and unspent transaction outputs in a merkle tree for efficient storage and proofs of membership. We will assume the existence of a method that allows us to check whether a transaction output has been spent or is otherwise unspent.
-
-### Shielded UTXO Model
-
-In the shielded UTXO model, the UTXO set is maintained as a merkle tree of commitments to the hidden UTXOs. To spend shielded UTXOs, we require a valid zero-knowledge proof that satisfies various constraints, including the sum of inputs equaling the sum of outputs, well-formedness of UTXOs and their commitments, and uniqueness of spent UTXOs.
-
-Spending shielded UTXOs hides information about the UTXOs themselves, so we require the spender to generate a zero-knowledge proof that proves to the overall system that no malicious or otherwise improper changes take place. This enables spending UTXOs without disclosing their contents, ensuring private transactions on a public blockchain.
-
-### Interoperable UTXO Model
-
-In Webb’s asset protocol, we leverage the Anchor System to manage a connected set of shielded UTXO sets. Each shielded UTXO set exists on its respective chain and maintains connections through the Anchor System's edge list data structure. Anchors in this protocol instance directly correspond to shielded UTXO sets. We extend the generic anchor to maintain a shielded transaction system, where merkle trees of anchors become shielded UTXO sets. The Asset Protocol API augments the standard Anchor System API to provide the new functionality.
-
-## Data Structures
-
-The main data structure presented here is the **shielded UTXO**. A shielded UTXO contains:
-
-- **Chain Identifier**: A unique identifier for the blockchain where the UTXO can be spent.
-- **Amount**: A numerical identifier for the amount of funds the UTXO represents.
-- **Public Key**: The public key of the keypair that owns the UTXO.
-- **Blinding Randomness**: A random value that provides additional randomization to the hash commitment.
-
-The **shielded** component comes from the hash commitment to the UTXO data. We take the commitment to be the Poseidon hash function of the underlying data.
-
-```
-commitment = Poseidon(chain_id, amount, public_key, blinding)
-```
-
-We also define a signature and nullifier that allow us to indicate if a UTXO has been spent. We take them to be Poseidon hash functions of various pieces of data.
-
-```
-signature = Poseidon(private_key, commitment, path_index)
-
-nullifier = Poseidon(commitment, path_index, signature)
-```
-
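-As a sketch, the commitment above could be computed with a JavaScript Poseidon implementation. The snippet below assumes the `circomlibjs` package and its `buildPoseidon` export; treat the exact API as an assumption rather than the canonical tooling:
-
-```ts
-import { buildPoseidon } from "circomlibjs"; // assumed dependency, not prescribed by this page
-
-// commitment = Poseidon(chain_id, amount, public_key, blinding)
-async function commitmentFor(chainId: bigint, amount: bigint, publicKey: bigint, blinding: bigint): Promise<bigint> {
-  const poseidon = await buildPoseidon();
-  const hash = poseidon([chainId, amount, publicKey, blinding]);
-  return poseidon.F.toObject(hash) as bigint; // convert from the internal field representation
-}
-```
-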
-## API
-
-The Asset Protocol API extends the Anchor System API and adds the following functionality (sketched below):
-
-- **`transact(public_amount, input_nullifiers, output_commitments, zk_proof)`**
- - `public_amount` - A signed integer value for adding or removing funds from the pool.
- - `input_nullifiers` - Identifying data about input UTXOs being spent.
- - `output_commitments` - Commitments for new UTXOs to insert into the pool.
- - `zk_proof` - A zero-knowledge proof of the transaction's validity.
-
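-A minimal TypeScript sketch of the call shape (field names and types are illustrative, not the canonical ABI):
-
-```ts
-interface TransactArgs {
-  publicAmount: bigint; // signed: positive adds funds to the pool, negative removes them
-  inputNullifiers: Uint8Array[]; // identifies the input UTXOs being spent
-  outputCommitments: Uint8Array[]; // commitments for the new UTXOs inserted into the pool
-  zkProof: Uint8Array; // proof that the circuit constraints hold for this transaction
-}
-```
-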
-The protocol assumes the existence of public input variables that are globally available, namely the same values provided by the Anchor System. The required global variables are:
-
-- **Roots** - The set of merkle roots for the bridged anchors provided by the Anchor System.
-- **Chain Id** - The active chain ID, ideally provided through an opcode that cannot be tampered with.
-
-## Circuit
-
-The zero-knowledge circuit encodes a variety of constraints to ensure that the asset transfer system works properly. This includes, among other things, the constraints necessary for preventing double spending across chains.
-
-The constraints required are:
-
-- Verifying the correctness of input nullifiers.
-- Verifying the uniqueness of input nullifiers.
-- Verifying the existence of input commitments.
-- Verifying the correctness of output commitments.
-- Verifying that the sum of input amounts and the public amount equals the sum of output amounts (written out below).
-
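-Written out, the balance constraint (the last item above) is simply:
-
-```
-amount_in_1 + ... + amount_in_n + public_amount = amount_out_1 + ... + amount_out_m
-```
-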
-### Formal Description
-
-
diff --git a/pages/docs/tangle-network/_meta.json b/pages/docs/tangle-network/_meta.json
deleted file mode 100644
index 9da11bf7..00000000
--- a/pages/docs/tangle-network/_meta.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "overview": "Overview",
- "build": "Build",
- "node": "Node",
- "learn": "Learn",
- "governance": "Govern"
-}
diff --git a/pages/docs/tangle-network/build/pallets/precompile-overview.mdx b/pages/docs/tangle-network/build/pallets/precompile-overview.mdx
deleted file mode 100644
index 130edfd3..00000000
--- a/pages/docs/tangle-network/build/pallets/precompile-overview.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
-# Overview of Precompiles and Pallets
-
-## Introduction to Precompiles
-
-On Tangle Network, a precompiled contract is native Substrate code that has an Ethereum-like address and can be called through the Ethereum API, like any other smart contract. These precompiles let you interact directly with the Substrate runtime, functionality that is normally inaccessible from the Ethereum side of Tangle Network.
-
-The Substrate code that implements precompiles lives within the EVM pallet. This pallet includes the standard precompiles that exist on Ethereum, along with some additional precompiles that aren't unique to Ethereum, and it supports creating and executing custom precompiles through the flexible `Precompiles` trait. A range of custom Tangle Network-specific precompiles have been developed and can be found within the Tangle Network codebase.
-
-The Ethereum precompiled contracts encompass complex functions that require substantial computational resources, including hashing and encryption. On Tangle Network, the custom precompiled contracts allow access to Substrate-based features such as staking, governance, XCM-related operations, and more.
-
-These Tangle Network-specific precompiles can be accessed through familiar and user-friendly Solidity interfaces utilizing the Ethereum API, which ultimately interact with the underlying Substrate interface.
-
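-For the simplest precompiles, a Solidity interface is not even required: a raw `eth_call` against the precompile address is enough. A hedged ethers.js (v6) sketch, with the RPC URL as a placeholder assumption:
-
-```ts
-import { ethers } from "ethers";
-
-// Calls the SHA256 precompile (0x...02 in the table below) via a raw eth_call.
-async function sha256ViaPrecompile(data: string): Promise<string> {
-  const provider = new ethers.JsonRpcProvider("https://rpc.tangle.tools"); // placeholder endpoint
-  return provider.call({
-    to: "0x0000000000000000000000000000000000000002",
-    data: ethers.hexlify(ethers.toUtf8Bytes(data)),
-  });
-}
-```
-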
-### Ethereum Mainnet Precompiles
-
-See the repositories for these at the [Parity GitHub](https://github.com/paritytech/frontier/tree/master/frame/evm/precompile).
-
-| Contract | Address |
-| ---------------------- | ------------------------------------------ |
-| ECRECOVER | 0x0000000000000000000000000000000000000001 |
-| SHA256 | 0x0000000000000000000000000000000000000002 |
-| RIPEMD160 | 0x0000000000000000000000000000000000000003 |
-| Identity | 0x0000000000000000000000000000000000000004 |
-| Modular Exponentiation | 0x0000000000000000000000000000000000000005 |
-| BN128Add | 0x0000000000000000000000000000000000000006 |
-| BN128Mul | 0x0000000000000000000000000000000000000007 |
-| BN128Pairing | 0x0000000000000000000000000000000000000008 |
-| Blake2 | 0x0000000000000000000000000000000000000009 |
-| SHA3FIPS256 | 0x0000000000000000000000000000000000000400 |
-| Dispatch | 0x0000000000000000000000000000000000000401 |
-| ECRecoverPublicKey | 0x0000000000000000000000000000000000000402 |
-
-### General Precompiles
-
-| Contract | Address |
-| ------------------- | ------------------------------------------ |
-| Democracy | 0x0000000000000000000000000000000000000803 |
-| Batch | 0x0000000000000000000000000000000000000808 |
-| Call Permit | 0x000000000000000000000000000000000000080a |
-| Preimage | 0x0000000000000000000000000000000000000813 |
-| Precompile Registry | 0x0000000000000000000000000000000000000815 |
-| Pallet Staking | 0x0000000000000000000000000000000000000800 |
-
-## Introduction to Pallets
-
-In the Substrate ecosystem, a pallet refers to a distinct, reusable piece of code which contributes a specific piece of functionality to a runtime. Think of pallets as modules that are utilized to construct Substrate-based blockchains. The Tangle Network, like any Substrate-based chain, employs a variety of these pallets to accomplish the network's overall functionalities.
-
-Each pallet bundles a set of related functionalities, which collectively contribute to the overall operation of the Substrate runtime. From managing balances and transaction fees to handling governance and staking processes, pallets essentially serve as the backbone of the Substrate infrastructure.
-
-The flexibility and modularity of Substrate pallets contribute significantly to the customization and upgradeability of the Tangle Network. They provide for easy runtime upgrades without needing to fork the entire network, ensuring a seamless evolution of the network's capabilities.
-
-Amongst the Substrate standard pallets, Tangle Network also incorporates several custom pallets specifically tailored to meet its unique requirements. These custom pallets encapsulate Tangle Network's unique features and functions that extend beyond the conventional Substrate offerings.
-
-Just as with precompiles, these custom pallets can be interacted with through familiar interfaces, this time using the Substrate API. This ensures that developers can make the most out of the rich functionalities offered by the Tangle Network without having to navigate through the complex underlying logic of the pallets themselves.
-
-In essence, pallets form the building blocks of the Tangle Network, contributing to its robustness, customization, and scalability. The beauty of this modular architecture lies in its adaptability and expandability, allowing the Tangle Network to continuously grow and adapt to meet the changing needs of its community.
diff --git a/pages/docs/tangle-network/governance/how-to-vote-on-tangle.mdx b/pages/docs/tangle-network/governance/how-to-vote-on-tangle.mdx
deleted file mode 100644
index ff62f518..00000000
--- a/pages/docs/tangle-network/governance/how-to-vote-on-tangle.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
-# Voting in Democracy Referenda
-
-Substrate-based blockchains often have built-in on-chain governance mechanisms, which include voting on referenda. Here's a step-by-step guide on how to vote in democracy referenda on a Substrate blockchain:
-
-Note: This guide assumes you have already set up a Substrate-based wallet and have some tokens in your account.
-
-1. **Access the Polkadot/Substrate User Interface (UI):**
-   Visit the [Substrate UI](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Frpc.tangle.tools#/explorer). This web interface is used to interact with the Tangle network and other Substrate chains. During our testnet phase you can use [Webb's alpha interface](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Frpc.tangle.tools#/explorer).
-
-2. **Connect to the correct network:**
-   Ensure you're connected to the Tangle Network. If not, use the drop-down menu at the top-left of the page to select the Tangle network.
-
-3. **Access the Democracy module:**
- On the left sidebar, under the Governance tab, click on Democracy. This is the on-chain voting system where all the current referenda are listed.
-
-4. **Choose a Referendum:**
-   You will see a list of active referenda, each represented by a number. Click on a specific referendum to see more details.
-
-5. **Review the Referendum Details:**
- Each referendum has a description and specific details. Review these carefully to understand what you are voting for or against.
-
-6. **Cast Your Vote:**
- Once you've decided how to vote, click on the "Vote" button. You'll be asked to choose between 'Aye' (yes) and 'Nay' (no), and you'll have the option to adjust your vote's "conviction," which multiplies your vote's power at the cost of locking your tokens for a longer period.
-
-7. **Sign and Submit the Transaction:**
- After clicking the "Vote" button, you will need to sign the transaction using your account. Enter your password and click on "Sign and Submit". Your vote will be recorded on the blockchain once the transaction is included in a block.
-
-8. **Wait for the Voting Period to End:**
- Each referendum has a voting period. When this period ends, votes are tallied, and the decision is enacted based on the majority vote.
-
-Remember that **voting in a referendum will lock your tokens until the end of the enactment period (if the proposal passes) or until the end of the voting period (if the proposal does not pass).** The length of these periods can vary; refer to [our parameters](../governance/governance-parameters/).
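-
-For completeness, the same vote can be cast programmatically with `@polkadot/api`, assuming the chain exposes the `democracy` pallet as described above (the endpoint, conviction, and dev account are illustrative):
-
-```ts
-import { ApiPromise, WsProvider } from "@polkadot/api";
-import { Keyring } from "@polkadot/keyring";
-
-async function voteAye(referendumIndex: number, balance: bigint): Promise<void> {
-  const api = await ApiPromise.create({ provider: new WsProvider("wss://rpc.tangle.tools") });
-  const voter = new Keyring({ type: "sr25519" }).addFromUri("//Alice"); // dev account; use your own key
-  await api.tx.democracy
-    .vote(referendumIndex, { Standard: { vote: { aye: true, conviction: "Locked1x" }, balance } })
-    .signAndSend(voter);
-}
-```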
diff --git a/pages/docs/tangle-network/learn/_meta.json b/pages/docs/tangle-network/learn/_meta.json
deleted file mode 100644
index 08f2b574..00000000
--- a/pages/docs/tangle-network/learn/_meta.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "webb-protocol": "Webb Protocol on Tangle",
- "incentives": "Incentives, Staking and Slashing",
- "understanding-dkg-tangle": "Distributed Key Generation (DKG)"
-}
diff --git a/pages/docs/tangle-network/node/monitoring/_meta.json b/pages/docs/tangle-network/node/monitoring/_meta.json
deleted file mode 100644
index e8491a85..00000000
--- a/pages/docs/tangle-network/node/monitoring/_meta.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "quickstart": "Quickstart",
- "prometheus": "Prometheus",
- "alert-manager": "AlertManager",
- "grafana": "Grafana Dashboard",
- "loki": "Loki Log Manager"
-}
diff --git a/pages/docs/tangle-network/node/monitoring/alert-manager.mdx b/pages/docs/tangle-network/node/monitoring/alert-manager.mdx
deleted file mode 100644
index a6b4664a..00000000
--- a/pages/docs/tangle-network/node/monitoring/alert-manager.mdx
+++ /dev/null
@@ -1,342 +0,0 @@
----
-title: Alert Manager Setup
-description: Create alerts to notify the team when issues arise.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Alert Manager Setup
-
-The following is a guide outlining the steps to set up AlertManager to send alerts when a Tangle node or DKG is being disrupted. If you do not have a Tangle node set up yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/).
-
-In this guide we will configure the following modules to send alerts from a running Tangle node.
-
-- **Alert Manager** listens to Prometheus metrics and pushes an alert as soon as a threshold is crossed (CPU % usage for example).
-
-## What is Alert Manager?
-
-The Alertmanager handles alerts sent by client applications such as the Prometheus server. It takes care of deduplicating, grouping,
-and routing them to the correct receiver integration such as email, PagerDuty, or OpsGenie. It also takes care of silencing and
-inhibition of alerts. To learn more about Alertmanager, please
-visit the official docs site [here](https://prometheus.io/docs/alerting/latest/alertmanager/).
-
-### Getting Started
-
-Let's first start by downloading the latest release of Alertmanager.
-
-
-  This guide assumes the user has root access to the machine running the Tangle node, and is following the steps below on that machine. It also assumes
-  the user has already configured Prometheus on this machine.
-
-
-**1. Download Alertmanager**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.darwin-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.darwin-arm64.tar.gz
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
-  wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.linux-arm64.tar.gz
- ```
-
-  For other linux distributions please visit the official release page [here](https://github.com/prometheus/alertmanager/releases).
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.windows-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/alertmanager/releases/download/v0.24.0/alertmanager-0.24.0.windows-arm64.tar.gz
- ```
-
-
-
-
-**2. Extract the Downloaded Files:**
-
-Run the following command:
-
-```sh filename="tar" copy
-tar xvf alertmanager-*.tar.gz
-```
-
-**3. Copy the Extracted Files into `/usr/local/bin`:**
-
-
-  **Note:** The example below makes use of the `linux-amd64` installation; please update the commands to match the target system you have installed.
-
-
-Copy the `alertmanager` binary and `amtool`:
-
-```sh filename="cp" copy
-sudo cp ./alertmanager-*.linux-amd64/alertmanager /usr/local/bin/ &&
-sudo cp ./alertmanager-*.linux-amd64/amtool /usr/local/bin/
-```
-
-**4. Create Dedicated Users:**
-
-Now we want to create dedicated users for the Alertmanager module we have installed:
-
-```sh filename="useradd" copy
-sudo useradd --no-create-home --shell /usr/sbin/nologin alertmanager
-```
-
-**5. Create Directories for `Alertmanager`:**
-
-```sh filename="mkdir" copy
-sudo mkdir /etc/alertmanager &&
-sudo mkdir /var/lib/alertmanager
-```
-
-**6. Change the Ownership for all Directories:**
-
-We need to give our user permissions to access these directories:
-
-**alertManager**:
-
-```sh filename="chown" copy
-sudo chown alertmanager:alertmanager /etc/alertmanager/ -R &&
-sudo chown alertmanager:alertmanager /var/lib/alertmanager/ -R &&
-sudo chown alertmanager:alertmanager /usr/local/bin/alertmanager &&
-sudo chown alertmanager:alertmanager /usr/local/bin/amtool
-```
-
-**7. Finally, let's clean up these directories:**
-
-```sh filename="rm" copy
-rm -rf ./alertmanager*
-```
-
-Great! You have now installed and set up your environment. The next series of steps will be configuring the service.
-
-## Configuration
-
-If you are interested in seeing how we configure the Tangle Network nodes for monitoring, check out https://github.com/webb-tools/tangle/tree/main/monitoring.
-
-### Prometheus
-
-The first thing we need to do is add a `rules.yml` file to our Prometheus configuration:
-
-Let’s create the `rules.yml` file that will hold the rules for Alertmanager:
-
-```sh filename="nano" copy
-sudo touch /etc/prometheus/rules.yml
-sudo nano /etc/prometheus/rules.yml
-```
-
-We are going to create 2 basic rules that will trigger an alert in case the instance is down or the CPU usage crosses 80%.
-You can create all kinds of rules that can be triggered; for an exhaustive list of rules see our rules list [here](https://github.com/webb-tools/tangle/blob/main/monitoring/prometheus/rules.yml).
-
-Add the following lines and save the file:
-
-```sh filename="group" copy
-groups:
- - name: alert_rules
- rules:
- - alert: InstanceDown
- expr: up == 0
- for: 5m
- labels:
- severity: critical
- annotations:
-        summary: "Instance {{ $labels.instance }} down"
-        description: "[{{ $labels.instance }}] of job [{{ $labels.job }}] has been down for more than 5 minutes."
-
- - alert: HostHighCpuLoad
- expr: 100 - (avg by(instance)(rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80
- for: 0m
- labels:
- severity: warning
- annotations:
-        summary: Host high CPU load (instance {{ $labels.instance }})
- description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
-```
-
-The criteria for triggering an alert are set in the `expr:` part. You can customize these triggers as you see fit.
-
-Then, check the rules file:
-
-```sh filename="promtool rules" copy
-promtool check rules /etc/prometheus/rules.yml
-```
-
-And finally, check the Prometheus config file:
-
-```sh filename="promtool check" copy
-promtool check config /etc/prometheus/prometheus.yml
-```
-
-### Gmail setup
-
-We can use a Gmail address to send the alert emails. For that, we will need to generate an app password from our Gmail account.
-
-Note: we recommend using a dedicated email address for your alerts. Review Google's own guide for
-proper setup [here](https://support.google.com/mail/answer/185833?hl=en).
-
-### Slack notifications
-
-We can also send the alerts through Slack notifications. For that we need a specific Slack channel to send the notifications to, and
-to install the Incoming WebHooks Slack application.
-
-To do so, navigate to:
-
-1. Administration > Manage Apps.
-2. Search for "Incoming Webhooks"
-3. Install into your Slack workspace.
-
-### Alertmanager
-
-The Alertmanager config file is used to set the external service that will be called when an alert is triggered. Here, we are going to use the Gmail and Slack notifications configured previously.
-
-Let’s create the file:
-
-```sh filename="nano" copy
-sudo touch /etc/alertmanager/alertmanager.yml
-sudo nano /etc/alertmanager/alertmanager.yml
-```
-
-And add the Gmail configuration to it and save the file:
-
-```yaml filename="alertmanager.yml" copy
-global:
- resolve_timeout: 1m
-
-route:
- receiver: 'gmail-notifications'
-
-receivers:
-- name: 'gmail-notifications'
- email_configs:
- - to: 'EMAIL-ADDRESS'
- from: 'EMAIL-ADDRESS'
- smarthost: 'smtp.gmail.com:587'
- auth_username: 'EMAIL-ADDRESS'
- auth_identity: 'EMAIL-ADDRESS'
-    auth_password: 'APP-PASSWORD'
- send_resolved: true
-
-
-# ********************************************************************************************************************************************
-# Alert Manager for Slack Notifications *
-# ********************************************************************************************************************************************
-
- global:
- resolve_timeout: 1m
- slack_api_url: 'INSERT SLACK API URL'
-
- route:
- receiver: 'slack-notifications'
-
- receivers:
- - name: 'slack-notifications'
- slack_configs:
- - channel: 'channel-name'
- send_resolved: true
- icon_url: https://avatars3.githubusercontent.com/u/3380462
- title: |-
- [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
- {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
- {{" "}}(
- {{- with .CommonLabels.Remove .GroupLabels.Names }}
- {{- range $index, $label := .SortedPairs -}}
- {{ if $index }}, {{ end }}
- {{- $label.Name }}="{{ $label.Value -}}"
- {{- end }}
- {{- end -}}
- )
- {{- end }}
- text: >-
- {{ range .Alerts -}}
- *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
- *Description:* {{ .Annotations.description }}
- *Details:*
- {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
- {{ end }}
- {{ end }}
-```
-
-Of course, you have to replace the email addresses and set `auth_password` to the app password generated from Google previously.
-
-## Service Setup
-
-### Alert manager
-
-Create and open the Alert manager service file:
-
-```sh filename="create service" copy
-sudo tee /etc/systemd/system/alertmanager.service > /dev/null << EOF
-[Unit]
- Description=AlertManager Server Service
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=alertmanager
- Group=alertmanager
- Type=simple
- ExecStart=/usr/local/bin/alertmanager \
- --config.file /etc/alertmanager/alertmanager.yml \
- --storage.path /var/lib/alertmanager \
- --web.external-url=http://localhost:9093 \
- --cluster.advertise-address='0.0.0.0:9093'
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-## Starting the Services
-
-Launch a daemon reload to take the services into account in systemd:
-
-```sh filename="daemon-reload" copy
-sudo systemctl daemon-reload
-```
-
-Next, we will want to start the alertManager service:
-
-**alertManager**:
-
-```sh filename="start service" copy
-sudo systemctl start alertmanager.service
-```
-
-And check that they are working fine:
-
-**alertManager**:
-
-```sh filename="status" copy
-sudo systemctl status alertmanager.service
-```
-
-If everything is working adequately, activate the services!
-
-**alertManager**:
-
-```sh filename="enable" copy
-sudo systemctl enable alertmanager.service
-```
-
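-As a quick smoke test, you can push a synthetic alert straight to Alertmanager with the `amtool` binary we installed earlier (flag syntax assumed from recent amtool releases):
-
-```sh filename="amtool" copy
-amtool alert add smoke_test severity=info --alertmanager.url=http://localhost:9093 &&
-amtool alert query --alertmanager.url=http://localhost:9093
-```
-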
-Amazing! We have now successfully added alert monitoring for our Tangle node!
diff --git a/pages/docs/tangle-network/node/monitoring/grafana.mdx b/pages/docs/tangle-network/node/monitoring/grafana.mdx
deleted file mode 100644
index 916cb9ac..00000000
--- a/pages/docs/tangle-network/node/monitoring/grafana.mdx
+++ /dev/null
@@ -1,193 +0,0 @@
----
-title: Grafana Dashboard Setup
-description: Create visual dashboards for the metrics captured by Prometheus.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Grafana Setup
-
-The following is a guide outlining the steps to set up a Grafana dashboard to visualize metric data for a Tangle node. If you do not have a Tangle node set up yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/).
-
-In this guide we will configure the following modules to visualize metric data from a running Tangle node.
-
-- **Grafana** is the visual dashboard tool that we access from the outside (through SSH tunnel to keep the node secure).
-
-## What are Grafana Dashboards?
-
-A dashboard is a set of one or more panels organized and arranged into one or more rows. Grafana ships with a variety of panels making it easy to
-construct the right queries, and customize the visualization so that you can create the perfect dashboard for your need. Each panel can interact
-with data from any configured Grafana data source. To learn more about Grafana Dashboards, please
-visit the official docs site [here](https://grafana.com/docs/grafana/latest/dashboards/).
-
-### Getting Started
-
-Let's first start by downloading the latest release of Grafana.
-
-
-  This guide assumes the user has root access to the machine running the Tangle node, and is following the steps below on that machine. It also assumes
-  the user has already configured Prometheus on this machine.
-
-
-**1. Download Grafana**
-
-
-
-
- ```sh filename="brew" copy
- brew update
- brew install grafana
- ```
-
-
-
-
- ```sh filename="linux" copy
- sudo apt-get install -y apt-transport-https
- sudo apt-get install -y software-properties-common wget
- wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add -
- ```
-
-  For other linux distributions please visit the official release page [here](https://grafana.com/grafana/download?edition=oss&platform=linux).
-
-
-
-
-**2. Add Grafana repository to APT sources:**
-
-
-  This guide assumes the user is installing and configuring Grafana for a linux machine. For macOS instructions
-  please visit the official docs [here](https://grafana.com/docs/grafana/v9.0/setup-grafana/installation/mac/).
-
-
-```sh filename="add-apt" copy
-sudo add-apt-repository "deb https://packages.grafana.com/oss/deb stable main"
-```
-
-**3. Refresh your APT cache to update your package lists:**
-
-```sh filename="apt update" copy
-sudo apt update
-```
-
-**4. Next, make sure Grafana will be installed from the Grafana repository:**
-
-```sh filename="apt-cache" copy
-apt-cache policy grafana
-```
-
-The output of the previous command tells you the version of Grafana that you are about to install, and where you will retrieve the package from. Verify that the installation candidate at the top of the list will come from the official Grafana repository at `https://packages.grafana.com/oss/deb`.
-
-```sh filename="output"
-Output of apt-cache policy grafana
-grafana:
- Installed: (none)
- Candidate: 6.3.3
- Version table:
- 6.3.3 500
- 500 https://packages.grafana.com/oss/deb stable/main amd64 Packages
-...
-```
-
-**5. You can now proceed with the installation:**
-
-```sh filename="install grafana" copy
-sudo apt install grafana
-```
-
-**6. Install the Alert manager plugin for Grafana:**
-
-```sh filename="grafana-cli" copy
-sudo grafana-cli plugins install camptocamp-prometheus-alertmanager-datasource
-```
-
-## Service Setup
-
-### Grafana
-
-The Grafana service is automatically created during extraction of the deb package; you do not need to create it manually.
-
-Launch a daemon reload to take the services into account in systemd:
-
-```sh filename="daemon-reload" copy
-sudo systemctl daemon-reload
-```
-
-**Start the Grafana service:**
-
-```sh filename="start service" copy
-sudo systemctl start grafana-server
-```
-
-And check that it is working fine:
-
-```sh filename="status" copy
-systemctl status grafana-server
-```
-
-If everything is working adequately, activate the services!
-
-```sh filename="enable" copy
-sudo systemctl enable grafana-server
-```
-
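-You can also confirm Grafana is serving requests through its standard health endpoint:
-
-```sh filename="health" copy
-curl http://localhost:3000/api/health
-```
-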
-## Run Grafana dashboard
-
-Now we are going to set up the dashboard to visualize the metrics we are capturing.
-
-From the browser on your local machine, navigate to `http://localhost:3000/login`. You should be greeted with
-a login screen. You can log in with the default credentials, `admin/admin`. Be sure to update your password afterwards.
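-
-If Grafana is running on the remote node rather than your local machine, open an SSH tunnel first so `localhost:3000` reaches it (hostname is a placeholder):
-
-```sh filename="ssh tunnel" copy
-ssh -N -L 3000:localhost:3000 user@your-node-host
-```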
-
-
- This guide assumes the user has configured Prometheus, AlertManager, and Loki as a data source.
-
-
-**Next, we need to add Prometheus as a data source.**
-
-1. Open the Settings menu
-2. Select **Data Sources**
-3. Select **Add Data Source**
-4. Select Prometheus
-5. Input the URL field with http://localhost:9090
-6. Click Save & Test
-
-**Next, we need to add AlertManager as a data source.**
-
-1. Open the Settings menu
-2. Select **Data Sources**
-3. Select **Add Data Source**
-4. Select AlertManager
-5. Input the URL field with http://localhost:9093
-6. Click Save & Test
-
-**Next, we need to add Loki as a data source.**
-
-1. Open the Settings menu
-2. Select **Data Sources**
-3. Select **Add Data Source**
-4. Select Loki
-5. Input the URL field with http://localhost:3100
-6. Click Save & Test
-
-We have our data sources connected; now it's time to import the dashboard we want to use. You may
-create your own or import others, but for the purposes of this guide we will use the Polkadot Essentials dashboard created
-by bLD nodes!
-
-**To import a dashboard:**
-
-1. Select the + button
-2. Select **Import**
-3. Input the dashboard number, **13840**
-4. Select Prometheus and AlertManager as data sources from the dropdown menu
-5. Click Load
-
-**In the dashboard selection, make sure you select:**
-
-- **Chain Metrics**: substrate
-- **Chain Instance Host**: localhost:9615 to point to the chain data scraper
-- **Chain Process Name**: the name of your node binary
-
-Congratulations! You have now configured Grafana to visualize the metrics we are capturing. You now
-have monitoring set up for your node!
diff --git a/pages/docs/tangle-network/node/monitoring/loki.mdx b/pages/docs/tangle-network/node/monitoring/loki.mdx
deleted file mode 100644
index 31d92fa6..00000000
--- a/pages/docs/tangle-network/node/monitoring/loki.mdx
+++ /dev/null
@@ -1,334 +0,0 @@
----
-title: Loki Log Management
-description: A service dedicated to aggregating and querying system logs.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Loki Log Management
-
-The following is a guide outlining the steps to set up Loki for log management of a Tangle node. If you do not have a Tangle node set up yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/).
-
-In this guide we will configure the following modules to scrape metrics from the running Tangle node.
-
-- **Loki** provides log aggregation system and metrics.
-- **Promtail** is the agent responsible for gathering logs, and sending them to Loki.
-
-Let's first start by downloading the latest releases of the above mentioned modules (Loki and Promtail).
-
-
-  This guide assumes the user has root access to the machine running the Tangle node, and is following the steps below on that machine.
-
-
-**1. Download Loki**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-darwin-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-darwin-arm64.zip"
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-linux-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-linux-arm64.zip"
- ```
-
-  For other linux distributions please visit the official release page [here](https://github.com/grafana/loki/releases).
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/loki-windows-amd64.exe.zip"
- ```
-
-
-
-
-**2. Download Promtail**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-darwin-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-darwin-arm64.zip"
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-linux-amd64.zip"
- ```
- ARM version:
- ```sh filename="ARM" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-linux-arm64.zip"
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- curl -O -L "https://github.com/grafana/loki/releases/download/v2.7.0/promtail-windows-amd64.exe.zip"
- ```
-
-
-
-
-**3. Extract the Downloaded Files:**
-
-```sh filename="unzip" copy
-unzip "loki-linux-amd64.zip" &&
-unzip "promtail-linux-amd64.zip"
-```
-
-**4. Copy the Extracted Files into `/usr/local/bin`:**
-
-```sh filename="cp" copy
-sudo cp loki-linux-amd64 /usr/local/bin/ &&
-sudo cp promtail-linux-amd64 /usr/local/bin/
-```
-
-**5. Create Dedicated Users:**
-
-Now we want to create dedicated users for each of the modules we have installed:
-
-```sh filename="useradd" copy
-sudo useradd --no-create-home --shell /usr/sbin/nologin loki &&
-sudo useradd --no-create-home --shell /usr/sbin/nologin promtail
-```
-
-**6. Create Directories for `loki`, and `promtail`:**
-
-```sh filename="mkdir" copy
-sudo mkdir /etc/loki &&
-sudo mkdir /etc/promtail
-```
-
-**7. Change the Ownership for all Directories:**
-
-We need to give our user permissions to access these directories:
-
-```sh filename="chown" copy
-sudo chown loki:loki /usr/local/bin/loki-linux-amd64 &&
-sudo chown promtail:promtail /usr/local/bin/promtail-linux-amd64
-```
-
-**8. Finally, let's clean up these directories:**
-
-```sh filename="rm" copy
-rm -rf ./loki-linux-amd64* &&
-rm -rf ./promtail-linux-amd64*
-```
-
-The next series of steps will be configuring each service.
-
-## Configuration
-
-If you are interested in seeing how we configure the Tangle Network nodes for monitoring, check out https://github.com/webb-tools/tangle/tree/main/monitoring.
-
-### Loki
-
-Loki's configuration details what ports to listen to, how to store the logs, and other configuration options.
-There are many other config options for Loki, and you can read more about Loki configuration at: https://grafana.com/docs/loki/latest/configuration/
-
-Let’s create the file:
-
-```sh filename="nano" copy
-sudo touch /etc/loki/config.yml
-sudo nano /etc/loki/config.yml
-```
-
-```yaml filename="config.yml" copy
-auth_enabled: false
-
-server:
- http_listen_port: 3100
- grpc_listen_port: 9096
-
-ingester:
- lifecycler:
- address: 127.0.0.1
- ring:
- kvstore:
- store: inmemory
- replication_factor: 1
- final_sleep: 0s
- chunk_idle_period: 5m
- chunk_retain_period: 30s
- max_transfer_retries: 0
-
-schema_config:
- configs:
- - from: 2020-10-24
- store: boltdb-shipper
- object_store: filesystem
- schema: v11
- index:
- prefix: index_
- period: 168h
-
-
-storage_config:
- boltdb:
- directory: /data/loki/index
-
- filesystem:
- directory: /data/loki/chunks
-
-limits_config:
- enforce_metric_name: false
- reject_old_samples: true
- reject_old_samples_max_age: 168h
-
-chunk_store_config:
- max_look_back_period: 0s
-
-table_manager:
- retention_deletes_enabled: false
- retention_period: 0
-```
-
-### Promtail
-
-The Promtail configuration details what logs to send to Loki. In the configuration below we tell Promtail
-to ship logs to Loki from the `/var/log/dkg` directory. This directory can be changed based on what logs you
-want to pick up. There are many other config options for Promtail, and you can read more about Promtail configuration at: https://grafana.com/docs/loki/latest/clients/promtail/configuration/
-
-Let’s create the file:
-
-```sh filename="nano" copy
-sudo touch /etc/promtail/config.yml
-sudo nano /etc/promtail/config.yml
-```
-
-```yaml filename="config.yml" copy
-server:
- http_listen_port: 9080
- grpc_listen_port: 0
-
-positions:
- filename: /data/loki/positions.yaml
-
-clients:
- - url: http://localhost:3100/loki/api/v1/push
-
-scrape_configs:
-- job_name: system
- static_configs:
- - targets:
- - localhost
- labels:
- job: varlogs
- __path__: /var/log/dkg/*log
-```
-
-## Service Setup
-
-### Loki
-
-Create and open the Loki service file:
-
-```sh filename="loki.service" copy
-sudo tee /etc/systemd/system/loki.service > /dev/null << EOF
-[Unit]
- Description=Loki Service
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=loki
- Group=loki
- Type=simple
- ExecStart=/usr/local/bin/loki-linux-amd64 -config.file /etc/loki/config.yml
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-### Promtail
-
-Create and open the Promtail service file:
-
-```sh filename="promtail.service" copy
-sudo tee /etc/systemd/system/promtail.service > /dev/null << EOF
-[Unit]
- Description=Promtail Service
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=promtail
- Group=promtail
- Type=simple
- ExecStart=/usr/local/bin/promtail-linux-amd64 -config.file /etc/promtail/config.yml
-
-[Install]
-WantedBy=multi-user.target
-EOF
-```
-
-Great! You have now configured all the services needed to run Loki.
-
-## Starting the Services
-
-Launch a daemon reload to take the services into account in systemd:
-
-```sh filename="daemon-reload" copy
-sudo systemctl daemon-reload
-```
-
-Next, we will want to start each service:
-
-```sh filename="start service" copy
-sudo systemctl start loki.service &&
-sudo systemctl start promtail.service
-```
-
-And check that they are working fine, one by one:
-
-**loki**:
-
-```sh filename="status" copy
-systemctl status loki.service
-```
-
-**promtail**:
-
-```sh filename="status" copy
-systemctl status promtail.service
-```
-
-If everything is working adequately, activate the services!
-
-```sh filename="enable" copy
-sudo systemctl enable loki.service &&
-sudo systemctl enable promtail.service
-```
-
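-Before wiring Loki into Grafana, you can verify it is ready and that Promtail has shipped logs, using Loki's standard HTTP endpoints:
-
-```sh filename="loki check" copy
-curl http://localhost:3100/ready &&
-curl -G http://localhost:3100/loki/api/v1/labels
-```
-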
-Amazing! You have now successfully configured Loki for log management. Check out the Grafana
-documentation to create a Loki log dashboard!
diff --git a/pages/docs/tangle-network/node/monitoring/prometheus.mdx b/pages/docs/tangle-network/node/monitoring/prometheus.mdx
deleted file mode 100644
index bbcb3f74..00000000
--- a/pages/docs/tangle-network/node/monitoring/prometheus.mdx
+++ /dev/null
@@ -1,435 +0,0 @@
----
-title: Prometheus Setup
-description: Set up Prometheus for scraping node metrics and more.
----
-
-import { Tabs, Tab } from "../../../../../components/Tabs";
-import Callout from "../../../../../components/Callout";
-
-# Prometheus Setup
-
-The following is a guide outlining the steps to set up Prometheus to monitor a Tangle node. If you do not have a Tangle node set up yet, please
-review the **Tangle Node Quickstart** setup guide [here](/docs/ecosystem-roles/validator/quickstart/). It is important to note that
-this guide's purpose is to help you get started with monitoring your Tangle node, not to advise on how to setup a node securely. Please
-take additional security and privacy measures into consideration.
-
-In this guide we will configure the following modules to scrape metrics from the running Tangle node.
-
-- **Prometheus** is the central module; it pulls metrics from different sources to provide them to the Grafana dashboard and Alert Manager.
-- **Node exporter** provides hardware metrics for the dashboard.
-- **Process exporter** provides process metrics for the dashboard (optional).
-
-## What is Prometheus?
-
-Prometheus is an open-source systems monitoring and alerting toolkit originally built at SoundCloud. Since its inception in 2012,
-many companies and organizations have adopted Prometheus, and the project has a very active developer and user community.
-It is now a standalone open source project and maintained independently of any company. To learn more about Prometheus, please
-visit the official docs site [here](https://prometheus.io/docs/introduction/overview/).
-
-### Getting Started
-
-Let's first start by downloading the latest releases of the above mentioned modules (Prometheus, Process exporter, and Node exporter).
-
-
- This guide assumes the user has root access to the machine running the Tangle node, and following the below steps inside that machine.
-
-
-**1. Download Prometheus**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.darwin-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.darwin-arm64.tar.gz
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.linux-arm64.tar.gz
- ```
-
-  For other linux distributions please visit the official release page [here](https://github.com/prometheus/prometheus/releases).
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.windows-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/prometheus/prometheus/releases/download/v2.40.3/prometheus-2.40.3.windows-arm64.tar.gz
- ```
-
-
-
-
-**2. Download Node Exporter**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
-  wget https://github.com/prometheus/node_exporter/releases/download/v1.4.0/node_exporter-1.4.0.darwin-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
-  wget https://github.com/prometheus/node_exporter/releases/download/v1.4.0/node_exporter-1.4.0.darwin-arm64.tar.gz
- ```
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
-  wget https://github.com/prometheus/node_exporter/releases/download/v1.4.0/node_exporter-1.4.0.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
-  wget https://github.com/prometheus/node_exporter/releases/download/v1.4.0/node_exporter-1.4.0.linux-arm64.tar.gz
- ```
-
-  For other linux distributions please visit the official release page [here](https://github.com/prometheus/node_exporter/releases).
-
-
-
-
-**3. Download Process Exporter**
-
-
-
-
- AMD version:
- ```sh filename="AMD" copy
- wget https://github.com/ncabatoff/process-exporter/releases/download/v0.7.10/process-exporter-0.7.10.linux-amd64.tar.gz
- ```
- ARM version:
- ```sh filename="ARM" copy
- wget https://github.com/ncabatoff/process-exporter/releases/download/v0.7.10/process-exporter-0.7.10.linux-arm64.tar.gz
- ```
-
-  For other linux distributions please visit the official release page [here](https://github.com/ncabatoff/process-exporter/releases).
-
-
-
-
-**4. Extract the Downloaded Files:**
-
-Run the following command:
-
-```sh filename="tar" copy
-tar xvf prometheus-*.tar.gz &&
-tar xvf node_exporter-*.tar.gz &&
-tar xvf process-exporter-*.tar.gz
-```
-
-**5. Copy the Extracted Files into `/usr/local/bin`:**
-
-
-  **Note:** The example below makes use of the `linux-amd64` installation; please update the commands to match the target system you have installed.
-
-
-We are first going to copy the `prometheus` binary:
-
-```sh filename="cp" copy
-sudo cp ./prometheus-*.linux-amd64/prometheus /usr/local/bin/
-```
-
-Next, we are going to copy over the `prometheus` console libraries:
-
-```sh filename="cp" copy
-sudo cp -r ./prometheus-*.linux-amd64/consoles /etc/prometheus &&
-sudo cp -r ./prometheus-*.linux-amd64/console_libraries /etc/prometheus
-```
-
-We are going to do the same with `node-exporter` and `process-exporter`:
-
-```sh filename="cp" copy
-sudo cp ./node_exporter-*.linux-amd64/node_exporter /usr/local/bin/ &&
-sudo cp ./process-exporter-*.linux-amd64/process-exporter /usr/local/bin/
-```
-
-**6. Create Dedicated Users:**
-
-Now we want to create dedicated users for each of the modules we have installed:
-
-```sh filename="useradd" copy
-sudo useradd --no-create-home --shell /usr/sbin/nologin prometheus &&
-sudo useradd --no-create-home --shell /usr/sbin/nologin node_exporter &&
-sudo useradd --no-create-home --shell /usr/sbin/nologin process-exporter
-```
-
-**7. Create Directories for `Prometheus` and `Process exporter`:**
-
-```sh filename="mkdir" copy
-sudo mkdir /var/lib/prometheus &&
-sudo mkdir /etc/process-exporter
-```
-
-**8. Change the Ownership for all Directories:**
-
-We need to give our user permissions to access these directories:
-
-**prometheus**:
-
-```sh filename="chown" copy
-sudo chown prometheus:prometheus /etc/prometheus/ -R &&
-sudo chown prometheus:prometheus /var/lib/prometheus/ -R &&
-sudo chown prometheus:prometheus /usr/local/bin/prometheus
-```
-
-**node_exporter**:
-
-```sh filename="chwon" copy
-sudo chown node_exporter:node_exporter /usr/local/bin/node_exporter
-```
-
-**process-exporter**:
-
-```sh filename="chown" copy
-sudo chown process-exporter:process-exporter /etc/process-exporter -R &&
-sudo chown process-exporter:process-exporter /usr/local/bin/process-exporter
-```
-
-**9. Finally, let's clean up these directories:**
-
-```sh filename="rm" copy
-rm -rf ./prometheus* &&
-rm -rf ./node_exporter* &&
-rm -rf ./process-exporter*
-```
-
-Great! You have now installed and set up your environment. The next series of steps will configure each service.
-
-## Configuration
-
-If you are interested in how we configure the Tangle Network nodes for monitoring, check out https://github.com/webb-tools/tangle/tree/main/monitoring.
-
-### Prometheus
-
-Let’s edit the Prometheus config file and add all of our scrape targets to it:
-
-```sh filename="nano" copy
-sudo nano /etc/prometheus/prometheus.yml
-```
-
-Add the following code to the file and save:
-
-```yaml filename="promtheus.yml" copy
-global:
- scrape_interval: 15s
- evaluation_interval: 15s
-
-rule_files:
- - 'rules.yml'
-
-alerting:
- alertmanagers:
- - static_configs:
- - targets:
- - localhost:9093
-
-scrape_configs:
- - job_name: "prometheus"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9090"]
- - job_name: "substrate_node"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9615"]
- - job_name: "node_exporter"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9100"]
- - job_name: "process-exporter"
- scrape_interval: 5s
- static_configs:
- - targets: ["localhost:9256"]
-```
-
-- **scrape_interval** defines how often Prometheus scrapes its targets, while **evaluation_interval** controls how often it evaluates rules.
-- **rule_files** sets the location of the Alertmanager rules we will add next.
-- **alerting** contains the Alertmanager target.
-- **scrape_configs** contains the services Prometheus will monitor.
-
-Notice the first scrape job, in which Prometheus monitors itself. Also note that Prometheus will refuse to start if a file listed under `rule_files` does not exist, so if you have not created `rules.yml` yet, either create an empty file at `/etc/prometheus/rules.yml` or temporarily comment out the `rule_files` and `alerting` sections.
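-
-The Prometheus tarball also ships a `promtool` utility that can validate this file before you start the service. This sketch assumes you also copied `promtool` to `/usr/local/bin` before cleaning up the extracted archives:
-
-```sh filename="promtool" copy
-promtool check config /etc/prometheus/prometheus.yml
-```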
-
-### Process exporter
-
-Process exporter needs a config file that tells it which processes to track:
-
-```sh filename="nano" copy
-sudo touch /etc/process-exporter/config.yml
-sudo nano /etc/process-exporter/config.yml
-```
-
-Add the following code to the file and save:
-
-```sh filename="config.yml" copy
-process_names:
- - name: "{{.Comm}}"
- cmdline:
- - '.+'
-```
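-
-The matcher above tracks every process on the host, grouped by command name. If you would rather track only the node process, a narrower matcher might look like the following sketch (assuming your node binary is named `tangle`; adjust to the actual binary name):
-
-```yaml filename="config.yml" copy
-process_names:
-  - name: "tangle"
-    comm:
-      - tangle
-```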
-
-## Service Setup
-
-### Prometheus
-
-Create and open the Prometheus service file. The quoted `'EOF'` prevents the shell from expanding `$MAINPID` before the file is written:
-
-```sh filename="prometheus.service" copy
-sudo tee /etc/systemd/system/prometheus.service > /dev/null << 'EOF'
-[Unit]
- Description=Prometheus Monitoring
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=prometheus
- Group=prometheus
- Type=simple
- ExecStart=/usr/local/bin/prometheus \
- --config.file /etc/prometheus/prometheus.yml \
- --storage.tsdb.path /var/lib/prometheus/ \
- --web.console.templates=/etc/prometheus/consoles \
- --web.console.libraries=/etc/prometheus/console_libraries
- ExecReload=/bin/kill -HUP $MAINPID
-
-[Install]
- WantedBy=multi-user.target
-EOF
-```
-
-### Node exporter
-
-Create and open the Node exporter service file:
-
-```sh filename="node_exporter.service" copy
-sudo tee /etc/systemd/system/node_exporter.service > /dev/null << 'EOF'
-[Unit]
- Description=Node Exporter
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=node_exporter
- Group=node_exporter
- Type=simple
- ExecStart=/usr/local/bin/node_exporter
-
-[Install]
- WantedBy=multi-user.target
-EOF
-```
-
-### Process exporter
-
-Create and open the Process exporter service file:
-
-```sh filename="process-exporter.service" copy
-sudo tee /etc/systemd/system/process-exporter.service > /dev/null << 'EOF'
-[Unit]
- Description=Process Exporter
- Wants=network-online.target
- After=network-online.target
-
-[Service]
- User=process-exporter
- Group=process-exporter
- Type=simple
- ExecStart=/usr/local/bin/process-exporter \
- --config.path /etc/process-exporter/config.yml
-
-[Install]
- WantedBy=multi-user.target
-EOF
-```
-
-## Starting the Services
-
-Reload systemd so it picks up the new service files:
-
-```sh filename="deamon-reload" copy
-sudo systemctl daemon-reload
-```
-
-Next, we will want to start each service:
-
-**prometheus**:
-
-```sh filename="start serive" copy
-sudo systemctl start prometheus.service
-```
-
-**node_exporter**:
-
-```sh filename="start serive" copy
-sudo systemctl start node_exporter.service
-```
-
-**process-exporter**:
-
-```sh filename="start serive" copy
-sudo systemctl start process-exporter.service
-```
-
-And check that they are working fine:
-
-**prometheus**:
-
-```sh filename="status" copy
-systemctl status prometheus.service
-```
-
-**node_exporter**:
-
-```sh filename="status" copy
-systemctl status node_exporter.service
-```
-
-**process-exporter**:
-
-```sh filename="status" copy
-systemctl status process-exporter.service
-```
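-
-Each exporter also serves its metrics over HTTP, so a quick way to confirm they are really producing data is to query the default ports (these match the targets in the `prometheus.yml` above):
-
-```sh filename="curl" copy
-curl -s http://localhost:9100/metrics | head -n 5 &&
-curl -s http://localhost:9256/metrics | head -n 5
-```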
-
-If everything is working correctly, enable the services so they start automatically on boot:
-
-**prometheus**:
-
-```sh filename="enable" copy
-sudo systemctl enable prometheus.service
-```
-
-**node_exporter**:
-
-```sh filename="enable" copy
-sudo systemctl enable node_exporter.service
-```
-
-**process-exporter**:
-
-```sh filename="enable" copy
-sudo systemctl enable process-exporter.service
-```
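-
-If any of the services fails to start, its logs are the first place to look, for example:
-
-```sh filename="journalctl" copy
-journalctl -u prometheus.service --since "10 minutes ago" --no-pager
-```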
-
-Amazing! We have now completely set up our Prometheus monitoring and are scraping metrics from our
-running Tangle node.
-
-You can view the raw metrics at `http://localhost:9090/metrics`, and explore them in the Prometheus web UI at `http://localhost:9090`.
diff --git a/pages/docs/tangle-network/node/validator/validator-rewards.mdx b/pages/docs/tangle-network/node/validator/validator-rewards.mdx
deleted file mode 100644
index 3168e931..00000000
--- a/pages/docs/tangle-network/node/validator/validator-rewards.mdx
+++ /dev/null
@@ -1,125 +0,0 @@
----
-title: Validator Rewards
-description: A brief overview of Tangle Network rewards and their payout scheme.
----
-
-# Validator Rewards
-
-Running a [validator](validation.mdx) node on the Tangle Network allows you to connect to the network, sync with a bootnode, obtain local access to RPC endpoints, and author blocks. The network rewards successful validators (users running validator nodes and actively producing blocks) by paying out a set amount of network tokens. Validators are chosen by the [AURA](https://docs.substrate.io/reference/glossary/#authority-round-aura) algorithm, which gives every validator in the active set a chance at authoring a block.
-
-## How Rewards are Calculated
-
-## Era Points
-
-For every era (a period of approximately 6 hours on Tangle), validators are paid proportionally to the number of _era points_ they have collected. Era
-points are reward points earned for payable actions like:
-
-- producing a non-uncle block on the chain.
-- producing a reference to a previously unreferenced uncle block.
-- producing a referenced uncle block.
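-
-For intuition, here is a hypothetical era; the numbers are purely illustrative, not protocol constants:
-
-```
-Total era payout:  8 TNT
-Total era points:  100
-v1 era points:     30 → reward = 8 × 30/100 = 2.4 TNT
-v2 era points:     20 → reward = 8 × 20/100 = 1.6 TNT
-```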
-
-An uncle block is a block that is valid in every regard, but which failed to become
-canonical. This can happen when two or more validators are block producers in a single slot, and the
-block produced by one validator reaches the next block producer before the others. We call the
-lagging blocks uncle blocks.
-
-Payments occur at the end of every era.
-
-Era points create a probabilistic component for staking rewards.
-
-If the _mean_ of staking rewards is the average reward per era, then the _variance_ is the
-variability from that average. The exact TNT value of each era point is not known in
-advance, since it depends on the total number of points earned by all validators in a given era. The system
-is designed this way so that the total payout per era depends on Tangle's inflation model, and not on the number of payable
-actions (e.g., authoring a new block) executed.
-
-In this case, analyzing the _expected value_ of staking rewards will paint a better picture as the
-weight of era points of validators and para-validators in the reward average are taken into
-consideration.
-
-#### High-level breakdown of reward variance
-
-This should only serve as a high-level overview of the probabilistic nature for staking rewards.
-
-Let:
-
-- `pe` = para-validator era points,
-- `ne` = non-para-validator era points,
-- `EV` = expected value of staking rewards,
-
-Then, `EV(pe)` has more influence on the `EV` than `EV(ne)`.
-
-Since `EV(pe)` has a more weighted probability on the `EV`, the increase in variance against the
-`EV` becomes apparent between the different validator pools (aka. validators in the active set and
-the ones chosen to para-validate).
-
-Also, let:
-
-- `v` = the variance of staking rewards,
-- `p` = number of para-validators,
-- `w` = number of validators in the active set,
-- `e` = era,
-
-Then, `v` ↑ as `w` ↑, since this reduces the ratio `p` : `w` in any given era `e`.
-
-Increased `v` is expected, and keeping `p` ↓ initially by using the same para-validator set for
-all parachains ensures availability and approval voting. In addition, despite `v` ↑ from era to
-era, over time the amount of rewards each validator receives will even out based on the
-continuous selection of para-validators.
-
-## Payout Scheme
-
-No matter how much total stake is behind a validator, all validators split the block authoring
-payout essentially equally. The payout of a specific validator, however, may differ based on
-era points, as described above. Although there is a probabilistic component to
-receiving era points, and they may be impacted slightly depending on factors such as network
-connectivity, well-behaving validators should generally average out to having similar era point
-totals over a large number of eras.
-
-Validators may also receive "tips" from senders as an incentive to include transactions in their
-produced blocks. Validators will receive 100% of these tips directly.
-
-For simplicity, the example below assumes that all validators have the same number of era points
-and received no tips.
-
-```
-Validator Set Size (v): 4
-Validator 1 Stake (v1): 18 tokens
-Validator 2 Stake (v2): 9 tokens
-Validator 3 Stake (v3): 8 tokens
-Validator 4 Stake (v4): 7 tokens
-Payout (p): 8 TNT
-
-Payout for each validator (v1 - v4):
-p / v = 8 / 4 = 2 tokens
-```
-
-Note that this is different from most other Proof-of-Stake systems such as Cosmos. As long as a
-validator is in the validator set, it will receive the same block reward as every other validator.
-Validator `v1`, who had 18 tokens staked, received the same reward (2 tokens) in this era as `v4`
-who had only 7 tokens staked.
-
-## Slashing
-
-Although rewards are paid equally, slashes are relative to a validator's stake. Therefore, if you do
-have enough TNT to run multiple validators, it is in your best interest to do so. A slash of 30%
-will, of course, be more TNT for a validator with 18 TNT staked than one with 9 TNT staked.
-
-Running multiple validators does not absolve you of the consequences of misbehavior. The network,
-like Polkadot, punishes attacks that appear coordinated more severely than individual attacks. You should not, for
-example, run multiple validators hosted on the same infrastructure. A proper multi-validator
-configuration would ensure that they do not fail simultaneously.
-
-Nominators have an incentive to nominate the lowest-staked validator, as this results in the
-lowest risk and highest reward. While their vulnerability to slashing remains the same (since it
-is percentage-based), their rewards are higher, because their stake makes up a larger proportion
-of the total stake allocated to that validator.
-
-To clarify this, let us imagine two validators, `v1` and `v2`. Assume both are in the active set,
-have commission set to 0%, and are well-behaved. The only difference is that `v1` has 90 TNT
-nominating it and `v2` only has 10. If you nominate `v1`, it now has `90 + 10 = 100` TNT, and you
-will get 10% of the staking rewards for the next era. If you nominate `v2`, it now has
-`10 + 10 = 20` TNT nominating it, and you will get 50% of the staking rewards for the next era. In
-actuality, it would be quite rare to see such a large difference between the stake of validators,
-but the same principle holds even for smaller differences. If there is a 10% slash of either
-validator, then you will lose 1 TNT in each case.
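-
-Summarizing the arithmetic from this example:
-
-```
-Your stake:   10 TNT
-Nominate v1:  total = 90 + 10 = 100 → your share = 10 / 100 = 10% of rewards
-Nominate v2:  total = 10 + 10 = 20  → your share = 10 / 20  = 50% of rewards
-10% slash:    you lose 10% of 10 TNT = 1 TNT in either case
-```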
diff --git a/pages/docs/tangle-network/overview.mdx b/pages/docs/tangle-network/overview.mdx
deleted file mode 100644
index e1d5686f..00000000
--- a/pages/docs/tangle-network/overview.mdx
+++ /dev/null
@@ -1,140 +0,0 @@
----
-title: Overview and Use Cases
-description: General overview of the Tangle Network and use cases including interoperable shielded pools, identity-based systems, decentralized private finance, and secure multi-party computation, as well as features and roadmap.
----
-
-import { UseCasesArea, ParticipateArea, TestNetworkArea } from "../../../components/UseCasesTangle";
-import FullWebbCTA from "../../../components/FullWebbCTA";
-import SvgComponent from "../../../components/TangleOverviewSVG.tsx";
-
-# Tangle Network Overview
-
-## Introduction
-
-The Tangle Network is a specialized platform engineered to support and govern cross-chain Zero-Knowledge (ZK) applications. Leveraging the robust Substrate blockchain framework, the Tangle Network forms the core infrastructure to facilitate a new generation of privacy-enhancing and governance-decentralized applications.
-
-The Tangle Network's unique offering centers around the intersection of cross-chain functionality, familiar EVM tooling and compatibility, advanced governance systems, and the world of ZK applications. Our network seamlessly merges these distinct elements, creating an ecosystem that amplifies the strengths of each component.
-
-## Key Features
-
-**Cross-Chain Functionality and EVM Compatibility**
-
-The Tangle Network breaks traditional chain boundaries, allowing seamless interaction, data exchange, and operation of ZK applications across different blockchain networks. Leveraging the power of the Ethereum Virtual Machine (EVM) on Substrate, developers can utilize familiar EVM tooling and compatibility to build decentralized applications (DApps), create Non-Fungible Tokens (NFTs), and utilize ERC20 tokens across multiple networks.
-
-**Advanced Governance**
-
-The Tangle Network implements an innovative governance model based on a Distributed Key Generation (DKG) protocol. The DKG protocol serves as a security instrument for the Tangle Network's cross-chain Anchor zkApps, ensuring the validity of bridge updates with crypto-economically secured threshold signatures.
-
-DKG is a cryptographic method where multiple entities collaboratively produce a shared public and private key. In the Tangle Network, DKG fortifies the governance of distributed applications, particularly the Anchor System. It bolsters the security of the network by ensuring the integrity of signed messages via threshold signatures. This not only provides resistance against potential threats but also amplifies the Tangle Network's credibility.
-
-**Privacy-Enhancing ZK Applications**
-
-Privacy is a paramount concern in the Tangle Network. By providing an infrastructure for Zero-Knowledge (ZK) applications, we enable users to experience a new generation of privacy-enhancing functionalities. ZK applications empower users to transact, communicate, and interact privately while maintaining the security and immutability of blockchain technology.
-
-**Built on Substrate**
-
-The Tangle Network is built on Substrate, an advanced blockchain framework renowned for its flexibility, scalability, and cutting-edge features. This strategic choice ensures that our platform remains at the forefront of speed, security, and scalability, serving as a reliable backbone for the Tangle Network. Leveraging Substrate's modular architecture, we enable seamless interaction and interoperability with other blockchain networks.
-
----
-
-## Use Cases
-
-The Tangle Network and Webb Protocol have been designed to serve a variety of use cases. A majority of these applications revolve around enhancing privacy and improving cross-chain interoperability. Here are some key proposed applications for the Tangle Network and Webb Protocol:
-
-**Interoperable Shielded Pools**
-
-The most immediate application is the creation of private bridges for assets, or "interoperable shielded pools." The decentralized, updatable Tangle Network is ideal for maintaining the state of a set of bridge anchors, making it possible to design the data inserted into these anchors around an asset system. Unspent transaction outputs (UTXOs) are inserted into anchors, and users can spend these UTXOs privately across the chains the bridge spans.
-
-**Interoperable Membership Groups**
-
-The Tangle Network and Webb Protocol can support interoperable membership groups. These are communities that exist across chains and leverage privacy. A natural implementation might look like an interoperable Semaphore system, where anyone in the Semaphore membership group can relay a vote or response to a poll from any chain privately, and potentially without even needing a wallet on that chain.
-
-**Interoperable Badge System**
-
-An identity-based application, an interoperable badge system, could use expressive data blobs for arbitrary proofs of ownership, participation, and identity. Using Webb's technology stack, these badges can be proven to exist from anywhere, enabling new types of composable application development due to the zero-knowledge and private nature of data disclosure.
-
-**Variable Asset Anchor System**
-
-This system allows for interoperable shielded pools, wherein users can transfer arbitrary amounts of assets privately between blockchains. The Variable Asset Anchor System uses zero-knowledge proofs and is similar to a shielded UTXO system, but with cross-chain capabilities.
-
-**Semaphore Anchor System**
-
-Semaphore is a popular zero-knowledge protocol that enables members of an on-chain community to create anonymous signals using zero-knowledge proofs of membership in the community’s identity set. This concept can be extended to a cross-chain identity set, allowing any member of a set of non-fungible token (NFT) communities to register.
-
-**Identity-Based Variable Asset Anchor System**
-
-By combining the Semaphore Identity protocol and the Variable Asset protocol, a cross-chain shielded pool application over a restricted identity set can be designed. This creates a private transaction system where only users with proofs of membership in a cross-chain identity system can transact. This opens up possibilities for even more diverse use cases in the realm of zero-knowledge applications.
-
-In addition, with the integration of threshold Elliptic Curve Digital Signature Algorithm (ECDSA), the Tangle Network can sign arbitrary Bitcoin transactions, Ethereum transactions, and more. It can also be extended to include a Bitcoin bridge into the system with a proper custody rotation, or to sign messages for other smart contracts across chains for governance purposes.
-
-**Decentralized Private Finance (Private DeFi)**
-
-Decentralized Finance, or DeFi, has experienced significant growth over the past few years, facilitating peer-to-peer financial transactions without intermediaries. However, DeFi transactions on public blockchains are often not private, posing privacy concerns for users. Leveraging the Tangle Network's and Webb Protocol's privacy-preserving capabilities, we can establish decentralized private DeFi applications.
-
-These allow users to make transactions while keeping their financial activities private. They can engage in yield farming, liquidity provision, lending, borrowing, and other DeFi operations while remaining anonymous. The added privacy benefits could attract users that prefer to keep their financial activities private due to security concerns, thus expanding the overall user base of DeFi.
-
-**Secure Multi-Party Computation (SMPC)**
-
-Secure Multi-Party Computation (SMPC) is an area of cryptography concerned with enabling multiple parties to jointly compute a function over their inputs while keeping those inputs private. With its cryptographic properties, Tangle Network can be extended to facilitate SMPC.
-
-For example, consider a consortium of companies willing to collaboratively train a machine learning model using their data without exposing sensitive information to each other. By leveraging SMPC on the Tangle Network, these companies can jointly compute the machine learning model without revealing their individual datasets.
-
-This not only preserves privacy but also fosters collaboration between different entities that would otherwise be reluctant to share sensitive data.
-
-**Cross-chain Atomic Swaps**
-
-In the current state of the blockchain ecosystem, transferring assets between different blockchains (cross-chain) often involves centralized exchanges or trusted intermediaries. With the Tangle Network and Webb Protocol, we can enable cross-chain atomic swaps with enhanced privacy.
-
-An atomic swap is a smart contract technology that enables the exchange of one cryptocurrency for another without using centralized intermediaries. Users will be able to privately and securely exchange tokens between different blockchains directly. For instance, a user can exchange Bitcoin for Ethereum directly from their wallets without an exchange, ensuring privacy and reducing the reliance on intermediaries.
-
-**Private and Secure Messaging Systems**
-
-In today's digital age, privacy and security in communication are paramount. With the Tangle Network's zero-knowledge proofs and privacy-oriented architecture, we can develop a private and secure messaging system.
-
-In this system, all communications would be encrypted and can be securely stored across multiple blockchains. This would ensure that only the intended recipients can read the messages. Additionally, the decentralized nature of the system would make it resistant to censorship and control by any single entity.
-
-**Privacy-Preserving Data Marketplace**
-
-Data is often referred to as the "new oil." However, data transactions can be challenging due to privacy and trust concerns. By leveraging the Tangle Network, we can establish a privacy-preserving data marketplace.
-
-In this marketplace, data sellers can list their datasets without revealing the actual data. Using zero-knowledge proofs, they can provide evidence of the data's authenticity and other characteristics. Buyers, on the other hand, can verify these proofs and make purchases without exposing their identities. The entire transaction can be managed on-chain, ensuring fairness and transparency while preserving privacy.
-
-**Decentralized Identity Systems (DID)**
-
-Identity is fundamental in both the physical and digital worlds. However, traditional identity systems are often centralized and vulnerable to attacks. The Tangle Network can support Decentralized Identity Systems (DID), offering privacy, control, and cross-chain compatibility.
-
-In a DID system on the Tangle Network, each user can generate a self-sovereign identity that could be used across different blockchains. With zero-knowledge proofs, users can prove certain attributes of their identity without revealing any unnecessary personal information. This would not only enhance privacy but also give users full control over their identities, avoiding reliance on any single authority.
-
-These use cases showcase the versatility and potential of the Tangle Network and Webb Protocol in various sectors, underscoring its ability to drive forward both privacy and interoperability in the blockchain space.
-
-## Roadmap
-
-The following is subject to change as DAO governance supports different initiatives.
-
-**Phase 1**
-
-- Test Runtime Upgrade
-- Finalize Tangle Token Distribution
-- Launch Incentivized testnet
-
-**Phase 2**
-
-- Update Tangle Genesis for Launch
-- Distribute TNT Tokens
-- Launch Democracy Governance
-- Launch OFAC VAnchor Bridges
-
-**Phase 3**
-
-- Launch Cross-chain Transfers
-- Validator Staking Upgrades
-- Launch Semaphore VAnchor bridges
-
-**Phase 4**
-
-- Remove Sudo
-- Improve Relayer & Proposer Security
-
-## Participate
-
-
diff --git a/theme.config.js b/theme.config.js
index 40976840..3d4d0fe6 100644
--- a/theme.config.js
+++ b/theme.config.js
@@ -73,11 +73,11 @@ const theme = {