diff --git a/.github/workflows/build-release-artifacts.yml b/.github/workflows/build-release-artifacts.yml index 05010b5553..17a816958b 100644 --- a/.github/workflows/build-release-artifacts.yml +++ b/.github/workflows/build-release-artifacts.yml @@ -64,18 +64,13 @@ jobs: # This job isn't necessary, but it's useful for debugging the packaging process for the real release # workflow, just in case any issues are ever encountered there. package: - name: publish and release + name: package artifacts runs-on: ubuntu-latest needs: [build] - env: - AWS_ACCESS_KEY_ID: ${{ secrets.S3_DEPLOY_AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DEPLOY_AWS_SECRET_ACCESS_KEY }} - AWS_DEFAULT_REGION: eu-west-2 steps: - uses: actions/checkout@v4 with: - fetch-depth: "0" - token: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} + ref: ${{ inputs.tag || inputs.branch }} - uses: actions/download-artifact@master with: name: safe_network-x86_64-pc-windows-msvc @@ -100,22 +95,12 @@ jobs: with: name: safe_network-aarch64-unknown-linux-musl path: artifacts/aarch64-unknown-linux-musl/release - # It's possible to `cargo install` just, but it's very slow to compile on GHA infra. - # Therefore we just pull the binary from the Github Release. 
- - name: install just - shell: bash - run: | - curl -L -O $JUST_BIN_URL - mkdir just - tar xvf just-1.25.2-x86_64-unknown-linux-musl.tar.gz -C just - rm just-1.25.2-x86_64-unknown-linux-musl.tar.gz - sudo mv just/just /usr/local/bin - rm -rf just - sudo apt-get install -y tree + - uses: cargo-bins/cargo-binstall@main + - shell: bash + run: cargo binstall --no-confirm just - name: package artifacts shell: bash run: | - tree artifacts just package-release-assets "faucet" just package-release-assets "nat-detection" just package-release-assets "node-launchpad" diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..9ce23135f3 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,78 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +*When editing this file, please respect a line length of 100.* + +## 2024-07-10 + +### Binaries + +* `faucet` v0.4.30 +* `nat-detection` v0.2.0 +* `node-launchpad` v0.3.10 +* `safe` v0.93.9 +* `safenode` v0.109.0 +* `safenode-manager` v0.10.0 +* `sn_auditor` v0.2.2 +* `sn_node_rpc_client` v0.6.25 + +### Network + +#### Added + +- The node exposes more metrics, including its uptime, number of connected peers, number of peers in + the routing table, and the number of open connections. These will help us more effectively + diagnose user issues. + +#### Changed + +- Communication between node and client is strictly limited through synchronised public keys. The + current beta network allows the node and client to use different public keys, resulting in + undefined behaviour and performance issues. This change mitigates some of those issues and we also + expect it to prevent other double spend issues. +- Reduced base traffic for nodes, resulting in better upload performance. 
This will result in better + distribution of nanos, meaning users with a smaller number of nodes will be expected to receive + nanos more often. + +#### Fixed + +- In the case where a client retries a failed upload, they would re-send their payment. In a rare + circumstance, the node would forward this reward for a second time too. This is fixed on the node. +- Nodes are prevented from double spending under rare circumstances. +- ARM builds are no longer prevented from connecting to the network. + +### Node Manager + +#### Added + +- Global `--debug` and `--trace` arguments are provided. These will output debugging and trace-level + logging, respectively, direct to stderr. + +#### Changed + +- The mechanism used by the node manager to refresh its state is significantly changed to address + issues that caused commands to hang for long periods of time. Now, when using commands like + `start`, `stop`, and `reset`, users should no longer experience the commands taking excessively + long to complete. +- The `nat-detection run` command provides a default list of servers, meaning the `--servers` + argument is now optional. + +### Launchpad + +#### Added + +- Launchpad and node versions are displayed on the user interface. + +#### Changed + +- The node manager change for refreshing its state also applies to the launchpad. Users should + experience improvements in operations that appeared to be hanging but were actually just taking + an excessive amount of time to complete. + +#### Fixed + +- The correct primary storage will now be selected on Linux and macOS. 
diff --git a/Cargo.lock b/Cargo.lock index 53704a25fc..55e0dd315b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4559,7 +4559,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.1.0" +version = "0.2.0" dependencies = [ "clap", "clap-verbosity-flag", @@ -4674,7 +4674,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.3.9" +version = "0.3.10" dependencies = [ "atty", "better-panic", @@ -6917,7 +6917,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.9.7" +version = "0.10.0" dependencies = [ "assert_cmd", "assert_fs", @@ -6979,7 +6979,7 @@ dependencies = [ [[package]] name = "sn_auditor" -version = "0.2.1" +version = "0.2.2" dependencies = [ "blsttc", "clap", @@ -7013,14 +7013,14 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.8" +version = "0.1.9" dependencies = [ "vergen", ] [[package]] name = "sn_cli" -version = "0.93.8" +version = "0.93.9" dependencies = [ "aes 0.7.5", "base64 0.22.1", @@ -7061,7 +7061,7 @@ dependencies = [ [[package]] name = "sn_client" -version = "0.107.9" +version = "0.108.0" dependencies = [ "assert_matches", "async-trait", @@ -7146,7 +7146,7 @@ dependencies = [ [[package]] name = "sn_faucet" -version = "0.4.29" +version = "0.4.30" dependencies = [ "assert_fs", "base64 0.22.1", @@ -7178,7 +7178,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.29" +version = "0.2.30" dependencies = [ "chrono", "color-eyre", @@ -7203,7 +7203,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.9" +version = "0.1.10" dependencies = [ "clap", "color-eyre", @@ -7217,7 +7217,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.16.6" +version = "0.17.0" dependencies = [ "aes-gcm-siv", "async-trait", @@ -7259,7 +7259,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.108.4" +version = "0.109.0" dependencies = [ "assert_fs", "assert_matches", @@ -7314,7 
+7314,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.24" +version = "0.6.25" dependencies = [ "assert_fs", "async-trait", @@ -7341,7 +7341,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.3.5" +version = "0.4.0" dependencies = [ "clap", "lazy_static", @@ -7357,7 +7357,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.4" +version = "0.17.5" dependencies = [ "blsttc", "bytes", @@ -7384,7 +7384,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.3.14" +version = "0.3.15" dependencies = [ "blsttc", "crdts", @@ -7401,7 +7401,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.3.7" +version = "0.3.8" dependencies = [ "async-trait", "dirs-next", @@ -7427,7 +7427,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.18.7" +version = "0.18.8" dependencies = [ "assert_fs", "blsttc", diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 880aee316c..3eafb22135 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.0" +version = "0.2.0" [[bin]] name = "nat-detection" @@ -28,7 +28,7 @@ libp2p = { version = "0.53", features = [ "macros", "upnp", ] } -sn_networking = { path = "../sn_networking", version = "0.16.6" } +sn_networking = { path = "../sn_networking", version = "0.17.0" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index 1d04807df4..e3376cad89 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Node Launchpad" name = "node-launchpad" -version = "0.3.9" +version = "0.3.10" edition = "2021" license = "GPL-3.0" homepage = 
"https://maidsafe.net" @@ -49,10 +49,10 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn-node-manager = { version = "0.9.7", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.3.5", path = "../sn_peers_acquisition" } +sn-node-manager = { version = "0.10.0", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.4.0", path = "../sn_peers_acquisition" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.3.7", path = "../sn_service_management" } +sn_service_management = { version = "0.3.8", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/resources/scripts/dag-user-comparator.sh b/resources/scripts/dag-user-comparator.sh index c201032abd..211ba462c9 100755 --- a/resources/scripts/dag-user-comparator.sh +++ b/resources/scripts/dag-user-comparator.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Check if the correct number of arguments is provided if [ "$#" -ne 2 ]; then diff --git a/resources/scripts/dag-user-sync.sh b/resources/scripts/dag-user-sync.sh index 56c44c6faf..9f54af84ca 100755 --- a/resources/scripts/dag-user-sync.sh +++ b/resources/scripts/dag-user-sync.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Check if the correct number of arguments is provided if [ "$#" -ne 2 ]; then diff --git a/resources/scripts/make-wallets.sh b/resources/scripts/make-wallets.sh index aa80a21f1f..b886fcc7a7 100755 --- a/resources/scripts/make-wallets.sh +++ b/resources/scripts/make-wallets.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Function to print a message in a box print_in_box() { diff --git a/resources/scripts/set-release-channel.sh b/resources/scripts/set-release-channel.sh index fe7ac4252e..d61928b3c0 100755 --- a/resources/scripts/set-release-channel.sh +++ 
b/resources/scripts/set-release-channel.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Define the workspace Cargo.toml location (ensure you're in the workspace root) WORKSPACE_CARGO_TOML="./Cargo.toml" diff --git a/resources/scripts/sync_crates_versions.sh b/resources/scripts/sync_crates_versions.sh index 49cf55d6cf..bc33ecc53e 100755 --- a/resources/scripts/sync_crates_versions.sh +++ b/resources/scripts/sync_crates_versions.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Parse members from Cargo.toml using tomlq members=() diff --git a/resources/scripts/upload-random-data.sh b/resources/scripts/upload-random-data.sh index e0fe843d7a..dbcf5b06be 100755 --- a/resources/scripts/upload-random-data.sh +++ b/resources/scripts/upload-random-data.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Target rate of 1.5mb/s diff --git a/sn_auditor/Cargo.toml b/sn_auditor/Cargo.toml index 8528f827bb..7c8497118a 100644 --- a/sn_auditor/Cargo.toml +++ b/sn_auditor/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Network Auditor" name = "sn_auditor" -version = "0.2.1" +version = "0.2.2" edition = "2021" homepage = "https://maidsafe.net" repository = "https://github.com/maidsafe/safe_network" @@ -31,9 +31,9 @@ graphviz-rust = { version = "0.9.0", optional = true } lazy_static = "1.4.0" serde = { version = "1.0.133", features = ["derive", "rc"] } serde_json = "1.0.108" -sn_client = { path = "../sn_client", version = "0.107.9" } -sn_logging = { path = "../sn_logging", version = "0.2.29" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.3.5" } +sn_client = { path = "../sn_client", version = "0.108.0" } +sn_logging = { path = "../sn_logging", version = "0.2.30" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } tiny_http = { version = "0.12", features = ["ssl-rustls"] } tracing = { version = "~0.1.26" } tokio = { version = "1.32.0", features = [ diff --git 
a/sn_auditor/src/dag_db.rs b/sn_auditor/src/dag_db.rs index 0ed400dd05..84bf0b40f1 100644 --- a/sn_auditor/src/dag_db.rs +++ b/sn_auditor/src/dag_db.rs @@ -92,15 +92,19 @@ impl SpendDagDb { client: Client, encryption_sk: Option, ) -> Result { + if !path.exists() { + debug!("Creating directory {path:?}..."); + std::fs::create_dir_all(&path)?; + } let dag_path = path.join(SPEND_DAG_FILENAME); info!("Loading DAG from {dag_path:?}..."); let dag = match SpendDag::load_from_file(&dag_path) { Ok(d) => { - println!("Found a local spend DAG file"); + info!("Found a local spend DAG file"); d } Err(_) => { - println!("Found no local spend DAG file, starting from Genesis"); + info!("Found no local spend DAG file, starting from Genesis"); client.new_dag_with_genesis_only().await? } }; @@ -232,7 +236,7 @@ impl SpendDagDb { }); Some(tx) } else { - eprintln!("Foundation secret key not set! Beta rewards will not be processed."); + warn!("Foundation secret key not set! Beta rewards will not be processed."); None }; @@ -364,7 +368,6 @@ impl SpendDagDb { { if let Some(user_name) = beta_participants_read.get(&default_user_name_hash) { warn!("With default key, got forwarded reward {amount} from {user_name} of {amount} at {addr:?}"); - println!("With default key, got forwarded reward {amount} from {user_name} of {amount} at {addr:?}"); beta_tracking .forwarded_payments .entry(user_name.to_owned()) @@ -375,7 +378,6 @@ impl SpendDagDb { } warn!("Found a forwarded reward {amount} for an unknown participant at {addr:?}: {user_name_hash:?}"); - println!("Found a forwarded reward {amount} for an unknown participant at {addr:?}: {user_name_hash:?}"); beta_tracking .forwarded_payments .entry(format!("unknown participant: {user_name_hash:?}")) diff --git a/sn_auditor/src/main.rs b/sn_auditor/src/main.rs index e3a58aec6e..6d2421bd69 100644 --- a/sn_auditor/src/main.rs +++ b/sn_auditor/src/main.rs @@ -139,7 +139,7 @@ fn logging_init( async fn connect_to_network(peers_args: PeersArgs) -> Result { 
let bootstrap_peers = peers_args.get_peers().await?; - println!( + info!( "Connecting to the network with {} bootstrap peers", bootstrap_peers.len(), ); @@ -153,7 +153,7 @@ async fn connect_to_network(peers_args: PeersArgs) -> Result { .await .map_err(|err| eyre!("Failed to connect to the network: {err}"))?; - println!("Connected to the network"); + info!("Connected to the network"); Ok(client) } @@ -168,10 +168,10 @@ fn initialize_background_rewards_backup(dag: SpendDagDb) { BETA_REWARDS_BACKUP_INTERVAL_SECS, )) .await; - println!("Backing up beta rewards..."); + info!("Backing up beta rewards..."); if let Err(e) = dag.backup_rewards().await { - eprintln!("Failed to backup beta rewards: {e}"); + error!("Failed to backup beta rewards: {e}"); } } }); @@ -187,14 +187,18 @@ async fn initialize_background_spend_dag_collection( beta_participants: BTreeSet, foundation_sk: Option, ) -> Result { - println!("Initialize spend dag..."); + info!("Initialize spend dag..."); let path = get_auditor_data_dir_path()?; + if !path.exists() { + debug!("Creating directory {path:?}..."); + std::fs::create_dir_all(&path)?; + } // clean the local spend DAG if requested if clean { - println!("Cleaning local spend DAG..."); + info!("Cleaning local spend DAG..."); let dag_file = path.join(dag_db::SPEND_DAG_FILENAME); - let _ = std::fs::remove_file(dag_file).map_err(|e| eprintln!("Cleanup interrupted: {e}")); + let _ = std::fs::remove_file(dag_file).map_err(|e| error!("Cleanup interrupted: {e}")); } // initialize the DAG @@ -205,7 +209,6 @@ async fn initialize_background_spend_dag_collection( // optional force restart from genesis and merge into our current DAG // feature guard to prevent a mis-use of opt if force_from_genesis && cfg!(feature = "dag-collection") { - println!("Forcing DAG to be updated from genesis..."); warn!("Forcing DAG to be updated from genesis..."); let mut d = dag.clone(); let mut genesis_dag = client @@ -219,7 +222,7 @@ async fn 
initialize_background_spend_dag_collection( let _ = d .merge(genesis_dag) .await - .map_err(|e| eprintln!("Failed to merge from genesis DAG into our DAG: {e}")); + .map_err(|e| error!("Failed to merge from genesis DAG into our DAG: {e}")); }); } @@ -233,21 +236,21 @@ async fn initialize_background_spend_dag_collection( panic!("Foundation SK required to initialize beta rewards program"); }; - println!("Initializing beta rewards program tracking..."); + info!("Initializing beta rewards program tracking..."); if let Err(e) = dag.track_new_beta_participants(beta_participants).await { - eprintln!("Could not initialize beta rewards: {e}"); + error!("Could not initialize beta rewards: {e}"); return Err(e); } } // background thread to update DAG - println!("Starting background DAG collection thread..."); + info!("Starting background DAG collection thread..."); let d = dag.clone(); tokio::spawn(async move { let _ = d .continuous_background_update() .await - .map_err(|e| eprintln!("Failed to update DAG in background thread: {e}")); + .map_err(|e| error!("Failed to update DAG in background thread: {e}")); }); Ok(dag) @@ -255,9 +258,9 @@ async fn initialize_background_spend_dag_collection( async fn start_server(dag: SpendDagDb) -> Result<()> { let server = Server::http("0.0.0.0:4242").expect("Failed to start server"); - println!("Starting dag-query server listening on port 4242..."); + info!("Starting dag-query server listening on port 4242..."); for request in server.incoming_requests() { - println!( + info!( "Received request! 
method: {:?}, url: {:?}", request.method(), request.url(), @@ -313,7 +316,7 @@ fn load_and_update_beta_participants( .lines() .map(|line| line.trim().to_string()) .collect::>(); - println!( + debug!( "Tracking beta rewards for the {} discord usernames provided in {:?}", discord_names.len(), participants_file @@ -331,7 +334,7 @@ fn load_and_update_beta_participants( .lines() .map(|line| line.trim().to_string()) .collect::>(); - println!( + debug!( "Restoring beta rewards for the {} discord usernames from {:?}", discord_names.len(), local_participants_file @@ -340,7 +343,7 @@ fn load_and_update_beta_participants( } // write the beta participants to disk let _ = std::fs::write(local_participants_file, beta_participants.join("\n")) - .map_err(|e| eprintln!("Failed to write beta participants to disk: {e}")); + .map_err(|e| error!("Failed to write beta participants to disk: {e}")); Ok(beta_participants.into_iter().collect()) } diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index 4466b45199..43517827bb 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.8" +version = "0.1.9" [build-dependencies] vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] } diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml index cb79a986bc..cd83b0acf4 100644 --- a/sn_cli/Cargo.toml +++ b/sn_cli/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_cli" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.93.8" +version = "0.93.9" [[bin]] path = "src/bin/main.rs" @@ -57,11 +57,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [ ] } rmp-serde = "1.1.1" serde = { version = "1.0.133", features = ["derive"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.8" } -sn_client = { path = "../sn_client", version = "0.107.9" } 
-sn_logging = { path = "../sn_logging", version = "0.2.29" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.3.5" } -sn_protocol = { path = "../sn_protocol", version = "0.17.4" } +sn_build_info = { path = "../sn_build_info", version = "0.1.9" } +sn_client = { path = "../sn_client", version = "0.108.0" } +sn_logging = { path = "../sn_logging", version = "0.2.30" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } +sn_protocol = { path = "../sn_protocol", version = "0.17.5" } tempfile = "3.6.0" tiny-keccak = "~2.0.2" tokio = { version = "1.32.0", features = [ @@ -83,7 +83,7 @@ eyre = "0.6.8" criterion = "0.5.1" tempfile = "3.6.0" rand = { version = "~0.8.5", features = ["small_rng"] } -sn_client = { path = "../sn_client", version = "0.107.9", features = [ +sn_client = { path = "../sn_client", version = "0.108.0", features = [ "test-utils", ] } diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index 6f83bf14d2..6aa122ca3d 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.107.9" +version = "0.108.0" [features] default = [] @@ -49,17 +49,17 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.16.6" } -sn_protocol = { path = "../sn_protocol", version = "0.17.4" } +sn_networking = { path = "../sn_networking", version = "0.17.0" } +sn_protocol = { path = "../sn_protocol", version = "0.17.5" } serde_json = "1.0" -sn_registers = { path = "../sn_registers", version = "0.3.14" } -sn_transfers = { path = "../sn_transfers", version = "0.18.7" } +sn_registers = { path = "../sn_registers", version = "0.3.15" } +sn_transfers = { path = "../sn_transfers", version = "0.18.8" } tempfile = "3.6.0" thiserror = "1.0.23" tiny-keccak = 
"~2.0.2" tracing = { version = "~0.1.26" } xor_name = "5.0.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.3.5", optional = true } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0", optional = true } eyre = { version = "0.6.8", optional = true } lazy_static = { version = "~1.4.0", optional = true } @@ -69,8 +69,8 @@ dirs-next = "~2.0.0" # add rand to libp2p libp2p-identity = { version = "0.2.7", features = ["rand"] } sn_client = { path = "../sn_client", features = ["test-utils"] } -sn_logging = { path = "../sn_logging", version = "0.2.29" } -sn_registers = { path = "../sn_registers", version = "0.3.13", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.30" } +sn_registers = { path = "../sn_registers", version = "0.3.15", features = [ "test-utils", ] } @@ -85,7 +85,7 @@ crate-type = ["cdylib", "rlib"] getrandom = { version = "0.2.12", features = ["js"] } wasm-bindgen = "0.2.90" wasm-bindgen-futures = "0.4.40" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.3.5" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } console_error_panic_hook = "0.1.6" tracing-wasm = "0.2.1" wasmtimer = "0.2.0" diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml index 96dd5c2c85..18e85260da 100644 --- a/sn_faucet/Cargo.toml +++ b/sn_faucet/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_faucet" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.29" +version = "0.4.30" [features] default = ["gifting"] @@ -37,13 +37,13 @@ indicatif = { version = "0.17.5", features = ["tokio"] } minreq = { version = "2.11.0", features = ["https-rustls"], optional = true } serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" -sn_build_info = { path = "../sn_build_info", version = "0.1.8" } -sn_cli = { path = "../sn_cli", version = "0.93.8" } -sn_client = { path = "../sn_client", version = "0.107.9" } 
-sn_logging = { path = "../sn_logging", version = "0.2.29" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.3.5" } -sn_protocol = { path = "../sn_protocol", version = "0.17.4" } -sn_transfers = { path = "../sn_transfers", version = "0.18.7" } +sn_build_info = { path = "../sn_build_info", version = "0.1.9" } +sn_cli = { path = "../sn_cli", version = "0.93.9" } +sn_client = { path = "../sn_client", version = "0.108.0" } +sn_logging = { path = "../sn_logging", version = "0.2.30" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } +sn_protocol = { path = "../sn_protocol", version = "0.17.5" } +sn_transfers = { path = "../sn_transfers", version = "0.18.8" } tokio = { version = "1.32.0", features = ["parking_lot", "rt"] } tracing = { version = "~0.1.26" } url = "2.5.0" diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index 6dd16309a0..01f93bd42d 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.29" +version = "0.2.30" [dependencies] chrono = "~0.4.19" diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index e135efb5e6..c34a35f57d 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.9" +version = "0.1.10" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index 91d8424b18..d712ff1b42 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.16.6" +version = "0.17.0" [features] default = ["libp2p/quic"] @@ -54,10 +54,10 @@ rand = { version = "~0.8.5", features = ["small_rng"] } rayon = 
"1.8.0" rmp-serde = "1.1.1" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path="../sn_build_info", version = "0.1.8" } -sn_protocol = { path = "../sn_protocol", version = "0.17.4" } -sn_transfers = { path = "../sn_transfers", version = "0.18.7" } -sn_registers = { path = "../sn_registers", version = "0.3.14" } +sn_build_info = { path="../sn_build_info", version = "0.1.9" } +sn_protocol = { path = "../sn_protocol", version = "0.17.5" } +sn_transfers = { path = "../sn_transfers", version = "0.18.8" } +sn_registers = { path = "../sn_registers", version = "0.3.15" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 064e02540e..419183bf3d 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -310,7 +310,7 @@ impl SwarmDriver { } SwarmEvent::NewListenAddr { - address, + mut address, listener_id, } => { event_string = "new listen addr"; @@ -327,7 +327,10 @@ impl SwarmDriver { }; let local_peer_id = *self.swarm.local_peer_id(); - let address = address.with(Protocol::P2p(local_peer_id)); + // Make sure the address ends with `/p2p/`. In case of relay, `/p2p` is already there. + if address.iter().last() != Some(Protocol::P2p(local_peer_id)) { + address.push(Protocol::P2p(local_peer_id)); + } // Trigger server mode if we're not a client and we should not add our own address if we're behind // home network. 
diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index db1f13ee92..848c8210a8 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -42,7 +42,6 @@ pub use self::{ error::{GetRecordError, NetworkError}, event::{MsgResponder, NetworkEvent}, record_store::{calculate_cost_for_records, NodeRecordStore}, - spends::SpendVerificationOk, transfers::{get_raw_signed_spends_from_record, get_signed_spend_from_record}, }; diff --git a/sn_networking/src/spends.rs b/sn_networking/src/spends.rs index faff6e82c3..447bbb9633 100644 --- a/sn_networking/src/spends.rs +++ b/sn_networking/src/spends.rs @@ -11,22 +11,15 @@ use futures::future::join_all; use sn_transfers::{is_genesis_spend, SignedSpend, SpendAddress, TransferError}; use std::{collections::BTreeSet, iter::Iterator}; -#[derive(Debug)] -pub enum SpendVerificationOk { - Valid, - ParentDoubleSpend, -} - impl Network { /// This function verifies a single spend. /// This is used by nodes for spends validation, before storing them. /// - It checks if the spend has valid ancestry, that its parents exist on the Network. 
- /// - If the parent is a double spend, we still carry out the valdiation, but return SpendVerificationOk::ParentDoubleSpend + /// - If the parent is a double spend, we still carry out the validation, but at the end return the error /// - It checks that the spend has a valid signature and content /// - It does NOT check if the spend exists online /// - It does NOT check if the spend is already spent on the Network - pub async fn verify_spend(&self, spend: &SignedSpend) -> Result { - let mut result = SpendVerificationOk::Valid; + pub async fn verify_spend(&self, spend: &SignedSpend) -> Result<()> { let unique_key = spend.unique_pubkey(); debug!("Verifying spend {unique_key}"); spend.verify(spend.spent_tx_hash())?; @@ -34,10 +27,11 @@ impl Network { // genesis does not have parents so we end here if is_genesis_spend(spend) { debug!("Verified {unique_key} was Genesis spend!"); - return Ok(result); + return Ok(()); } // get its parents + let mut result = Ok(()); let parent_keys = spend .spend .parent_tx @@ -61,7 +55,7 @@ impl Network { Err(NetworkError::DoubleSpendAttempt(attempts)) => { warn!("While verifying {unique_key:?}, a double spend attempt ({attempts:?}) detected for the parent with pub key {parent_key:?} . 
Continuing verification."); parent_spends.insert(BTreeSet::from_iter(attempts)); - result = SpendVerificationOk::ParentDoubleSpend; + result = Err(NetworkError::Transfer(TransferError::DoubleSpentParent)); } Err(e) => { let s = format!("Failed to get parent spend of {unique_key} parent pubkey: {parent_key:?} error: {e}"); @@ -74,6 +68,6 @@ impl Network { // verify the parents spend.verify_parent_spends(parent_spends.iter())?; - Ok(result) + result } } diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index 0e7d3dec94..1996cc6058 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Node" name = "sn_node" -version = "0.108.4" +version = "0.109.0" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -51,15 +51,15 @@ rmp-serde = "1.1.1" rayon = "1.8.0" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.8" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.3.5" } -sn_client = { path = "../sn_client", version = "0.107.9" } -sn_logging = { path = "../sn_logging", version = "0.2.29" } -sn_networking = { path = "../sn_networking", version = "0.16.6" } -sn_protocol = { path = "../sn_protocol", version = "0.17.4" } -sn_registers = { path = "../sn_registers", version = "0.3.14" } -sn_transfers = { path = "../sn_transfers", version = "0.18.7" } -sn_service_management = { path = "../sn_service_management", version = "0.3.7" } +sn_build_info = { path = "../sn_build_info", version = "0.1.9" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } +sn_client = { path = "../sn_client", version = "0.108.0" } +sn_logging = { path = "../sn_logging", version = "0.2.30" } +sn_networking = { path = "../sn_networking", version = "0.17.0" } +sn_protocol = { path = "../sn_protocol", version = "0.17.5" } +sn_registers = { path = "../sn_registers", 
version = "0.3.15" } +sn_transfers = { path = "../sn_transfers", version = "0.18.8" } +sn_service_management = { path = "../sn_service_management", version = "0.3.8" } thiserror = "1.0.23" tokio = { version = "1.32.0", features = [ "io-util", @@ -86,10 +86,10 @@ reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_protocol = { path = "../sn_protocol", version = "0.17.4", features = [ +sn_protocol = { path = "../sn_protocol", version = "0.17.5", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.18.7", features = [ +sn_transfers = { path = "../sn_transfers", version = "0.18.8", features = [ "test-utils", ] } tempfile = "3.6.0" diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index 2af142ca5d..656eadc8da 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -8,10 +8,7 @@ use crate::{node::Node, quote::verify_quote_for_storecost, Error, Marker, Result}; use libp2p::kad::{Record, RecordKey}; -use sn_networking::{ - get_raw_signed_spends_from_record, GetRecordError, NetworkError, SpendVerificationOk, - MAX_PACKET_SIZE, -}; +use sn_networking::{get_raw_signed_spends_from_record, GetRecordError, NetworkError}; use sn_protocol::{ storage::{ try_deserialize_record, try_serialize_record, Chunk, RecordHeader, RecordKind, RecordType, @@ -28,12 +25,6 @@ use std::collections::BTreeSet; use tokio::task::JoinSet; use xor_name::XorName; -/// The maximum number of double spend attempts to store that we got from PUTs -const MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_FROM_PUTS: usize = 15; - -/// The maximum number of double spend attempts to store inside a record -const MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_PER_RECORD: usize = 30; - impl Node { /// Validate a record and it's payment, and store the record to the RecordStore pub(crate) async fn validate_and_store_record(&self, record: Record) -> Result<()> { @@ -99,7 +90,7 @@ impl Node { let 
value_to_hash = record.value.clone(); let spends = try_deserialize_record::>(&record)?; let result = self - .validate_merge_and_store_spends(spends, &record_key, true) + .validate_merge_and_store_spends(spends, &record_key) .await; if result.is_ok() { Marker::ValidSpendPutFromClient(&PrettyPrintRecordKey::from(&record_key)).log(); @@ -207,7 +198,7 @@ impl Node { RecordKind::Spend => { let record_key = record.key.clone(); let spends = try_deserialize_record::>(&record)?; - self.validate_merge_and_store_spends(spends, &record_key, false) + self.validate_merge_and_store_spends(spends, &record_key) .await } RecordKind::Register => { @@ -342,7 +333,6 @@ impl Node { &self, signed_spends: Vec, record_key: &RecordKey, - from_put: bool, ) -> Result<()> { let pretty_key = PrettyPrintRecordKey::from(record_key); debug!("Validating spends before storage at {pretty_key:?}"); @@ -381,10 +371,11 @@ impl Node { // validate the signed spends against the network and the local knowledge debug!("Validating spends for {pretty_key:?} with unique key: {unique_pubkey:?}"); let validated_spends = match self - .signed_spends_to_keep(spends_for_key.clone(), *unique_pubkey, from_put) + .signed_spends_to_keep(spends_for_key.clone(), *unique_pubkey) .await { - Ok(s) => s, + Ok((one, None)) => vec![one], + Ok((one, Some(two))) => vec![one, two], Err(e) => { warn!("Failed to validate spends at {pretty_key:?} with unique key {unique_pubkey:?}: {e}"); return Err(e); @@ -648,47 +639,31 @@ impl Node { } /// Determine which spends our node should keep and store - /// - if our local copy has reached the len/size limits, we don't store anymore from kad::PUT and return the local copy - /// - else if the request is from replication OR if limit not reached during kad::PUT, then: - /// - trust local spends - /// - downloads spends from the network - /// - verifies incoming spend + network spends and ignores the invalid ones. 
- /// - orders all the verified spends from local + incoming + network - /// - returns a maximum of MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_PER_RECORD spends + /// - get local spends and trust them + /// - get spends from the network + /// - verify incoming spend + network spends and ignore the invalid ones + /// - orders all the verified spends by: + /// - if they have spent descendants (meaning live branch) + /// - deterministically by their order in the BTreeSet + /// - returns the spend to keep along with another spend if it was a double spend + /// - when we get more than two spends, only keeps 2 that are chosen deterministically so + /// all nodes running this code are eventually consistent async fn signed_spends_to_keep( &self, signed_spends: Vec, unique_pubkey: UniquePubkey, - from_put: bool, - ) -> Result> { + ) -> Result<(SignedSpend, Option)> { let spend_addr = SpendAddress::from_unique_pubkey(&unique_pubkey); debug!( "Validating before storing spend at {spend_addr:?} with unique key: {unique_pubkey}" ); + // trust local spends as we've verified them before let local_spends = self.get_local_spends(spend_addr).await?; - let size_of_local_spends = try_serialize_record(&local_spends, RecordKind::Spend)? 
- .to_vec() - .len(); - let max_spend_len_reached = - local_spends.len() >= MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_FROM_PUTS; - let max_spend_size_reached = { - // todo: limit size of a single signed spend to < max_packet_size/2 - let size_limit = size_of_local_spends >= MAX_PACKET_SIZE / 2; - // just so that we can store the double spend - size_limit && local_spends.len() > 1 - }; - - if (max_spend_len_reached || max_spend_size_reached) && from_put { - info!("We already have {MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_FROM_PUTS} spends locally or have maximum size of spends, skipping spends received via PUT for {unique_pubkey:?}"); - return Ok(local_spends); - } - let mut all_verified_spends = BTreeSet::from_iter(local_spends.into_iter()); // get spends from the network at the address for that unique pubkey let network_spends = match self.network().get_raw_spends(spend_addr).await { Ok(spends) => spends, - Err(NetworkError::GetRecordError(GetRecordError::RecordNotFound)) => vec![], Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => { warn!("Got a split record (double spend) for {unique_pubkey:?} from the network"); let mut spends = vec![]; @@ -722,28 +697,43 @@ impl Node { vec![] } }; + debug!( + "For {unique_pubkey:?} got {} local spends, {} from network and {} provided", + local_spends.len(), + network_spends.len(), + signed_spends.len() + ); + debug!("Local spends {local_spends:?}; from network {network_spends:?}; provided {signed_spends:?}"); + + // only verify spends we don't know of + let mut all_verified_spends = BTreeSet::from_iter(local_spends.into_iter()); + let unverified_spends = + BTreeSet::from_iter(network_spends.into_iter().chain(signed_spends.into_iter())); + let known_spends = all_verified_spends.clone(); + let new_unverified_spends: BTreeSet<_> = + unverified_spends.difference(&known_spends).collect(); - let mut parent_is_a_double_spend = false; - // check the received spends and the spends got from the network let mut tasks = 
JoinSet::new(); - for s in signed_spends.into_iter().chain(network_spends.into_iter()) { + for s in new_unverified_spends.into_iter() { let self_clone = self.clone(); + let spend_clone = s.clone(); let _ = tasks.spawn(async move { - let res = self_clone.network().verify_spend(&s).await; - (s, res) + let res = self_clone.network().verify_spend(&spend_clone).await; + (spend_clone, res) }); } - // collect spends until we have a double spend or until we have all the results + // gather verified spends + let mut double_spent_parent = BTreeSet::new(); while let Some(res) = tasks.join_next().await { match res { - Ok((spend, Ok(spend_verification_ok))) => { - info!("Successfully verified {spend:?} with result: {spend_verification_ok:?}"); - if let SpendVerificationOk::ParentDoubleSpend = spend_verification_ok { - // the parent is a double spend, but we will store it incase our spend is also a double spend. - parent_is_a_double_spend = true; - } - let _inserted = all_verified_spends.insert(spend); + Ok((spend, Ok(()))) => { + info!("Successfully verified {spend:?}"); + let _inserted = all_verified_spends.insert(spend.to_owned().clone()); + } + Ok((spend, Err(NetworkError::Transfer(TransferError::DoubleSpentParent)))) => { + warn!("Parent of {spend:?} was double spent, keeping aside in case we're a double spend as well"); + let _ = double_spent_parent.insert(spend.clone()); } Ok((spend, Err(e))) => { // an error here most probably means the received spend is invalid @@ -758,33 +748,100 @@ impl Node { } } - if parent_is_a_double_spend && all_verified_spends.len() == 1 { - warn!("Parent is a double spend for {unique_pubkey:?}, ignoring this spend"); - return Err(Error::Transfers(TransferError::InvalidParentSpend( - format!("Parent is a double spend for {unique_pubkey:?}"), - ))); - } else if parent_is_a_double_spend && all_verified_spends.len() > 1 { - warn!("Parent is a double spend for {unique_pubkey:?}, but we're also a double spend. 
So storing our double spend attempt."); + // keep track of double spend with double spent parent + if !all_verified_spends.is_empty() && !double_spent_parent.is_empty() { + warn!("Parent of {unique_pubkey:?} was double spent, but it's also a double spend. So keeping track of this double spend attempt."); + all_verified_spends.extend(double_spent_parent.into_iter()) } - // todo: should we also check the size of spends here? Maybe just limit the size of a single - // SignedSpend to < max_packet_size/2 so that we can store atleast 2 of them. - let verified_spends = all_verified_spends - .into_iter() - .take(MAX_DOUBLE_SPEND_ATTEMPTS_TO_KEEP_PER_RECORD) - .collect::>(); - - if verified_spends.is_empty() { - debug!("No valid spends found while validating Spend PUT. Who is sending us garbage?"); - Err(Error::InvalidRequest(format!( - "Found no valid spends while validating Spend PUT for {unique_pubkey:?}" - ))) - } else if verified_spends.len() > 1 { - warn!("Got a double spend for {unique_pubkey:?}"); - Ok(verified_spends) - } else { - debug!("Got a single valid spend for {unique_pubkey:?}"); - Ok(verified_spends) + // return 2 spends max + let all_verified_spends: Vec<_> = all_verified_spends.into_iter().collect(); + match all_verified_spends.as_slice() { + [one_spend] => Ok((one_spend.clone(), None)), + [one, two] => Ok((one.clone(), Some(two.clone()))), + [] => { + warn!("Invalid request: none of the spends were valid for {unique_pubkey:?}"); + Err(Error::InvalidRequest(format!( + "Found no valid spends while validating Spends for {unique_pubkey:?}" + ))) + } + more => { + warn!("Got more than 2 verified spends, this might be a double spend spam attack, making sure to favour live branches (branches with spent descendants)"); + let (one, two) = self.verified_spends_select_2_live(more).await?; + Ok((one, Some(two))) + } + } + } + + async fn verified_spends_select_2_live( + &self, + many_spends: &[SignedSpend], + ) -> Result<(SignedSpend, SignedSpend)> { + // get all 
spends descendants + let mut tasks = JoinSet::new(); + for spend in many_spends { + let descendants: BTreeSet<_> = spend + .spend + .spent_tx + .outputs + .iter() + .map(|o| o.unique_pubkey()) + .map(SpendAddress::from_unique_pubkey) + .collect(); + for d in descendants { + let self_clone = self.clone(); + let spend_clone = spend.to_owned(); + let _ = tasks.spawn(async move { + let res = self_clone.network().get_raw_spends(d).await; + (spend_clone, res) + }); + } + } + + // identify up to two live spends (aka spends with spent descendants) + let mut live_spends = BTreeSet::new(); + while let Some(res) = tasks.join_next().await { + match res { + Ok((spend, Ok(_descendant))) => { + trace!("Spend {spend:?} has a live descendant"); + let _inserted = live_spends.insert(spend); + } + Ok((spend, Err(NetworkError::GetRecordError(GetRecordError::RecordNotFound)))) => { + trace!("Spend {spend:?} descendant was not found, continuing..."); + } + Ok((spend, Err(e))) => { + warn!( + "Error fetching spend descendant while checking if {spend:?} is live: {e}" + ); + } + Err(e) => { + let s = format!("Async thread error while selecting live spends: {e}"); + error!("{}", s); + return Err(Error::JoinErrorInAsyncThread(s))?; + } + } + } + + // order by live or not live, then order in the BTreeSet and take first 2 + let not_live_spends: BTreeSet<_> = many_spends + .iter() + .filter(|s| !live_spends.contains(s)) + .collect(); + debug!( + "Got {} live spends and {} not live ones, keeping only the favoured 2", + live_spends.len(), + not_live_spends.len() + ); + let ordered_spends: Vec<_> = live_spends + .iter() + .chain(not_live_spends.into_iter()) + .collect(); + match ordered_spends.as_slice() { + [one, two, ..] 
=> Ok((one.to_owned().clone(), two.to_owned().clone())), + _ => Err(Error::InvalidRequest(format!( + "Expected many spends but got {}", + many_spends.len() + ))), } } } diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs index f67e9b42c5..7c253f618e 100644 --- a/sn_node/tests/double_spend.rs +++ b/sn_node/tests/double_spend.rs @@ -12,7 +12,9 @@ use assert_fs::TempDir; use assert_matches::assert_matches; use common::client::{get_client_and_funded_wallet, get_wallet}; use eyre::Result; +use itertools::Itertools; use sn_logging::LogBuilder; +use sn_networking::NetworkError; use sn_transfers::{ get_genesis_sk, rng, DerivationIndex, HotWallet, NanoTokens, OfflineTransfer, SpendReason, WalletError, GENESIS_CASHNOTE, @@ -332,6 +334,7 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() reason.clone(), )?; + info!("spend B to C: {:?}", transfer_to_c.all_spend_requests); client .send_spends(transfer_to_c.all_spend_requests.iter(), false) .await?; @@ -384,9 +387,18 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() wallet_b.address(), reason.clone(), )?; // reuse the old cash notes + + info!("spend B to Y: {:?}", transfer_to_y.all_spend_requests); client .send_spends(transfer_to_y.all_spend_requests.iter(), false) .await?; + let spend_b_to_y = transfer_to_y + .all_spend_requests + .first() + .expect("should have one"); + let b_spends = client.get_spend_from_network(spend_b_to_y.address()).await; + info!("B spends: {b_spends:?}"); + info!("Verifying the transfers from B -> Y wallet... 
It should error out."); let cash_notes_for_y: Vec<_> = transfer_to_y.cash_notes_for_recipient.clone(); let result = client.verify_cashnote(&cash_notes_for_y[0]).await; @@ -402,7 +414,7 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() assert!(str.starts_with("Network Error Double spend(s) was detected")); }); - info!("Verifying the original cashnote of B -> C"); + println!("Verifying the original cashnote of B -> C"); let result = client.verify_cashnote(&cash_notes_for_c[0]).await; info!("Got result while verifying the original spend from B -> C: {result:?}"); assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { @@ -411,3 +423,168 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid() Ok(()) } + +#[tokio::test] +/// When A -> B -> C where C is the UTXO cashnote, double spending A many times over and over +/// should not lead to the original A disappearing and B becoming orphan +async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> { + let _log_guards = LogBuilder::init_single_threaded_tokio_test("double_spend", true); + let mut rng = rng::thread_rng(); + let reason = SpendReason::default(); + // create 1 wallet add money from faucet + let wallet_dir_a = TempDir::new()?; + + let (client, mut wallet_a) = get_client_and_funded_wallet(wallet_dir_a.path()).await?; + let balance_a = wallet_a.balance().as_nano(); + let amount = NanoTokens::from(balance_a / 2); + + // Send from A -> B + let wallet_dir_b = TempDir::new()?; + let mut wallet_b = get_wallet(wallet_dir_b.path()); + assert_eq!(wallet_b.balance(), NanoTokens::zero()); + + let (cash_notes_a, _exclusive_access) = wallet_a.available_cash_notes()?; + let to_b_unique_key = ( + amount, + wallet_b.address(), + DerivationIndex::random(&mut rng), + ); + let transfer_to_b = OfflineTransfer::new( + cash_notes_a.clone(), + vec![to_b_unique_key], + wallet_a.address(), + reason.clone(), + )?; + + info!("Sending A->B to 
the network..."); + client + .send_spends(transfer_to_b.all_spend_requests.iter(), false) + .await?; + + // save original A spend + let original_a_spend = if let [spend] = transfer_to_b.all_spend_requests.as_slice() { + spend + } else { + panic!("Expected to have one spend here!"); + }; + + info!("Verifying the transfers from A -> B wallet..."); + let cash_notes_for_b: Vec<_> = transfer_to_b.cash_notes_for_recipient.clone(); + client.verify_cashnote(&cash_notes_for_b[0]).await?; + wallet_b.deposit_and_store_to_disk(&cash_notes_for_b)?; // store inside B + + // Send from B -> C + let wallet_dir_c = TempDir::new()?; + let mut wallet_c = get_wallet(wallet_dir_c.path()); + assert_eq!(wallet_c.balance(), NanoTokens::zero()); + + let (cash_notes_b, _exclusive_access) = wallet_b.available_cash_notes()?; + assert!(!cash_notes_b.is_empty()); + let to_c_unique_key = ( + wallet_b.balance(), + wallet_c.address(), + DerivationIndex::random(&mut rng), + ); + let transfer_to_c = OfflineTransfer::new( + cash_notes_b.clone(), + vec![to_c_unique_key], + wallet_b.address(), + reason.clone(), + )?; + + client + .send_spends(transfer_to_c.all_spend_requests.iter(), false) + .await?; + + info!("Verifying the transfers from B -> C wallet..."); + let cash_notes_for_c: Vec<_> = transfer_to_c.cash_notes_for_recipient.clone(); + client.verify_cashnote(&cash_notes_for_c[0]).await?; + wallet_c.deposit_and_store_to_disk(&cash_notes_for_c.clone())?; // store inside c + + // Try to double spend from A -> X + let wallet_dir_x = TempDir::new()?; + let wallet_x = get_wallet(wallet_dir_x.path()); + assert_eq!(wallet_x.balance(), NanoTokens::zero()); + + let to_x_unique_key = ( + amount, + wallet_x.address(), + DerivationIndex::random(&mut rng), + ); + let transfer_to_x = OfflineTransfer::new( + cash_notes_a.clone(), + vec![to_x_unique_key], + wallet_a.address(), + reason.clone(), + )?; // reuse the old cash notes + client + .send_spends(transfer_to_x.all_spend_requests.iter(), false) + .await?; + 
info!("Verifying the transfers from A -> X wallet... It should error out."); + let cash_notes_for_x: Vec<_> = transfer_to_x.cash_notes_for_recipient.clone(); + let result = client.verify_cashnote(&cash_notes_for_x[0]).await; + info!("Got result while verifying double spend from A -> X: {result:?}"); + assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { + assert!(str.starts_with("Network Error Double spend(s) was detected")); + }); + + // the original A should still be present as one of the double spends + let res = client + .get_spend_from_network(original_a_spend.address()) + .await; + assert_matches!( + res, + Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( + _ + ))) + ); + if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { + assert!(spends.iter().contains(original_a_spend)) + } + + // Try to double spend A -> n different random keys + for _ in 0..20 { + println!("Spamming double spends on A"); + let wallet_dir_y = TempDir::new()?; + let wallet_y = get_wallet(wallet_dir_y.path()); + assert_eq!(wallet_y.balance(), NanoTokens::zero()); + + let to_y_unique_key = ( + amount, + wallet_y.address(), + DerivationIndex::random(&mut rng), + ); + let transfer_to_y = OfflineTransfer::new( + cash_notes_a.clone(), + vec![to_y_unique_key], + wallet_a.address(), + reason.clone(), + )?; // reuse the old cash notes + client + .send_spends(transfer_to_y.all_spend_requests.iter(), false) + .await?; + info!("Verifying the transfers from A -> Y wallet... 
It should error out."); + let cash_notes_for_y: Vec<_> = transfer_to_y.cash_notes_for_recipient.clone(); + let result = client.verify_cashnote(&cash_notes_for_y[0]).await; + info!("Got result while verifying double spend from A -> Y: {result:?}"); + assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => { + assert!(str.starts_with("Network Error Double spend(s) was detected")); + }); + + // the original A should still be present as one of the double spends + let res = client + .get_spend_from_network(original_a_spend.address()) + .await; + assert_matches!( + res, + Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt( + _ + ))) + ); + if let Err(sn_client::Error::Network(NetworkError::DoubleSpendAttempt(spends))) = res { + assert!(spends.iter().contains(original_a_spend)) + } + } + + Ok(()) +} diff --git a/sn_node/tests/spend_simulation.rs b/sn_node/tests/spend_simulation.rs index 241114d1c8..4c0c4edf0b 100644 --- a/sn_node/tests/spend_simulation.rs +++ b/sn_node/tests/spend_simulation.rs @@ -15,7 +15,7 @@ use itertools::Itertools; use rand::{seq::IteratorRandom, Rng}; use sn_client::Client; use sn_logging::LogBuilder; -use sn_networking::NetworkError; +use sn_networking::{GetRecordError, NetworkError}; use sn_transfers::{ rng, CashNote, DerivationIndex, HotWallet, MainPubkey, NanoTokens, OfflineTransfer, SpendAddress, SpendReason, Transaction, UniquePubkey, @@ -30,23 +30,27 @@ use tokio::sync::mpsc; use tracing::*; const MAX_WALLETS: usize = 15; -const MAX_CYCLES: usize = 5; +const MAX_CYCLES: usize = 10; const AMOUNT_PER_RECIPIENT: NanoTokens = NanoTokens::from(1000); -/// The chance for an attack to happen. 1 in X chance. -const ONE_IN_X_CHANCE_FOR_AN_ATTACK: u32 = 2; +/// The chance for an double spend to happen. 1 in X chance. 
+const ONE_IN_X_CHANCE_FOR_AN_ATTACK: u32 = 3; enum WalletAction { Send { recipients: Vec<(NanoTokens, MainPubkey, DerivationIndex)>, }, DoubleSpend { - cashnotes: Vec, + input_cashnotes_to_double_spend: Vec, to: (NanoTokens, MainPubkey, DerivationIndex), }, ReceiveCashNotes { from: WalletId, cashnotes: Vec, }, + NotifyAboutInvalidCashNote { + from: WalletId, + cashnote: Vec, + }, } enum WalletTaskResult { @@ -67,13 +71,32 @@ enum WalletTaskResult { id: WalletId, received_cash_note: Vec, }, + NotifyAboutInvalidCashNoteSuccess { + id: WalletId, + }, } #[derive(Debug)] enum SpendStatus { Utxo, Spent, - Poisoned, + DoubleSpend, + UtxoWithParentDoubleSpend, +} + +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +enum TransactionStatus { + Valid, + /// All the inputs have been double spent. + DoubleSpentInputs, +} + +// Just for printing things +#[derive(Debug)] +enum AttackType { + Poison, + DoubleSpendAllUxtoOutputs, + DoubleSpendPartialUtxoOutputs, } #[derive(Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash)] @@ -105,11 +128,14 @@ struct State { cashnotes_per_wallet: BTreeMap>, /// The map from WalletId to the outbound transactions that it has ever sent. outbound_transactions_per_wallet: BTreeMap>, + /// The status of each transaction + transaction_status: BTreeMap, } #[derive(Debug, Default)] struct PendingTasksTracker { pending_send_results: Vec, + pending_notify_invalid_cashnotes_results: Vec, pending_receive_results: Vec, } @@ -119,6 +145,7 @@ struct PendingTasksTracker { /// 1. A double spend of a transaction whose outputs are partially spent / partially UTXO /// 2. A double spend of a transcation whose outputs are all UTXO. /// 3. Poisoning of a transaction whose outputs are all spent. +/// Todo: Double spend just 1 input spend. Currently we double spend all the inputs. Have TransactionStatus::DoubleSpentInputs(vec) /// /// The test works by having a main loop that sends actions to all the wallets. 
These are then processed by the wallets /// in parallel. The wallets send back the results of the actions to the main loop, this is then tracked and the whole @@ -156,62 +183,93 @@ async fn spend_simulation() -> Result<()> { .iter() .map(|(id, s)| (*id, s.clone())) .collect_vec(); - for (id, action_sender) in iter { + for (our_id, action_sender) in iter { tokio::time::sleep(Duration::from_secs(3)).await; - let illicit_spend = rng.gen::() % ONE_IN_X_CHANCE_FOR_AN_ATTACK == 0; - - if illicit_spend { - let tx = get_tx_to_attack(id, &state)?; - if let Some(tx) = tx { - let mut input_cash_notes = Vec::new(); - for input in &tx.inputs { - let (status, cashnote) = state - .cashnote_tracker - .get_mut(&input.unique_pubkey) - .ok_or_eyre("Input spend not tracked")?; - *status = SpendStatus::Poisoned; - input_cash_notes.push(cashnote.clone()); + let try_performing_illicit_spend = + rng.gen::() % ONE_IN_X_CHANCE_FOR_AN_ATTACK == 0; + + let mut illicit_spend_done = false; + if try_performing_illicit_spend { + if let Some(( + input_cashnotes_to_double_spend, + output_cashnotes_that_are_unspendable, + amount, + attack_type, + )) = get_cashnotes_to_double_spend(our_id, &mut state)? + { + // tell wallets about the cashnotes that will become invalid after we perform the double spend. + if !output_cashnotes_that_are_unspendable.is_empty() { + info!("{our_id} is notifying wallets about invalid cashnotes: {output_cashnotes_that_are_unspendable:?}"); + for (i, sender) in state.action_senders.iter() { + sender + .send(WalletAction::NotifyAboutInvalidCashNote { + from: our_id, + cashnote: output_cashnotes_that_are_unspendable.clone(), + }) + .await?; + pending_task_results + .pending_notify_invalid_cashnotes_results + .push(*i); + } + // wait until all the wallets have received the notification. Else we'd try to spend those + // cashnotes while a double spend has just gone out. 
+ while !pending_task_results + .pending_notify_invalid_cashnotes_results + .is_empty() + { + let result = result_rx + .recv() + .await + .ok_or_eyre("Senders will not be dropped")?; + + handle_wallet_task_result( + &mut state, + result, + &mut pending_task_results, + ) + .await?; + } } + info!( - "Wallet {id} is attempting to poison a old spend. Marking inputs {:?} as Poisoned", - input_cash_notes - .iter() - .map(|c| c.unique_pubkey()) - .collect_vec() + "{our_id} is now attempting a {attack_type:?} of {} cashnotes.", + input_cashnotes_to_double_spend.len() ); - //gotta make sure the amount adds up to the input, else not all cashnotes will be utilized - let mut input_total_amount = 0; - for cashnote in &input_cash_notes { - input_total_amount += cashnote.value()?.as_nano(); - } + println!( + "{our_id} is attempting a {attack_type:?} of {} cashnotes", + input_cashnotes_to_double_spend.len() + ); + action_sender .send(WalletAction::DoubleSpend { - cashnotes: input_cash_notes, + input_cashnotes_to_double_spend, to: ( - NanoTokens::from(input_total_amount), - state.main_pubkeys[&id], + amount, + state.main_pubkeys[&our_id], DerivationIndex::random(&mut rng), ), }) .await?; - pending_task_results.pending_send_results.push(id); - println!("Wallet {id} is attempting an attack"); - continue; + illicit_spend_done = true; } } - let recipients = get_recipients(id, &state); - let recipients_len = recipients.len(); - action_sender - .send(WalletAction::Send { - recipients: recipients - .into_iter() - .map(|key| (AMOUNT_PER_RECIPIENT, key, DerivationIndex::random(&mut rng))) - .collect_vec(), - }) - .await?; - pending_task_results.pending_send_results.push(id); - println!("Wallet {id} is sending tokens to {recipients_len:?} wallets",); + if !illicit_spend_done { + let recipients = get_recipients(our_id, &state); + let recipients_len = recipients.len(); + action_sender + .send(WalletAction::Send { + recipients: recipients + .into_iter() + .map(|key| { + (AMOUNT_PER_RECIPIENT, 
key, DerivationIndex::random(&mut rng)) + }) + .collect_vec(), + }) + .await?; + println!("{our_id} is sending tokens to {recipients_len:?} wallets"); + } + pending_task_results.pending_send_results.push(our_id); if let Ok(result) = result_rx.try_recv() { handle_wallet_task_result(&mut state, result, &mut pending_task_results).await?; } @@ -252,14 +310,22 @@ fn handle_action_per_wallet( ) { tokio::spawn(async move { let mut wallet = get_wallet(&wallet_dir); + let mut invalid_cashnotes = BTreeSet::new(); while let Some(action) = action_rx.recv().await { - let result = inner_handle_action(our_id, client.clone(), action, &mut wallet).await; + let result = inner_handle_action( + our_id, + client.clone(), + action, + &mut wallet, + &mut invalid_cashnotes, + ) + .await; match result { Ok(ok) => { result_sender.send(ok).await?; } Err(err) => { - error!("TestWallet {our_id} had error handling action : {err}"); + error!("{our_id} had error handling action : {err}"); result_sender .send(WalletTaskResult::Error { id: our_id, @@ -278,13 +344,18 @@ async fn inner_handle_action( client: Client, action: WalletAction, wallet: &mut HotWallet, + invalid_cashnotes: &mut BTreeSet, ) -> Result { match action { WalletAction::Send { recipients } => { - info!("TestWallet {our_id} sending to {recipients:?}"); + info!("{our_id} sending to {recipients:?}"); let (available_cash_notes, exclusive_access) = wallet.available_cash_notes()?; + let available_cash_notes = available_cash_notes + .into_iter() + .filter(|(note, _)| !invalid_cashnotes.contains(¬e.unique_pubkey())) + .collect_vec(); info!( - "TestWallet {our_id} Available CashNotes for local send: {:?}", + "{our_id} Available CashNotes for local send: {:?}", available_cash_notes .iter() .map(|(c, _)| c.unique_pubkey()) @@ -307,7 +378,7 @@ async fn inner_handle_action( .map(|c| c.parent_tx.clone()) .collect::>(); if transaction.len() != 1 { - bail!("TestWallet {our_id}: Transactions should have the same parent tx"); + bail!("{our_id}: 
Transactions should have the same parent tx"); } client @@ -315,7 +386,7 @@ async fn inner_handle_action( .await?; wallet.clear_confirmed_spend_requests(); if !wallet.unconfirmed_spend_requests().is_empty() { - bail!("TestWallet {our_id} has unconfirmed spend requests"); + bail!("{our_id} has unconfirmed spend requests"); } Ok(WalletTaskResult::SendSuccess { @@ -328,23 +399,31 @@ async fn inner_handle_action( .expect("Should've bailed earlier"), }) } - WalletAction::DoubleSpend { cashnotes, to } => { + // todo: we don't track the double spend tx. Track if needed. + WalletAction::DoubleSpend { + input_cashnotes_to_double_spend, + to, + } => { info!( - "TestWallet {our_id} double spending cash notes: {:?}", - cashnotes.iter().map(|c| c.unique_pubkey()).collect_vec() + "{our_id} double spending cash notes: {:?}", + input_cashnotes_to_double_spend + .iter() + .map(|c| c.unique_pubkey()) + .collect_vec() ); - let mut cashnotes_with_key = Vec::with_capacity(cashnotes.len()); - for cashnote in cashnotes { + let mut input_cashnotes_with_key = + Vec::with_capacity(input_cashnotes_to_double_spend.len()); + for cashnote in input_cashnotes_to_double_spend { let derived_key = cashnote.derived_key(wallet.key())?; - cashnotes_with_key.push((cashnote, Some(derived_key))); + input_cashnotes_with_key.push((cashnote, Some(derived_key))); } let transfer = OfflineTransfer::new( - cashnotes_with_key, + input_cashnotes_with_key, vec![to], wallet.address(), SpendReason::default(), )?; - info!("TestWallet {our_id} double spending transfer: {transfer:?}"); + info!("{our_id} double spending transfer: {transfer:?}"); client .send_spends(transfer.all_spend_requests.iter(), false) @@ -353,7 +432,7 @@ async fn inner_handle_action( Ok(WalletTaskResult::DoubleSpendSuccess { id: our_id }) } WalletAction::ReceiveCashNotes { from, cashnotes } => { - info!("TestWallet {our_id} receiving cash note from wallet {from}"); + info!("{our_id} receiving cash note from wallet {from}"); 
wallet.deposit_and_store_to_disk(&cashnotes)?; let our_cash_notes = cashnotes .into_iter() @@ -371,6 +450,14 @@ async fn inner_handle_action( received_cash_note: our_cash_notes, }) } + WalletAction::NotifyAboutInvalidCashNote { from, cashnote } => { + info!( + "{our_id} received notification from {from} about invalid cashnotes: {cashnote:?}. Tracking them" + ); + // we're just keeping track of all invalid cashnotes here, not just ours. filtering is a todo, not required for now. + invalid_cashnotes.extend(cashnote); + Ok(WalletTaskResult::NotifyAboutInvalidCashNoteSuccess { id: our_id }) + } } } @@ -381,7 +468,7 @@ async fn handle_wallet_task_result( ) -> Result<()> { match result { WalletTaskResult::DoubleSpendSuccess { id } => { - info!("TestWallet {id} received a successful double spend result"); + info!("{id} received a successful double spend result"); pending_task_tracker.send_task_completed(id); } WalletTaskResult::SendSuccess { @@ -390,7 +477,10 @@ async fn handle_wallet_task_result( change_cash_note, transaction, } => { - info!("TestWallet {id} received a successful send result. Tracking the outbound transaction {:?}", transaction.hash()); + info!( + "{id} received a successful send result. Tracking the outbound transaction {:?}. 
Also setting status to TransactionStatus::Valid", + transaction.hash() + ); pending_task_tracker.send_task_completed(id); match state.outbound_transactions_per_wallet.entry(id) { Entry::Vacant(entry) => { @@ -400,12 +490,12 @@ async fn handle_wallet_task_result( entry.into_mut().insert(transaction.clone()); } } + state + .transaction_status + .insert(transaction.clone(), TransactionStatus::Valid); // mark the input cashnotes as spent - info!( - "TestWallet {id} marking inputs {:?} as spent", - transaction.inputs - ); + info!("{id} marking inputs {:?} as spent", transaction.inputs); for input in &transaction.inputs { let (status, _cashnote) = state .cashnote_tracker @@ -417,7 +507,7 @@ async fn handle_wallet_task_result( // track the change cashnote that is stored by our wallet. if let Some(change) = change_cash_note { info!( - "TestWallet {id} tracking change cash note {} as UTXO", + "{id} tracking change cash note {} as UTXO", change.unique_pubkey() ); state @@ -429,11 +519,11 @@ async fn handle_wallet_task_result( .cashnote_tracker .insert(change.unique_pubkey(), (SpendStatus::Utxo, change)); if result.is_some() { - bail!("TestWallet {id} received a new cash note that was already tracked"); + bail!("{id} received a new cash note that was already tracked"); } } - info!("TestWallet {id}, sending the recipient cash notes to the other wallets"); + info!("{id}, sending the recipient cash notes to the other wallets"); // send the recipient cash notes to the wallets for cashnote in recipient_cash_notes { let recipient_id = state @@ -461,7 +551,7 @@ async fn handle_wallet_task_result( received_cash_note, } => { info!( - "TestWallet {id} received cashnotes successfully. Marking {:?} as UTXO", + "{id} received cashnotes successfully. 
Marking {:?} as UTXO", received_cash_note .iter() .map(|c| c.unique_pubkey()) @@ -474,21 +564,25 @@ async fn handle_wallet_task_result( .cashnote_tracker .insert(unique_pubkey, (SpendStatus::Utxo, cashnote)); if result.is_some() { - bail!("TestWallet {id} received a new cash note that was already tracked"); + bail!("{id} received a new cash note that was already tracked"); } match state.cashnotes_per_wallet.entry(id) { Entry::Vacant(_) => { - bail!("TestWallet {id} should not be empty, something went wrong.") + bail!("{id} should not be empty, something went wrong.") } Entry::Occupied(entry) => entry.into_mut().push(unique_pubkey), } } } + WalletTaskResult::NotifyAboutInvalidCashNoteSuccess { id } => { + info!("{id} received notification about invalid cashnotes successfully. Marking task as completed."); + pending_task_tracker.notify_invalid_cashnote_task_completed(id); + } WalletTaskResult::Error { id, err } => { - error!("TestWallet {id} had an error: {err}"); + error!("{id} had an error: {err}"); info!("state: {state:?}"); - bail!("TestWallet {id} had an error: {err}"); + bail!("{id} had an error: {err}"); } } Ok(()) @@ -497,28 +591,35 @@ async fn handle_wallet_task_result( async fn verify_wallets(state: &State, client: Client) -> Result<()> { for (id, spends) in state.cashnotes_per_wallet.iter() { println!("Verifying wallet {id}"); - info!("TestWallet {id} verifying {} spends", spends.len()); + info!("{id} verifying {} spends", spends.len()); let mut wallet = get_wallet(state.all_wallets.get(id).expect("Wallet not found")); let (available_cash_notes, _lock) = wallet.available_cash_notes()?; - for spend in spends { + for (num, spend) in spends.iter().enumerate() { let (status, _cashnote) = state .cashnote_tracker .get(spend) .ok_or_eyre("Something went wrong. 
Spend not tracked")?; - info!("TestWallet {id} verifying status of spend: {spend:?} : {status:?}"); + info!("{id} verifying status of spend number({num:?}): {spend:?} : {status:?}"); match status { SpendStatus::Utxo => { available_cash_notes .iter() .find(|(c, _)| &c.unique_pubkey() == spend) .ok_or_eyre("UTXO not found in wallet")?; - // todo: should not be present in the network. + let addr = SpendAddress::from_unique_pubkey(spend); + let result = client.peek_a_spend(addr).await; + assert_matches!( + result, + Err(sn_client::Error::Network(NetworkError::GetRecordError( + GetRecordError::RecordNotFound + ))) + ); } SpendStatus::Spent => { let addr = SpendAddress::from_unique_pubkey(spend); let _spend = client.get_spend_from_network(addr).await?; } - SpendStatus::Poisoned => { + SpendStatus::DoubleSpend => { let addr = SpendAddress::from_unique_pubkey(spend); let result = client.get_spend_from_network(addr).await; assert_matches!( @@ -530,7 +631,23 @@ async fn verify_wallets(state: &State, client: Client) -> Result<()> { // todo: for poison the outputs should still be valid + create a spend with this input and it should pass. // for double spend: try to create a spend with this input and it should fail. 
} + SpendStatus::UtxoWithParentDoubleSpend => { + // should not have been spent (we're tracking this internally in the test) + available_cash_notes + .iter() + .find(|(c, _)| &c.unique_pubkey() == spend) + .ok_or_eyre("UTXO not found in wallet")?; + let addr = SpendAddress::from_unique_pubkey(spend); + let result = client.peek_a_spend(addr).await; + assert_matches!( + result, + Err(sn_client::Error::Network(NetworkError::GetRecordError( + GetRecordError::RecordNotFound + ))) + ); + } } + info!("{id} successfully verified spend number({num:?}): {spend:?} : {status:?}"); } } println!("All wallets verified successfully"); @@ -548,6 +665,7 @@ async fn init_state(count: usize) -> Result<(Client, State)> { cashnote_tracker: BTreeMap::new(), cashnotes_per_wallet: BTreeMap::new(), outbound_transactions_per_wallet: BTreeMap::new(), + transaction_status: BTreeMap::new(), }; for i in 0..count { @@ -598,7 +716,7 @@ async fn init_state(count: usize) -> Result<(Client, State)> { let mut wallet = get_wallet(state.all_wallets.get(id).expect("Id should be present")); wallet.deposit_and_store_to_disk(&transfer.cash_notes_for_recipient)?; trace!( - "TestWallet {id} with main_pubkey: {address:?} has balance: {}", + "{id} with main_pubkey: {address:?} has balance: {}", wallet.balance() ); assert_eq!(wallet.balance(), amount); @@ -628,7 +746,7 @@ fn get_recipients(our_id: WalletId, state: &State) -> Vec { let mut recipients = Vec::new(); let mut random_number = our_id; - while random_number != our_id { + while random_number == our_id { random_number = WalletId(rand::thread_rng().gen_range(0..state.main_pubkeys.len())); } recipients.push(state.main_pubkeys[&random_number]); @@ -640,49 +758,178 @@ fn get_recipients(our_id: WalletId, state: &State) -> Vec { } } - info!("TestWallet {our_id} the recipients for send are: {recipients:?}"); + info!("{our_id} the recipients for send are: {recipients:?}"); recipients } -fn get_tx_to_attack(our_id: WalletId, state: &State) -> Result> { +/// Checks 
our state and tries to perform double spends in these order: +/// Poison old spend whose outputs are all spent. +/// Double spend a transaction whose outputs are partially spent / partially UTXO +/// Double spend a transaction whose outputs are all UTXO. +/// Returns the set of input cashnotes to double spend and the keys of the output cashnotes that will be unspendable +/// after the attack. +#[allow(clippy::type_complexity)] +fn get_cashnotes_to_double_spend( + our_id: WalletId, + state: &mut State, +) -> Result, Vec, NanoTokens, AttackType)>> { let mut rng = rand::thread_rng(); + let mut attack_type; + let mut cashnotes_to_double_spend; + + cashnotes_to_double_spend = get_random_transaction_to_poison(our_id, state, &mut rng)?; + attack_type = AttackType::Poison; + + if cashnotes_to_double_spend.is_none() { + cashnotes_to_double_spend = + get_random_transaction_with_partially_spent_output(our_id, state, &mut rng)?; + attack_type = AttackType::DoubleSpendPartialUtxoOutputs; + } + if cashnotes_to_double_spend.is_none() { + cashnotes_to_double_spend = + get_random_transaction_with_all_unspent_output(our_id, state, &mut rng)?; + attack_type = AttackType::DoubleSpendAllUxtoOutputs; + } + + if let Some((cashnotes_to_double_spend, output_cash_notes_that_are_unspendable)) = + cashnotes_to_double_spend + { + //gotta make sure the amount adds up to the input, else not all cashnotes will be utilized + let mut input_total_amount = 0; + for cashnote in &cashnotes_to_double_spend { + input_total_amount += cashnote.value()?.as_nano(); + } + return Ok(Some(( + cashnotes_to_double_spend, + output_cash_notes_that_are_unspendable, + NanoTokens::from(input_total_amount), + attack_type, + ))); + } + + Ok(None) +} + +/// Returns the input cashnotes of a random transaction whose: outputs are all spent. +/// This also modified the status of the cashnote. 
+fn get_random_transaction_to_poison( + our_id: WalletId, + state: &mut State, + rng: &mut rand::rngs::ThreadRng, +) -> Result, Vec)>> { let Some(our_transactions) = state.outbound_transactions_per_wallet.get(&our_id) else { - info!("TestWallet {our_id} has no outbound transactions yet. Skipping attack"); + info!("{our_id} has no outbound transactions yet. Skipping double spend"); return Ok(None); }; if our_transactions.is_empty() { - info!("TestWallet {our_id} has no outbound transactions yet. Skipping attack"); + info!("{our_id} has no outbound transactions yet. Skipping double spend"); return Ok(None); } - let poisonable_tx = find_all_poisonable_spends(our_transactions, state)?; + // A spend / transaction is poisonable if all of its outputs are already spent. + let mut poisonable_tx = Vec::new(); + for tx in our_transactions { + let tx_status = state + .transaction_status + .get(tx) + .ok_or_eyre("The tx should be present")?; + // This tx has already been attacked. Skip. + if tx_status == &TransactionStatus::DoubleSpentInputs { + continue; + } + let mut utxo_found = false; + for output in &tx.outputs { + let (status, _) = state + .cashnote_tracker + .get(output.unique_pubkey()) + .ok_or_eyre(format!( + "Output {} not found in cashnote tracker", + output.unique_pubkey() + ))?; + + if let SpendStatus::Utxo = *status { + utxo_found = true; + break; + } + } + if !utxo_found { + poisonable_tx.push(tx); + } + } if !poisonable_tx.is_empty() { let random_tx = poisonable_tx .into_iter() - .choose(&mut rng) + .choose(rng) .ok_or_eyre("Cannot choose a random tx")?; + // update the tx status + *state + .transaction_status + .get_mut(random_tx) + .ok_or_eyre("The tx should be present")? = TransactionStatus::DoubleSpentInputs; info!( - "TestWallet {our_id}. Poisoning transaction {:?}", - random_tx.hash() + "{our_id} is attempting to double spend a transaction {:?} whose outputs all ALL spent. 
Setting tx status to TransactionStatus::DoubleSpentInputs", random_tx.hash() + ); + info!( + "{our_id} is marking inputs {:?} as DoubleSpend", + random_tx + .inputs + .iter() + .map(|i| i.unique_pubkey()) + .collect_vec() ); - return Ok(Some(random_tx.clone())); + let mut cashnotes_to_double_spend = Vec::new(); + for input in &random_tx.inputs { + let (status, cashnote) = state + .cashnote_tracker + .get_mut(&input.unique_pubkey) + .ok_or_eyre("Input spend not tracked")?; + *status = SpendStatus::DoubleSpend; + cashnotes_to_double_spend.push(cashnote.clone()); + } + + return Ok(Some((cashnotes_to_double_spend, vec![]))); } Ok(None) } -/// A spend / transaction is poisonable if all of its outputs are already spent. -fn find_all_poisonable_spends<'a>( - our_transactions: &'a BTreeSet, - state: &State, -) -> Result> { - let mut poisonable_tx = Vec::new(); +/// Returns the input cashnotes of a random transaction whose: outputs are partially spent / partially UTXO. +/// Also returns the uniquepub key of output UTXOs that will be unspendable after the attack. This info is sent to +/// each wallet, so that they don't try to spend these outputs. +/// This also modified the status of the cashnote. +fn get_random_transaction_with_partially_spent_output( + our_id: WalletId, + state: &mut State, + rng: &mut rand::rngs::ThreadRng, +) -> Result, Vec)>> { + let Some(our_transactions) = state.outbound_transactions_per_wallet.get(&our_id) else { + info!("{our_id} has no outbound transactions yet. Skipping double spend"); + return Ok(None); + }; + + if our_transactions.is_empty() { + info!("{our_id} has no outbound transactions yet. Skipping double spend"); + return Ok(None); + } + + // The list of transactions that have outputs that are partially spent / partially UTXO. + let mut double_spendable_tx = Vec::new(); for tx in our_transactions { + let tx_status = state + .transaction_status + .get(tx) + .ok_or_eyre("The tx should be present")?; + // This tx has already been attacked. 
Skip. + if tx_status == &TransactionStatus::DoubleSpentInputs { + continue; + } let mut utxo_found = false; + let mut spent_output_found = false; + let mut change_cashnote_found = false; for output in &tx.outputs { - let (status, _) = state + let (status, cashnote) = state .cashnote_tracker .get(output.unique_pubkey()) .ok_or_eyre(format!( @@ -690,21 +937,206 @@ fn find_all_poisonable_spends<'a>( output.unique_pubkey() ))?; + match status { + SpendStatus::Utxo => { + // skip if the cashnote is the change. The test can't progress if we make the change unspendable. + if cashnote.value()? > NanoTokens::from(AMOUNT_PER_RECIPIENT.as_nano()*10) { + change_cashnote_found = true; + break; + } + utxo_found = true; + }, + SpendStatus::UtxoWithParentDoubleSpend => bail!("UtxoWithParentDoubleSpend should not be present here. We skip txs that has been attacked"), + SpendStatus::Spent + // DoubleSpend can be present. TransactionStatus::DoubleSpentInputs means that inputs are double spent, we skip those. + // So the output with DoubleSpend will be present here. + | SpendStatus::DoubleSpend => spent_output_found = true, + + } + } + if change_cashnote_found { + continue; + } else if utxo_found && spent_output_found { + double_spendable_tx.push(tx); + } + } + + if !double_spendable_tx.is_empty() { + let random_tx = double_spendable_tx + .into_iter() + .choose(rng) + .ok_or_eyre("Cannot choose a random tx")?; + // update the tx status + *state + .transaction_status + .get_mut(random_tx) + .ok_or_eyre("The tx should be present")? = TransactionStatus::DoubleSpentInputs; + + info!("{our_id} is attempting to double spend a transaction {:?} whose outputs are partially spent. 
Setting tx status to TransactionStatus::DoubleSpentInputs", random_tx.hash()); + info!( + "{our_id} is marking inputs {:?} as DoubleSpend", + random_tx + .inputs + .iter() + .map(|i| i.unique_pubkey()) + .collect_vec() + ); + + let mut cashnotes_to_double_spend = Vec::new(); + for input in &random_tx.inputs { + let (status, cashnote) = state + .cashnote_tracker + .get_mut(&input.unique_pubkey) + .ok_or_eyre("Input spend not tracked")?; + *status = SpendStatus::DoubleSpend; + cashnotes_to_double_spend.push(cashnote.clone()); + } + + let mut marked_output_as_cashnotes_unspendable_utxo = Vec::new(); + for output in &random_tx.outputs { + let (status, cashnote) = state + .cashnote_tracker + .get_mut(output.unique_pubkey()) + .ok_or_eyre("Output spend not tracked")?; if let SpendStatus::Utxo = *status { - utxo_found = true; - break; + *status = SpendStatus::UtxoWithParentDoubleSpend; + marked_output_as_cashnotes_unspendable_utxo.push(cashnote.unique_pubkey); } } - if !utxo_found { - poisonable_tx.push(tx); + info!( + "{our_id} is marking some outputs {:?} as UtxoWithParentDoubleSpend", + marked_output_as_cashnotes_unspendable_utxo + ); + + return Ok(Some(( + cashnotes_to_double_spend, + marked_output_as_cashnotes_unspendable_utxo, + ))); + } + + Ok(None) +} + +/// Returns the input cashnotes of a random transaction whose: outputs are all UTXO. +/// Also returns the uniquepub key of output UTXOs that will be unspendable after the attack. This info is sent to +/// each wallet, so that they don't try to spend these outputs. +/// This also modified the status of the cashnote. +fn get_random_transaction_with_all_unspent_output( + our_id: WalletId, + state: &mut State, + rng: &mut rand::rngs::ThreadRng, +) -> Result, Vec)>> { + let Some(our_transactions) = state.outbound_transactions_per_wallet.get(&our_id) else { + info!("{our_id} has no outbound transactions yet. 
Skipping double spend"); + return Ok(None); + }; + + if our_transactions.is_empty() { + info!("{our_id} has no outbound transactions yet. Skipping double spend"); + return Ok(None); + } + + let mut double_spendable_tx = Vec::new(); + for tx in our_transactions { + let tx_status = state + .transaction_status + .get(tx) + .ok_or_eyre("The tx should be present")?; + if tx_status == &TransactionStatus::DoubleSpentInputs { + continue; + } + let mut all_utxos = true; + let mut change_cashnote_found = false; + for output in &tx.outputs { + let (status, cashnote) = state + .cashnote_tracker + .get(output.unique_pubkey()) + .ok_or_eyre(format!( + "Output {} not found in cashnote tracker", + output.unique_pubkey() + ))?; + + match status { + SpendStatus::Utxo => { + // skip if the cashnote is the change. The test can't progress if we make the change unspendable. + if cashnote.value()? > NanoTokens::from(AMOUNT_PER_RECIPIENT.as_nano()*10) { + change_cashnote_found = true; + break; + } + } + SpendStatus::UtxoWithParentDoubleSpend => bail!("UtxoWithParentDoubleSpend should not be present here. We skip txs that has been attacked"), + _ => { + all_utxos = false; + break; + } + } + } + if change_cashnote_found { + continue; + } else if all_utxos { + double_spendable_tx.push(tx); + } + } + + if !double_spendable_tx.is_empty() { + let random_tx = double_spendable_tx + .into_iter() + .choose(rng) + .ok_or_eyre("Cannot choose a random tx")?; + // update the tx status + *state + .transaction_status + .get_mut(random_tx) + .ok_or_eyre("The tx should be present")? = TransactionStatus::DoubleSpentInputs; + + info!("{our_id} is attempting to double spend a transaction {:?} whose outputs are all UTXO. 
Setting tx status to TransactionStatus::DoubleSpentInputs", random_tx.hash()); + info!( + "{our_id} is marking inputs {:?} as DoubleSpend", + random_tx + .inputs + .iter() + .map(|i| i.unique_pubkey()) + .collect_vec() + ); + + let mut cashnotes_to_double_spend = Vec::new(); + for input in &random_tx.inputs { + let (status, cashnote) = state + .cashnote_tracker + .get_mut(&input.unique_pubkey) + .ok_or_eyre("Input spend not tracked")?; + *status = SpendStatus::DoubleSpend; + cashnotes_to_double_spend.push(cashnote.clone()); } + + let mut marked_output_cashnotes_as_unspendable_utxo = Vec::new(); + for output in &random_tx.outputs { + let (status, cashnote) = state + .cashnote_tracker + .get_mut(output.unique_pubkey()) + .ok_or_eyre("Output spend not tracked")?; + *status = SpendStatus::UtxoWithParentDoubleSpend; + marked_output_cashnotes_as_unspendable_utxo.push(cashnote.unique_pubkey); + } + info!( + "{our_id} is marking all outputs {:?} as UtxoWithParentDoubleSpend", + marked_output_cashnotes_as_unspendable_utxo + ); + + return Ok(Some(( + cashnotes_to_double_spend, + marked_output_cashnotes_as_unspendable_utxo, + ))); } - Ok(poisonable_tx) + + Ok(None) } impl PendingTasksTracker { fn is_empty(&self) -> bool { - self.pending_send_results.is_empty() && self.pending_receive_results.is_empty() + self.pending_send_results.is_empty() + && self.pending_receive_results.is_empty() + && self.pending_notify_invalid_cashnotes_results.is_empty() } fn send_task_completed(&mut self, id: WalletId) { @@ -724,4 +1156,13 @@ impl PendingTasksTracker { .unwrap_or_else(|| panic!("Receive task for {id} was not found ")); self.pending_receive_results.remove(pos); } + + fn notify_invalid_cashnote_task_completed(&mut self, id: WalletId) { + let pos = self + .pending_notify_invalid_cashnotes_results + .iter() + .position(|x| *x == id) + .unwrap_or_else(|| panic!("Notify invalid cashnote task for {id} was not found ")); + self.pending_notify_invalid_cashnotes_results.remove(pos); + } } diff 
--git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml index 8159f0b502..b5635edc6b 100644 --- a/sn_node_manager/Cargo.toml +++ b/sn_node_manager/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn-node-manager" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.9.7" +version = "0.10.0" [[bin]] name = "safenode-manager" @@ -44,12 +44,12 @@ semver = "1.0.20" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.29" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.3.5" } -sn_protocol = { path = "../sn_protocol", version = "0.17.4" } -sn_service_management = { path = "../sn_service_management", version = "0.3.7" } +sn_logging = { path = "../sn_logging", version = "0.2.30" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } +sn_protocol = { path = "../sn_protocol", version = "0.17.5" } +sn_service_management = { path = "../sn_service_management", version = "0.3.8" } sn-releases = "0.2.6" -sn_transfers = { path = "../sn_transfers", version = "0.18.7" } +sn_transfers = { path = "../sn_transfers", version = "0.18.8" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.26", features = ["full"] } diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index 5ab5bcfa08..5f59246281 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_node_rpc_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.6.24" +version = "0.6.25" [[bin]] name = "safenode_rpc_client" @@ -23,13 +23,13 @@ color-eyre = "0.6.2" hex = "~0.4.3" libp2p = { version="0.53", features = ["kad"]} libp2p-identity = { version="0.2.7", features = ["rand"] } -sn_client = { path = "../sn_client", version = "0.107.9" } -sn_logging = { path = "../sn_logging", version = 
"0.2.29" } -sn_node = { path = "../sn_node", version = "0.108.4" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.3.5" } -sn_protocol = { path = "../sn_protocol", version = "0.17.4", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.3.7" } -sn_transfers = { path = "../sn_transfers", version = "0.18.7" } +sn_client = { path = "../sn_client", version = "0.108.0" } +sn_logging = { path = "../sn_logging", version = "0.2.30" } +sn_node = { path = "../sn_node", version = "0.109.0" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } +sn_protocol = { path = "../sn_protocol", version = "0.17.5", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.3.8" } +sn_transfers = { path = "../sn_transfers", version = "0.18.8" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index c86cc6ad13..8f4d7180dd 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.5" +version = "0.4.0" [features] local-discovery = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version="0.53", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_networking = { path = "../sn_networking", version = "0.16.6", optional = true} +sn_networking = { path = "../sn_networking", version = "0.17.0", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false} tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 36eda75be4..8a7ebe6ea5 100644 --- a/sn_protocol/Cargo.toml 
+++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.4" +version = "0.17.5" [features] default = [] @@ -27,8 +27,8 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_transfers = { path = "../sn_transfers", version = "0.18.7" } -sn_registers = { path = "../sn_registers", version = "0.3.14" } +sn_transfers = { path = "../sn_transfers", version = "0.18.8" } +sn_registers = { path = "../sn_registers", version = "0.3.15" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index 3adf8e6e1f..d5d37195db 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.14" +version = "0.3.15" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index 4616295579..63b07ff84b 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.7" +version = "0.3.8" [dependencies] async-trait = "0.1" @@ -19,11 +19,11 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" semver = "1.0.20" service-manager = "0.7.0" -sn_logging = { path = "../sn_logging", version = "0.2.29" } -sn_protocol = { path = "../sn_protocol", version = "0.17.4", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.30" } +sn_protocol = { path = "../sn_protocol", version = "0.17.5", features = [ "rpc", ] } -sn_transfers = { path = "../sn_transfers", version = "0.18.7" } 
+sn_transfers = { path = "../sn_transfers", version = "0.18.8" } sysinfo = "0.30.12" thiserror = "1.0.23" tokio = { version = "1.32.0", features = ["time"] } diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml index f96d8f425d..bdd83aac7d 100644 --- a/sn_transfers/Cargo.toml +++ b/sn_transfers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_transfers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.18.7" +version = "0.18.8" [features] reward-forward = [] diff --git a/sn_transfers/src/cashnotes/signed_spend.rs b/sn_transfers/src/cashnotes/signed_spend.rs index 6a43e297e3..89d94110e3 100644 --- a/sn_transfers/src/cashnotes/signed_spend.rs +++ b/sn_transfers/src/cashnotes/signed_spend.rs @@ -235,27 +235,28 @@ impl std::hash::Hash for SignedSpend { } /// Represents the data to be signed by the DerivedSecretKey of the CashNote being spent. -#[derive(custom_debug::Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Spend { /// UniquePubkey of input CashNote that this SignedSpend is proving to be spent. pub unique_pubkey: UniquePubkey, /// The transaction that the input CashNote is being spent in (where it is an input) - #[debug(skip)] pub spent_tx: Transaction, /// Reason why this CashNote was spent. - #[debug(skip)] pub reason: SpendReason, /// The amount of the input CashNote. - #[debug(skip)] pub amount: NanoTokens, /// The transaction that the input CashNote was created in (where it is an output) - #[debug(skip)] pub parent_tx: Transaction, /// Data to claim the Network Royalties (if any) from the Spend's descendants (outputs in spent_tx) - #[debug(skip)] pub network_royalties: Vec, } +impl core::fmt::Debug for Spend { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Spend({:?}({:?}))", self.unique_pubkey, self.hash()) + } +} + impl Spend { /// Represent this Spend as bytes. 
/// There is no from_bytes, because this function is not symetric as it uses hashes diff --git a/sn_transfers/src/error.rs b/sn_transfers/src/error.rs index 096f4c915b..b1b8fad5d8 100644 --- a/sn_transfers/src/error.rs +++ b/sn_transfers/src/error.rs @@ -31,6 +31,8 @@ pub enum TransferError { InvalidSpentTx(String), #[error("Invalid parent spend: {0}")] InvalidParentSpend(String), + #[error("Parent spend was double spent")] + DoubleSpentParent, #[error("Invalid Spend Signature for {0:?}")] InvalidSpendSignature(UniquePubkey), #[error("Transaction hash is different from the hash in the the Spend: {0:?} != {1:?}")]