From 1e52e1bb44e8a24b0e90b9c6e426bfd96cb6d7b8 Mon Sep 17 00:00:00 2001 From: Millie C Date: Tue, 7 Mar 2023 20:28:15 -0500 Subject: [PATCH 01/46] Track failed tx proposals in consensus --- .../service/src/api/client_api_service.rs | 44 +++++++++++++++++-- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 0026b40ffa..9d12600ed4 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -27,7 +27,7 @@ use mc_ledger_db::Ledger; use mc_peers::ConsensusValue; use mc_transaction_core::mint::{MintConfigTx, MintTx}; use mc_util_grpc::{check_request_chain_id, rpc_logger, send_result, Authenticator}; -use std::sync::{Arc, Mutex}; +use std::{sync::{Arc, Mutex}, collections::VecDeque, time::{Instant, Duration}}; /// Maximum number of pending values for consensus service before rejecting /// add_transaction requests. @@ -35,7 +35,45 @@ const PENDING_LIMIT: i64 = 500; /// Data retained on a session with a client. #[derive(Clone, Debug)] -pub struct ClientSessionTracking {/* stub - todo: track failed TX proposals */} +pub struct ClientSessionTracking { + // This needs to be a VecDeque because popping elements oldest-first (i.e. + // in a FIFO fashion) needs to be efficient, since we'll be culling the + // oldest tx failure timestamps constantly - if the server is configured to + // drop clients with over 100 failed tx proposals in the last 30 seconds, + // we don't care about timestamps from over 30 seconds ago, for example. 
+ tx_proposal_failures: VecDeque<Instant>, +} + +impl ClientSessionTracking { + pub fn new() -> Self { + Self { + tx_proposal_failures: VecDeque::default(), + } + } + + /// Push a new failed tx proposal record, clear out samples older than + /// our tracking window, and return the number of tx failures remaining + /// on the list - as-in, tells you "there have been x number of failures + /// within the past `tracking_window` seconds". + /// + /// # Arguments + /// + /// * `now` - Used as both the instant to record as a new tx proposal + /// failure, and as the "present" time to check for stale records. + /// * `tracking_window` - How long of a period of time should we keep track + /// of an individual tx proposal failure incident for? Any records which + /// have existed for longer than this value will be dropped when this + /// method is called. + pub fn fail_tx_proposal(&mut self, + now: &Instant, + tracking_window: &Duration) -> usize { + self.tx_proposal_failures.retain(|past_failure| { + now.saturating_duration_since(*past_failure) <= *tracking_window + }); + self.tx_proposal_failures.push_back(now.clone()); + self.tx_proposal_failures.len() + } +} #[derive(Clone)] pub struct ClientApiService { @@ -225,7 +263,7 @@ impl ConsensusClientApi for ClientApiService { // TODO: Update fields } else { // TODO: Populate new session metadata - tracker.put(session, ClientSessionTracking {}); + tracker.put(session, ClientSessionTracking::new()); } } if let Err(err) = check_request_chain_id(&self.config.chain_id, &ctx) { From 9e7465b78a839f64201cbb7dd39325ee75df1cfd Mon Sep 17 00:00:00 2001 From: Millie C Date: Wed, 8 Mar 2023 21:28:18 -0500 Subject: [PATCH 02/46] Push an Instant::now() for every time validation fails on a tx proposal --- .../service/src/api/client_api_service.rs | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 
9d12600ed4..186b255c55 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -130,12 +130,26 @@ impl ClientApiService { msg: Message, ) -> Result { counters::ADD_TX_INITIATED.inc(); + let session_id = ClientSession::from(msg.channel_id.clone()); let tx_context = self.enclave.client_tx_propose(msg.into())?; // Cache the transaction. This performs the well-formedness checks. let tx_hash = self.tx_manager.insert(tx_context).map_err(|err| { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); + + let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. + let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); + let record = if let Some(record) = tracker.get_mut(&session_id) { + record + } else { + tracker.put(session_id.clone(), ClientSessionTracking::new()); + tracker.get_mut(&session_id) + .expect("Adding session-tracking record should be atomic.") + + }; + let _recent_failure_count = record.fail_tx_proposal(&Instant::now(), &tracking_window); + // TODO: drop session when recent_failure_count reaches some number } err })?; @@ -256,16 +270,6 @@ impl ConsensusClientApi for ClientApiService { ) { let _timer = SVC_COUNTERS.req(&ctx); - { - let session = ClientSession::from(msg.channel_id.clone()); - let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); - if let Some(_session_info) = tracker.get(&session) { - // TODO: Update fields - } else { - // TODO: Populate new session metadata - tracker.put(session, ClientSessionTracking::new()); - } - } if let Err(err) = check_request_chain_id(&self.config.chain_id, &ctx) { return send_result(ctx, sink, Err(err), &self.logger); } From 94f99d9982123b4cd01d2e2a4ab74bbd76688293 Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 13 Mar 2023 22:06:34 -0400 Subject: [PATCH 03/46] Fix unnecessary .clone() --- 
consensus/service/src/api/client_api_service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 186b255c55..0b8cea8298 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -70,7 +70,7 @@ impl ClientSessionTracking { self.tx_proposal_failures.retain(|past_failure| { now.saturating_duration_since(*past_failure) <= *tracking_window }); - self.tx_proposal_failures.push_back(now.clone()); + self.tx_proposal_failures.push_back(*now); self.tx_proposal_failures.len() } } From 8b2b437789be6553b4b1a178292f1b1de45c3e7f Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 13 Mar 2023 22:35:31 -0400 Subject: [PATCH 04/46] Cargo fmt --- .../service/src/api/client_api_service.rs | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 0b8cea8298..30d14a28ad 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -27,7 +27,11 @@ use mc_ledger_db::Ledger; use mc_peers::ConsensusValue; use mc_transaction_core::mint::{MintConfigTx, MintTx}; use mc_util_grpc::{check_request_chain_id, rpc_logger, send_result, Authenticator}; -use std::{sync::{Arc, Mutex}, collections::VecDeque, time::{Instant, Duration}}; +use std::{ + collections::VecDeque, + sync::{Arc, Mutex}, + time::{Duration, Instant}, +}; /// Maximum number of pending values for consensus service before rejecting /// add_transaction requests. @@ -41,7 +45,7 @@ pub struct ClientSessionTracking { // oldest tx failure timestamps constantly - if the server is configured to // drop clients with over 100 failed tx proposals in the last 30 seconds, // we don't care about timestamps from over 30 seconds ago, for example. 
- tx_proposal_failures: VecDeque<Instant>, + tx_proposal_failures: VecDeque<Instant>, } impl ClientSessionTracking { @@ -54,8 +58,8 @@ impl ClientSessionTracking { /// Push a new failed tx proposal record, clear out samples older than /// our tracking window, and return the number of tx failures remaining /// on the list - as-in, tells you "there have been x number of failures - /// within the past `tracking_window` seconds". - /// + /// within the past `tracking_window` seconds". + /// /// # Arguments /// /// * `now` - Used as both the instant to record as a new tx proposal /// failure, and as the "present" time to check for stale records. /// * `tracking_window` - How long of a period of time should we keep track /// of an individual tx proposal failure incident for? Any records which /// have existed for longer than this value will be dropped when this /// method is called. - pub fn fail_tx_proposal(&mut self, - now: &Instant, - tracking_window: &Duration) -> usize { + pub fn fail_tx_proposal(&mut self, now: &Instant, tracking_window: &Duration) -> usize { self.tx_proposal_failures.retain(|past_failure| { now.saturating_duration_since(*past_failure) <= *tracking_window }); @@ -138,18 +140,20 @@ impl ClientApiService { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); - let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. 
let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); let record = if let Some(record) = tracker.get_mut(&session_id) { record } else { tracker.put(session_id.clone(), ClientSessionTracking::new()); - tracker.get_mut(&session_id) + tracker + .get_mut(&session_id) .expect("Adding session-tracking record should be atomic.") - }; - let _recent_failure_count = record.fail_tx_proposal(&Instant::now(), &tracking_window); - // TODO: drop session when recent_failure_count reaches some number + let _recent_failure_count = + record.fail_tx_proposal(&Instant::now(), &tracking_window); + // TODO: drop session when recent_failure_count reaches some + // number } err })?; From d818079a32a96af36c18ad27aa894fe31bfa6e73 Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 27 Mar 2023 17:26:50 -0400 Subject: [PATCH 05/46] Return to tracking clients as soon as they submit a proposeTX, per James' suggestion --- consensus/service/src/api/client_api_service.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 30d14a28ad..c7403cb61a 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -274,6 +274,16 @@ impl ConsensusClientApi for ClientApiService { ) { let _timer = SVC_COUNTERS.req(&ctx); + { + let session = ClientSession::from(msg.channel_id.clone()); + let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); + // Calling get() on the LRU bumps the entry to show up as more + // recently-used. 
+ if tracker.get(&session).is_none() { + tracker.put(session, ClientSessionTracking::new()); + } + } + if let Err(err) = check_request_chain_id(&self.config.chain_id, &ctx) { return send_result(ctx, sink, Err(err), &self.logger); } From cf0b0953491bc91aa21649e3c97035c479195c48 Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 27 Mar 2023 17:31:16 -0400 Subject: [PATCH 06/46] Cargo fmt --- consensus/service/src/api/client_api_service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index c7403cb61a..df3d0b67db 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -277,9 +277,9 @@ impl ConsensusClientApi for ClientApiService { { let session = ClientSession::from(msg.channel_id.clone()); let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); - // Calling get() on the LRU bumps the entry to show up as more + // Calling get() on the LRU bumps the entry to show up as more // recently-used. - if tracker.get(&session).is_none() { + if tracker.get(&session).is_none() { tracker.put(session, ClientSessionTracking::new()); } } From 9016e8e2fa771a41336a36fc25ad34ddee5fac44 Mon Sep 17 00:00:00 2001 From: Millie C Date: Tue, 28 Mar 2023 18:42:01 -0400 Subject: [PATCH 07/46] Failure limit on tx proposals --- consensus/api/proto/consensus_config.proto | 20 ++++++++++++++++ consensus/service/config/src/lib.rs | 23 +++++++++++++++++++ .../service/src/api/client_api_service.rs | 22 +++++++++++++----- 3 files changed, 59 insertions(+), 6 deletions(-) diff --git a/consensus/api/proto/consensus_config.proto b/consensus/api/proto/consensus_config.proto index 42a7667078..e8b7e886e5 100644 --- a/consensus/api/proto/consensus_config.proto +++ b/consensus/api/proto/consensus_config.proto @@ -69,4 +69,24 @@ message ConsensusNodeConfig { // SCP message signing key. 
external.Ed25519Public scp_message_signing_key = 8; + + // Maximum number of client session tracking structures to retain in + // a least-recently-used cache. + // + // This corresponds to Config::client_tracking_capacity + uint64 client_tracking_capacity = 9; + + // How many seconds to retain instances of proposed-transaction + // failures, per-client. This is used to implement DOS-protection, + // protecting against clients who make too many failed + // transaction proposals within this span. + uint64 tx_failure_window_seconds = 10; + + // How many tx proposal failures within the rolling window are required + // before it's treated as concerning, thereby tripping denial-of-service + // protection? + // In other words, how many failed transaction proposals are permitted + // within the last tx_failure_window_seconds before a user is + // disconnected or temporarily blocked? + uint32 tx_failure_limit = 11; } diff --git a/consensus/service/config/src/lib.rs b/consensus/service/config/src/lib.rs index e01c53598d..e5dcbeacf1 100644 --- a/consensus/service/config/src/lib.rs +++ b/consensus/service/config/src/lib.rs @@ -136,6 +136,24 @@ pub struct Config { /// config setting to match. #[clap(long, default_value = "10000", env = "MC_CLIENT_TRACKING_CAPACITY")] pub client_tracking_capacity: usize, + + /// How many seconds to retain instances of proposed-transaction + /// failures, per-client. This is used to implement DOS-protection, + /// along the lines of kicking clients who make too many failed transaction + /// proposals within the span of tx_failure_window + // TODO: slam-testing to derive reasonable default + #[clap(long, default_value = "30", value_parser = parse_duration_in_seconds, env = "MC_CLIENT_TX_FAILURE_WINDOW")] + pub tx_failure_window: Duration, + + /// How many tx proposal failures within the rolling window are required + /// before it's treated as concerning, thereby tripping denial-of-service + /// protection? 
+ /// In other words, how many failed transaction proposals are permitted + /// within the last tx_failure_window seconds before a user is + /// disconnected or temporarily blocked? + // TODO: slam-testing to derive reasonable default + #[clap(long, default_value = "16384", env = "MC_CLIENT_TX_FAILURE_LIMIT")] + pub tx_failure_limit: u32, } impl Config { @@ -224,6 +242,9 @@ mod tests { tokens_path: None, block_version: BlockVersion::ZERO, client_tracking_capacity: 4096, + tx_failure_window: Duration::from_secs(30), + tx_failure_limit: 16384, + }; assert_eq!( @@ -293,6 +314,8 @@ mod tests { tokens_path: None, block_version: BlockVersion::ZERO, client_tracking_capacity: 4096, + tx_failure_window: Duration::from_secs(30), + tx_failure_limit: 16384, }; assert_eq!( diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index df3d0b67db..c2af143958 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -13,7 +13,7 @@ use crate::{ use grpcio::{RpcContext, RpcStatus, UnarySink}; use mc_attest_api::attest::Message; use mc_attest_enclave_api::ClientSession; -use mc_common::{logger::Logger, LruCache}; +use mc_common::{logger::{Logger, log}, LruCache}; use mc_consensus_api::{ consensus_client::{ProposeMintConfigTxResponse, ProposeMintTxResponse}, consensus_client_grpc::ConsensusClientApi, @@ -140,7 +140,6 @@ impl ClientApiService { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); - let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. 
let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); let record = if let Some(record) = tracker.get_mut(&session_id) { record @@ -150,10 +149,17 @@ impl ClientApiService { .get_mut(&session_id) .expect("Adding session-tracking record should be atomic.") }; - let _recent_failure_count = - record.fail_tx_proposal(&Instant::now(), &tracking_window); - // TODO: drop session when recent_failure_count reaches some - // number + let recent_failure_count = + record.fail_tx_proposal(&Instant::now(), &self.config.tx_failure_window); + + if (recent_failure_count as u32) >= self.config.tx_failure_limit { + // TODO: Some action to take to counter the harmful traffic + log::warn!(self.logger, + "Client has {} recent failed tx proposals within the last {}", + recent_failure_count, + self.config.tx_failure_window.as_secs_f32() + ); + } } err })?; @@ -261,6 +267,10 @@ impl ClientApiService { response.set_block_version(*self.config.block_version); response.set_scp_message_signing_key((&self.config.msg_signer_key.public_key()).into()); + response.set_client_tracking_capacity(self.config.client_tracking_capacity as u64); + response.set_tx_failure_window_seconds(self.config.tx_failure_window.as_secs()); + response.set_tx_failure_limit(self.config.tx_failure_limit); + Ok(response) } } From c4c9fb2910747fff325ff17b881f2513f82c5e69 Mon Sep 17 00:00:00 2001 From: Millie C Date: Wed, 29 Mar 2023 21:59:58 -0400 Subject: [PATCH 08/46] Cargo fmt. --- consensus/service/config/src/lib.rs | 3 +-- consensus/service/src/api/client_api_service.rs | 10 +++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/consensus/service/config/src/lib.rs b/consensus/service/config/src/lib.rs index e5dcbeacf1..68e6d575da 100644 --- a/consensus/service/config/src/lib.rs +++ b/consensus/service/config/src/lib.rs @@ -138,7 +138,7 @@ pub struct Config { pub client_tracking_capacity: usize, /// How many seconds to retain instances of proposed-transaction - /// failures, per-client. 
This is used to implement DOS-protection, + /// failures, per-client. This is used to implement DOS-protection, /// along the lines of kicking clients who make too many failed transaction /// proposals within the span of tx_failure_window // TODO: slam-testing to derive reasonable default @@ -244,7 +244,6 @@ mod tests { client_tracking_capacity: 4096, tx_failure_window: Duration::from_secs(30), tx_failure_limit: 16384, - }; assert_eq!( diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index c2af143958..d5daf58f3f 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -13,7 +13,10 @@ use crate::{ use grpcio::{RpcContext, RpcStatus, UnarySink}; use mc_attest_api::attest::Message; use mc_attest_enclave_api::ClientSession; -use mc_common::{logger::{Logger, log}, LruCache}; +use mc_common::{ + logger::{log, Logger}, + LruCache, +}; use mc_consensus_api::{ consensus_client::{ProposeMintConfigTxResponse, ProposeMintTxResponse}, consensus_client_grpc::ConsensusClientApi, @@ -152,9 +155,10 @@ impl ClientApiService { let recent_failure_count = record.fail_tx_proposal(&Instant::now(), &self.config.tx_failure_window); - if (recent_failure_count as u32) >= self.config.tx_failure_limit { + if (recent_failure_count as u32) >= self.config.tx_failure_limit { // TODO: Some action to take to counter the harmful traffic - log::warn!(self.logger, + log::warn!( + self.logger, "Client has {} recent failed tx proposals within the last {}", recent_failure_count, self.config.tx_failure_window.as_secs_f32() From 91bf8cda28e02f24342a825b07789b1e84c16b9b Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 30 Mar 2023 19:03:00 -0400 Subject: [PATCH 09/46] Actually close sessions with problem clients --- .../service/src/api/client_api_service.rs | 71 +++++++++++-------- 1 file changed, 43 insertions(+), 28 deletions(-) diff --git 
a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index d5daf58f3f..d57a8af3b1 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -135,35 +135,12 @@ impl ClientApiService { msg: Message, ) -> Result { counters::ADD_TX_INITIATED.inc(); - let session_id = ClientSession::from(msg.channel_id.clone()); let tx_context = self.enclave.client_tx_propose(msg.into())?; // Cache the transaction. This performs the well-formedness checks. let tx_hash = self.tx_manager.insert(tx_context).map_err(|err| { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); - - let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); - let record = if let Some(record) = tracker.get_mut(&session_id) { - record - } else { - tracker.put(session_id.clone(), ClientSessionTracking::new()); - tracker - .get_mut(&session_id) - .expect("Adding session-tracking record should be atomic.") - }; - let recent_failure_count = - record.fail_tx_proposal(&Instant::now(), &self.config.tx_failure_window); - - if (recent_failure_count as u32) >= self.config.tx_failure_limit { - // TODO: Some action to take to counter the harmful traffic - log::warn!( - self.logger, - "Client has {} recent failed tx proposals within the last {}", - recent_failure_count, - self.config.tx_failure_window.as_secs_f32() - ); - } } err })?; @@ -288,13 +265,14 @@ impl ConsensusClientApi for ClientApiService { ) { let _timer = SVC_COUNTERS.req(&ctx); + let session_id = ClientSession::from(msg.channel_id.clone()); + { - let session = ClientSession::from(msg.channel_id.clone()); let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); // Calling get() on the LRU bumps the entry to show up as more // recently-used. 
- if tracker.get(&session).is_none() { - tracker.put(session, ClientSessionTracking::new()); + if tracker.get(&session_id).is_none() { + tracker.put(session_id.clone(), ClientSessionTracking::new()); } } @@ -322,8 +300,45 @@ impl ConsensusClientApi for ClientApiService { ConsensusGrpcError::NotServing.into() } } else { - self.handle_proposed_tx(msg) - .or_else(ConsensusGrpcError::into) + let result = self.handle_proposed_tx(msg); + // The block present below rate-limits suspicious behavior. + if let Err(err) = &result { + let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); + let record = if let Some(record) = tracker.get_mut(&session_id) { + record + } else { + tracker.put(session_id.clone(), ClientSessionTracking::new()); + tracker + .get_mut(&session_id) + .expect("Adding session-tracking record should be atomic.") + }; + let recent_failure_count = + record.fail_tx_proposal(&Instant::now(), &self.config.tx_failure_window); + + if (recent_failure_count as u32) >= self.config.tx_failure_limit { + log::warn!( + self.logger, + "Client has {} recent failed tx proposals within the \ + last {} seconds - dropping connection. \n\ + Most recent proposal failure reason was: {:?}", + recent_failure_count, + self.config.tx_failure_window.as_secs_f32(), + err + ); + // Rate-limiting is performed at the auth endpoint, so + // merely dropping the connection will be enough. 
+ let close_result = self.enclave.client_close(session_id.clone()); + if let Err(e) = close_result { + log::error!( + self.logger, + "Failed to drop session {:?} due to: {:?}", + &session_id, + e + ); + } + } + } + result.or_else(ConsensusGrpcError::into) }; result = result.and_then(|mut response| { From ed3ecfeb52eacdeb2c9d38f63cc9b379eab2ba82 Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 30 Mar 2023 19:03:08 -0400 Subject: [PATCH 10/46] Cargo fmt --- consensus/service/src/api/client_api_service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index d57a8af3b1..14d7dec47b 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -300,9 +300,9 @@ impl ConsensusClientApi for ClientApiService { ConsensusGrpcError::NotServing.into() } } else { - let result = self.handle_proposed_tx(msg); + let result = self.handle_proposed_tx(msg); // The block present below rate-limits suspicious behavior. - if let Err(err) = &result { + if let Err(err) = &result { let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); let record = if let Some(record) = tracker.get_mut(&session_id) { record From df612566d1de339db670e9b4c7912ac8f4588715 Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 30 Mar 2023 19:08:23 -0400 Subject: [PATCH 11/46] Adding a comment --- consensus/service/src/api/client_api_service.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 14d7dec47b..367ff2128c 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -328,6 +328,11 @@ impl ConsensusClientApi for ClientApiService { // Rate-limiting is performed at the auth endpoint, so // merely dropping the connection will be enough. 
let close_result = self.enclave.client_close(session_id.clone()); + // At the time of writing (30th March, 2023), it should + // only be possible for client_close() to error if a + // mutex is poisoned. However, because the + // implementation of this method might change, it + // seems wise to handle any error this might throw. if let Err(e) = close_result { log::error!( self.logger, From ee2c4754ea3006d00800135a460ac7bf850aef0d Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 30 Mar 2023 19:08:34 -0400 Subject: [PATCH 12/46] Cargo fmt --- consensus/service/src/api/client_api_service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 367ff2128c..bedf901204 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -332,7 +332,7 @@ impl ConsensusClientApi for ClientApiService { // only be possible for client_close() to error if a // mutex is poisoned. However, because the // implementation of this method might change, it - // seems wise to handle any error this might throw. + // seems wise to handle any error this might throw. 
if let Err(e) = close_result { log::error!( self.logger, From 72cd0d5ca62a9f85daf36d1205e7cec6a0fe9d38 Mon Sep 17 00:00:00 2001 From: Millie C Date: Fri, 31 Mar 2023 20:34:09 -0400 Subject: [PATCH 13/46] Test ensuring client_close() is called when limit is hit --- .../service/src/api/client_api_service.rs | 84 +++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index bedf901204..8474d4524e 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -1859,4 +1859,88 @@ mod client_api_tests { .expect("Attempt to lock session-tracking mutex failed."); assert_eq!(tracker.len(), 1); } + + #[test_with_logger] + #[serial(counters)] + fn test_get_kicked_failure_limit(logger: Logger) { + let limit = 3; + + let mut consensus_enclave = MockConsensusEnclave::new(); + + let scp_client_value_sender = Arc::new( + |_value: ConsensusValue, + _node_id: Option<&NodeID>, + _responder_id: Option<&ResponderId>| { + // TODO: store inputs for inspection. + }, + ); + + const NUM_BLOCKS: u64 = 5; + let mut ledger = MockLedger::new(); + ledger + .expect_num_blocks() + .times(limit as usize) + .return_const(Ok(NUM_BLOCKS)); + + let tx_manager = MockTxManager::new(); + let is_serving_fn = Arc::new(|| -> bool { true }); + let authenticator = AnonymousAuthenticator::default(); + + const LRU_CAPACITY: usize = 4096; + let tracked_sessions = Arc::new(Mutex::new(LruCache::new(LRU_CAPACITY))); + + let mut config = get_config(); + // Permit only 3 failed transactions + config.tx_failure_limit = limit; + + // Cause the mock enclave to consistently fail each request. 
+ consensus_enclave + .expect_client_tx_propose() + .times(limit as usize) + .return_const( + Err( + EnclaveError::MalformedTx( + TransactionValidationError::ContainsSpentKeyImage + ) + ) + ); + // Expect a close, since this will be exceeding our limit + consensus_enclave.expect_client_close().times(1).return_const(Ok(())); + + let instance = ClientApiService::new( + config, + Arc::new(consensus_enclave), + scp_client_value_sender, + Arc::new(ledger), + Arc::new(tx_manager), + Arc::new(MockMintTxManager::new()), + is_serving_fn, + Arc::new(authenticator), + logger, + // Clone this, maintaining our own Arc reference into the tracked + // sessions structure so that we can inspect it later. + tracked_sessions.clone(), + ); + + // gRPC client and server. + let (client, _server) = get_client_server(instance); + let message = Message::default(); + + for _ in 0..limit { + println!("Sending request"); + let propose_tx_response = client + .client_tx_propose(&message) + .expect("Client tx propose error"); + assert_eq!(propose_tx_response.get_result(), ProposeTxResult::ContainsSpentKeyImage); + } + + let tracker = tracked_sessions + .lock() + .expect("Attempt to lock session-tracking mutex failed."); + assert_eq!(tracker.len(), 1); + let (_session_id, tracking_data) = tracker.iter().next().unwrap(); + assert_eq!(tracking_data.tx_proposal_failures.len(), limit as usize); + // Because of the behavior of Mockall, if this returns without calling + // client_close() exactly once, it will panic and the test will fail. 
+ } } From 232f14915b92ec40d20034dcd95ead5eec8f2e4f Mon Sep 17 00:00:00 2001 From: Millie C Date: Fri, 31 Mar 2023 20:34:51 -0400 Subject: [PATCH 14/46] Cargo fmt --- .../service/src/api/client_api_service.rs | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 8474d4524e..be8924d4ac 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -1863,7 +1863,7 @@ mod client_api_tests { #[test_with_logger] #[serial(counters)] fn test_get_kicked_failure_limit(logger: Logger) { - let limit = 3; + let limit = 3; let mut consensus_enclave = MockConsensusEnclave::new(); @@ -1893,19 +1893,18 @@ mod client_api_tests { // Permit only 3 failed transactions config.tx_failure_limit = limit; - // Cause the mock enclave to consistently fail each request. + // Cause the mock enclave to consistently fail each request. 
consensus_enclave .expect_client_tx_propose() .times(limit as usize) - .return_const( - Err( - EnclaveError::MalformedTx( - TransactionValidationError::ContainsSpentKeyImage - ) - ) - ); + .return_const(Err(EnclaveError::MalformedTx( + TransactionValidationError::ContainsSpentKeyImage, + ))); // Expect a close, since this will be exceeding our limit - consensus_enclave.expect_client_close().times(1).return_const(Ok(())); + consensus_enclave + .expect_client_close() + .times(1) + .return_const(Ok(())); let instance = ClientApiService::new( config, @@ -1926,12 +1925,15 @@ mod client_api_tests { let (client, _server) = get_client_server(instance); let message = Message::default(); - for _ in 0..limit { + for _ in 0..limit { println!("Sending request"); let propose_tx_response = client .client_tx_propose(&message) .expect("Client tx propose error"); - assert_eq!(propose_tx_response.get_result(), ProposeTxResult::ContainsSpentKeyImage); + assert_eq!( + propose_tx_response.get_result(), + ProposeTxResult::ContainsSpentKeyImage + ); } let tracker = tracked_sessions @@ -1940,7 +1942,8 @@ mod client_api_tests { assert_eq!(tracker.len(), 1); let (_session_id, tracking_data) = tracker.iter().next().unwrap(); assert_eq!(tracking_data.tx_proposal_failures.len(), limit as usize); + // Because of the behavior of Mockall, if this returns without calling - // client_close() exactly once, it will panic and the test will fail. + // client_close() exactly once, it will panic and the test will fail. } } From a32f637ce550d78a1c1c6809f0e112cc951d9f2e Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 3 Apr 2023 20:36:17 -0400 Subject: [PATCH 15/46] Fixes and improvements to tx proposal ratelimiting system. 
--- .../service/src/api/client_api_service.rs | 121 ++++++++++++------ 1 file changed, 82 insertions(+), 39 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index be8924d4ac..6b8eced187 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -10,7 +10,7 @@ use crate::{ tx_manager::{TxManager, TxManagerError}, SVC_COUNTERS, }; -use grpcio::{RpcContext, RpcStatus, UnarySink}; +use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use mc_attest_api::attest::Message; use mc_attest_enclave_api::ClientSession; use mc_common::{ @@ -58,6 +58,18 @@ impl ClientSessionTracking { } } + pub fn get_proposetx_failures(&self) -> usize { + self.tx_proposal_failures.len() + } + + /// Remove any transaction proposal failure record that is older than our + /// tracking window. + fn clear_stale_records(&mut self, now: &Instant, tracking_window: &Duration) { + self.tx_proposal_failures.retain(|past_failure| { + now.saturating_duration_since(*past_failure) <= *tracking_window + }); + } + /// Push a new failed tx proposal record, clear out samples older than /// our tracking window, and return the number of tx failures remaining /// on the list - as-in, tells you "there have been x number of failures @@ -72,9 +84,7 @@ impl ClientSessionTracking { /// have existed for longer than this value will be dropped when this /// method is called. 
pub fn fail_tx_proposal(&mut self, now: &Instant, tracking_window: &Duration) -> usize { - self.tx_proposal_failures.retain(|past_failure| { - now.saturating_duration_since(*past_failure) <= *tracking_window - }); + self.clear_stale_records(now, tracking_window); self.tx_proposal_failures.push_back(*now); self.tx_proposal_failures.len() } @@ -274,6 +284,46 @@ impl ConsensusClientApi for ClientApiService { if tracker.get(&session_id).is_none() { tracker.put(session_id.clone(), ClientSessionTracking::new()); } + + let session_info = tracker + .get(&session_id) + .expect("Session should be present after insert"); + let recent_failures = session_info.get_proposetx_failures() as u32; + if recent_failures >= self.config.tx_failure_limit { + log::debug!( + self.logger, + "Client has {} recent failed tx proposals within the \ + last {} seconds - dropping connection.", + recent_failures, + self.config.tx_failure_window.as_secs_f32() + ); + // Rate-limiting is performed at the auth endpoint, so + // merely dropping the connection will be enough. + let close_result = self.enclave.client_close(session_id.clone()); + // At the time of writing (30th March, 2023), it should + // only be possible for client_close() to error if a + // mutex is poisoned. However, because the + // implementation of this method might change, it + // seems wise to handle any error this might throw. + if let Err(e) = close_result { + log::error!( + self.logger, + "Failed to drop session {:?} due to: {:?}", + &session_id, + e + ); + } else { + let _ = tracker.pop(&session_id); + } + + // Send an error indicating the rate-limiting. + let rpc_code = RpcStatusCode::RESOURCE_EXHAUSTED; + let rpc_error = ConsensusGrpcError::RpcStatus(RpcStatus::new(rpc_code)); + let result: Result<_, RpcStatus> = rpc_error.into(); + + // Send the error and return early. 
+ return send_result(ctx, sink, result, &self.logger); + } } if let Err(err) = check_request_chain_id(&self.config.chain_id, &ctx) { @@ -302,7 +352,7 @@ impl ConsensusClientApi for ClientApiService { } else { let result = self.handle_proposed_tx(msg); // The block present below rate-limits suspicious behavior. - if let Err(err) = &result { + if let Err(_err) = &result { let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); let record = if let Some(record) = tracker.get_mut(&session_id) { record @@ -312,36 +362,7 @@ impl ConsensusClientApi for ClientApiService { .get_mut(&session_id) .expect("Adding session-tracking record should be atomic.") }; - let recent_failure_count = - record.fail_tx_proposal(&Instant::now(), &self.config.tx_failure_window); - - if (recent_failure_count as u32) >= self.config.tx_failure_limit { - log::warn!( - self.logger, - "Client has {} recent failed tx proposals within the \ - last {} seconds - dropping connection. \n\ - Most recent proposal failure reason was: {:?}", - recent_failure_count, - self.config.tx_failure_window.as_secs_f32(), - err - ); - // Rate-limiting is performed at the auth endpoint, so - // merely dropping the connection will be enough. - let close_result = self.enclave.client_close(session_id.clone()); - // At the time of writing (30th March, 2023), it should - // only be possible for client_close() to error if a - // mutex is poisoned. However, because the - // implementation of this method might change, it - // seems wise to handle any error this might throw. 
- if let Err(e) = close_result { - log::error!( - self.logger, - "Failed to drop session {:?} due to: {:?}", - &session_id, - e - ); - } - } + record.fail_tx_proposal(&Instant::now(), &self.config.tx_failure_window); } result.or_else(ConsensusGrpcError::into) }; @@ -1926,7 +1947,6 @@ mod client_api_tests { let message = Message::default(); for _ in 0..limit { - println!("Sending request"); let propose_tx_response = client .client_tx_propose(&message) .expect("Client tx propose error"); @@ -1935,13 +1955,36 @@ mod client_api_tests { ProposeTxResult::ContainsSpentKeyImage ); } + // No failed transaction proposals over the limit yet, so the session + // shouldn't have been dropped + { + let tracker = tracked_sessions + .lock() + .expect("Attempt to lock session-tracking mutex failed."); + assert_eq!(tracker.len(), 1); + let (_session_id, tracking_data) = tracker.iter().next().unwrap(); + assert_eq!(tracking_data.tx_proposal_failures.len(), limit as usize); + } + + let propose_tx_response = client.client_tx_propose(&message); + assert!(propose_tx_response.is_err()); + + match propose_tx_response { + Err(grpcio::Error::RpcFailure(rpc_status)) => { + assert_eq!(rpc_status.code(), RpcStatusCode::RESOURCE_EXHAUSTED); + } + _ => panic!( + "Unexpected response upon continuing to use\ + a rate-limited session: {:?}", + propose_tx_response + ), + } let tracker = tracked_sessions .lock() .expect("Attempt to lock session-tracking mutex failed."); - assert_eq!(tracker.len(), 1); - let (_session_id, tracking_data) = tracker.iter().next().unwrap(); - assert_eq!(tracking_data.tx_proposal_failures.len(), limit as usize); + // This session should have been dropped at this point. + assert_eq!(tracker.len(), 0); // Because of the behavior of Mockall, if this returns without calling // client_close() exactly once, it will panic and the test will fail. 
From 3ee8518e8d7be052f3e3be7c013bd15f8f2092bd Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 3 Apr 2023 20:46:38 -0400 Subject: [PATCH 16/46] cargo clippy --fix --- consensus/service/src/api/client_api_service.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 6b8eced187..4a0d97f8d4 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -1975,8 +1975,7 @@ mod client_api_tests { } _ => panic!( "Unexpected response upon continuing to use\ - a rate-limited session: {:?}", - propose_tx_response + a rate-limited session: {propose_tx_response:?}" ), } From 8dcdf8d99b736ecea69146ae08e380a2dcc9f167 Mon Sep 17 00:00:00 2001 From: Millie C Date: Tue, 7 Mar 2023 20:28:15 -0500 Subject: [PATCH 17/46] Track failed tx proposals in consensus --- .../service/src/api/client_api_service.rs | 44 +++++++++++++++++-- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 0026b40ffa..9d12600ed4 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -27,7 +27,7 @@ use mc_ledger_db::Ledger; use mc_peers::ConsensusValue; use mc_transaction_core::mint::{MintConfigTx, MintTx}; use mc_util_grpc::{check_request_chain_id, rpc_logger, send_result, Authenticator}; -use std::sync::{Arc, Mutex}; +use std::{sync::{Arc, Mutex}, collections::VecDeque, time::{Instant, Duration}}; /// Maximum number of pending values for consensus service before rejecting /// add_transaction requests. @@ -35,7 +35,45 @@ const PENDING_LIMIT: i64 = 500; /// Data retained on a session with a client. 
#[derive(Clone, Debug)] -pub struct ClientSessionTracking {/* stub - todo: track failed TX proposals */} +pub struct ClientSessionTracking { + // This needs to be a VecDeque because popping elements oldest-first (i.e. + // in a FIFO fashion) needs to be efficient, since we'll be culling the + // oldest tx failure timestamps constantly - if the server is configured to + // drop clients with over 100 failed tx proposals in the last 30 seconds, + // we don't care about timestamps from over 30 seconds ago, for example. + tx_proposal_failures: VecDeque, +} + +impl ClientSessionTracking { + pub fn new() -> Self { + Self { + tx_proposal_failures: VecDeque::default(), + } + } + + /// Push a new failed tx proposal record, clear out samples older than + /// our tracking window, and return the number of tx failures remaining + /// on the list - as-in, tells you "there have been x number of failures + /// within the past `tracking_window` seconds". + /// + /// # Arguments + /// + /// * `now` - Used as both the instant to record as a new tx proposal + /// failure, and as the "present" time to check for stale records. + /// * `tracking_window` - How long of a period of time should we keep track + /// of an individual tx proposal failure incident for? Any records which + /// have existed for longer than this value will be dropped when this + /// method is called. 
+ pub fn fail_tx_proposal(&mut self, + now: &Instant, + tracking_window: &Duration) -> usize { + self.tx_proposal_failures.retain(|past_failure| { + now.saturating_duration_since(*past_failure) <= *tracking_window + }); + self.tx_proposal_failures.push_back(now.clone()); + self.tx_proposal_failures.len() + } +} #[derive(Clone)] pub struct ClientApiService { @@ -225,7 +263,7 @@ impl ConsensusClientApi for ClientApiService { // TODO: Update fields } else { // TODO: Populate new session metadata - tracker.put(session, ClientSessionTracking {}); + tracker.put(session, ClientSessionTracking::new()); } } if let Err(err) = check_request_chain_id(&self.config.chain_id, &ctx) { From f4e80ebc188302ad760d8bba34a522b4f651642f Mon Sep 17 00:00:00 2001 From: Millie C Date: Wed, 8 Mar 2023 21:28:18 -0500 Subject: [PATCH 18/46] Push an Instant::now() for every time validation fails on a tx proposal --- .../service/src/api/client_api_service.rs | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 9d12600ed4..186b255c55 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -130,12 +130,26 @@ impl ClientApiService { msg: Message, ) -> Result { counters::ADD_TX_INITIATED.inc(); + let session_id = ClientSession::from(msg.channel_id.clone()); let tx_context = self.enclave.client_tx_propose(msg.into())?; // Cache the transaction. This performs the well-formedness checks. let tx_hash = self.tx_manager.insert(tx_context).map_err(|err| { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); + + let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. 
+ let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); + let record = if let Some(record) = tracker.get_mut(&session_id) { + record + } else { + tracker.put(session_id.clone(), ClientSessionTracking::new()); + tracker.get_mut(&session_id) + .expect("Adding session-tracking record should be atomic.") + + }; + let _recent_failure_count = record.fail_tx_proposal(&Instant::now(), &tracking_window); + // TODO: drop session when recent_failure_count reaches some number } err })?; @@ -256,16 +270,6 @@ impl ConsensusClientApi for ClientApiService { ) { let _timer = SVC_COUNTERS.req(&ctx); - { - let session = ClientSession::from(msg.channel_id.clone()); - let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); - if let Some(_session_info) = tracker.get(&session) { - // TODO: Update fields - } else { - // TODO: Populate new session metadata - tracker.put(session, ClientSessionTracking::new()); - } - } if let Err(err) = check_request_chain_id(&self.config.chain_id, &ctx) { return send_result(ctx, sink, Err(err), &self.logger); } From 0d2ab750f647b49a6c782bd80cf400371b77ceb2 Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 13 Mar 2023 22:06:34 -0400 Subject: [PATCH 19/46] Fix unnecessary .clone() --- consensus/service/src/api/client_api_service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 186b255c55..0b8cea8298 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -70,7 +70,7 @@ impl ClientSessionTracking { self.tx_proposal_failures.retain(|past_failure| { now.saturating_duration_since(*past_failure) <= *tracking_window }); - self.tx_proposal_failures.push_back(now.clone()); + self.tx_proposal_failures.push_back(*now); self.tx_proposal_failures.len() } } From 9e557ddcec205b31232a07cabd4bee3c13d162b1 Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 
13 Mar 2023 22:35:31 -0400 Subject: [PATCH 20/46] Cargo fmt --- .../service/src/api/client_api_service.rs | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 0b8cea8298..30d14a28ad 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -27,7 +27,11 @@ use mc_ledger_db::Ledger; use mc_peers::ConsensusValue; use mc_transaction_core::mint::{MintConfigTx, MintTx}; use mc_util_grpc::{check_request_chain_id, rpc_logger, send_result, Authenticator}; -use std::{sync::{Arc, Mutex}, collections::VecDeque, time::{Instant, Duration}}; +use std::{ + collections::VecDeque, + sync::{Arc, Mutex}, + time::{Duration, Instant}, +}; /// Maximum number of pending values for consensus service before rejecting /// add_transaction requests. @@ -41,7 +45,7 @@ pub struct ClientSessionTracking { // oldest tx failure timestamps constantly - if the server is configured to // drop clients with over 100 failed tx proposals in the last 30 seconds, // we don't care about timestamps from over 30 seconds ago, for example. - tx_proposal_failures: VecDeque, + tx_proposal_failures: VecDeque, } impl ClientSessionTracking { @@ -54,8 +58,8 @@ impl ClientSessionTracking { /// Push a new failed tx proposal record, clear out samples older than /// our tracking window, and return the number of tx failures remaining /// on the list - as-in, tells you "there have been x number of failures - /// within the past `tracking_window` seconds". - /// + /// within the past `tracking_window` seconds". + /// /// # Arguments /// /// * `now` - Used as both the instant to record as a new tx proposal @@ -64,9 +68,7 @@ impl ClientSessionTracking { /// of an individual tx proposal failure incident for? Any records which /// have existed for longer than this value will be dropped when this /// method is called. 
- pub fn fail_tx_proposal(&mut self, - now: &Instant, - tracking_window: &Duration) -> usize { + pub fn fail_tx_proposal(&mut self, now: &Instant, tracking_window: &Duration) -> usize { self.tx_proposal_failures.retain(|past_failure| { now.saturating_duration_since(*past_failure) <= *tracking_window }); @@ -138,18 +140,20 @@ impl ClientApiService { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); - let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. + let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); let record = if let Some(record) = tracker.get_mut(&session_id) { record } else { tracker.put(session_id.clone(), ClientSessionTracking::new()); - tracker.get_mut(&session_id) + tracker + .get_mut(&session_id) .expect("Adding session-tracking record should be atomic.") - }; - let _recent_failure_count = record.fail_tx_proposal(&Instant::now(), &tracking_window); - // TODO: drop session when recent_failure_count reaches some number + let _recent_failure_count = + record.fail_tx_proposal(&Instant::now(), &tracking_window); + // TODO: drop session when recent_failure_count reaches some + // number } err })?; From c342f6c5bbb0f67c6fa5dff90e9597afb0b7a996 Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 27 Mar 2023 17:26:50 -0400 Subject: [PATCH 21/46] Return to tracking clients as soon as they submit a proposeTX, per James' suggestion --- consensus/service/src/api/client_api_service.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 30d14a28ad..c7403cb61a 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -274,6 +274,16 @@ impl ConsensusClientApi 
for ClientApiService { ) { let _timer = SVC_COUNTERS.req(&ctx); + { + let session = ClientSession::from(msg.channel_id.clone()); + let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); + // Calling get() on the LRU bumps the entry to show up as more + // recently-used. + if tracker.get(&session).is_none() { + tracker.put(session, ClientSessionTracking::new()); + } + } + if let Err(err) = check_request_chain_id(&self.config.chain_id, &ctx) { return send_result(ctx, sink, Err(err), &self.logger); } From ecd5836e1cfe604263f475f17a05e38465ac4a1c Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 27 Mar 2023 17:31:16 -0400 Subject: [PATCH 22/46] Cargo fmt --- consensus/service/src/api/client_api_service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index c7403cb61a..df3d0b67db 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -277,9 +277,9 @@ impl ConsensusClientApi for ClientApiService { { let session = ClientSession::from(msg.channel_id.clone()); let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); - // Calling get() on the LRU bumps the entry to show up as more + // Calling get() on the LRU bumps the entry to show up as more // recently-used. 
- if tracker.get(&session).is_none() { + if tracker.get(&session).is_none() { tracker.put(session, ClientSessionTracking::new()); } } From 93d603089e73cdd49a6a512835e4ff0a344daf26 Mon Sep 17 00:00:00 2001 From: Emily C Date: Thu, 6 Apr 2023 21:03:15 -0400 Subject: [PATCH 23/46] Apply suggestions from code review Co-authored-by: Sam Dealy <33067698+samdealy@users.noreply.github.com> --- consensus/service/src/api/client_api_service.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index df3d0b67db..3d2aae4248 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -142,14 +142,12 @@ impl ClientApiService { let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); - let record = if let Some(record) = tracker.get_mut(&session_id) { - record - } else { - tracker.put(session_id.clone(), ClientSessionTracking::new()); - tracker - .get_mut(&session_id) - .expect("Adding session-tracking record should be atomic.") - }; + if !tracker.contains(&session_id) { + tracker.put(session_id.clone(), ClientSessionTracking::new()); + } + let record = + tracker.get_mut(&session_id).expect("Session id {session_id} should be tracked."); + let _recent_failure_count = record.fail_tx_proposal(&Instant::now(), &tracking_window); // TODO: drop session when recent_failure_count reaches some From 6b3861fa7beed20555822ea943a9552d3c9d035b Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 6 Apr 2023 21:05:01 -0400 Subject: [PATCH 24/46] Respond to suggestions on the pull request, no longer using references for Instant and Duration --- consensus/service/src/api/client_api_service.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git 
a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 3d2aae4248..bdb39cbe5a 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -68,11 +68,11 @@ impl ClientSessionTracking { /// of an individual tx proposal failure incident for? Any records which /// have existed for longer than this value will be dropped when this /// method is called. - pub fn fail_tx_proposal(&mut self, now: &Instant, tracking_window: &Duration) -> usize { + pub fn fail_tx_proposal(&mut self, now: Instant, tracking_window: Duration) -> usize { self.tx_proposal_failures.retain(|past_failure| { - now.saturating_duration_since(*past_failure) <= *tracking_window + now.saturating_duration_since(*past_failure) <= tracking_window }); - self.tx_proposal_failures.push_back(*now); + self.tx_proposal_failures.push_back(now); self.tx_proposal_failures.len() } } @@ -140,7 +140,9 @@ impl ClientApiService { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); - let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. 
+ // This will become a proper config option, already implemented + // in pull request #3296 "Failure limit on tx proposals" + let tracking_window = Duration::from_secs(60); let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); if !tracker.contains(&session_id) { tracker.put(session_id.clone(), ClientSessionTracking::new()); @@ -149,9 +151,9 @@ impl ClientApiService { tracker.get_mut(&session_id).expect("Session id {session_id} should be tracked."); let _recent_failure_count = - record.fail_tx_proposal(&Instant::now(), &tracking_window); - // TODO: drop session when recent_failure_count reaches some - // number + record.fail_tx_proposal(Instant::now(), tracking_window); + // Dropping the client after a limit has been reached will be + // implemented in a future pull request. } err })?; From ee9ec68b1923797331b349c860aec2776b6fa366 Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 6 Apr 2023 21:06:17 -0400 Subject: [PATCH 25/46] Merge in suggested changes --- consensus/service/src/api/client_api_service.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index bdb39cbe5a..89e726296e 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -69,9 +69,8 @@ impl ClientSessionTracking { /// have existed for longer than this value will be dropped when this /// method is called. 
pub fn fail_tx_proposal(&mut self, now: Instant, tracking_window: Duration) -> usize { - self.tx_proposal_failures.retain(|past_failure| { - now.saturating_duration_since(*past_failure) <= tracking_window - }); + self.tx_proposal_failures + .retain(|past_failure| now.saturating_duration_since(*past_failure) <= tracking_window); self.tx_proposal_failures.push_back(now); self.tx_proposal_failures.len() } @@ -145,11 +144,12 @@ impl ClientApiService { let tracking_window = Duration::from_secs(60); let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); if !tracker.contains(&session_id) { - tracker.put(session_id.clone(), ClientSessionTracking::new()); + tracker.put(session_id.clone(), ClientSessionTracking::new()); } - let record = - tracker.get_mut(&session_id).expect("Session id {session_id} should be tracked."); - + let record = tracker + .get_mut(&session_id) + .expect("Session id {session_id} should be tracked."); + let _recent_failure_count = record.fail_tx_proposal(Instant::now(), tracking_window); // Dropping the client after a limit has been reached will be From bf6b2d30e930a7bb9ad8a22696e1bf32b3e9527e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Mar 2023 15:12:31 -0700 Subject: [PATCH 26/46] chore(deps): bump syn from 1.0.109 to 2.0.11 (#3295) Bumps [syn](https://github.com/dtolnay/syn) from 1.0.109 to 2.0.11. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/1.0.109...2.0.11) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 83 ++++++++++++++++++++--------------- util/logger-macros/Cargo.toml | 2 +- 2 files changed, 48 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bac1112c04..99ec6dbad0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -204,7 +204,7 @@ checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -215,7 +215,7 @@ checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -358,7 +358,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn", + "syn 1.0.109", "which", ] @@ -700,7 +700,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1000,7 +1000,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.109", ] [[package]] @@ -1011,7 +1011,7 @@ checksum = "ddfc69c5bfcbd2fc09a0f38451d2daf0e372e367986a83906d1b0dbc88134fb5" dependencies = [ "darling_core", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1076,7 +1076,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1101,7 +1101,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1112,7 +1112,7 @@ checksum = "45f5098f628d02a7a0f68ddba586fb61e80edec3bdc1be3b921f4ceec60858d3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1200,7 +1200,7 @@ checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1322,7 +1322,7 @@ checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ "proc-macro2", "quote", - 
"syn", + "syn 1.0.109", "synstructure", ] @@ -1477,7 +1477,7 @@ checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1941,7 +1941,7 @@ checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2244,7 +2244,7 @@ dependencies = [ "libc", "libz-sys", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3130,7 +3130,7 @@ version = "4.1.0-pre0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5582,7 +5582,7 @@ version = "4.1.0-pre0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.11", ] [[package]] @@ -5689,7 +5689,7 @@ version = "4.1.0-pre0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5852,7 +5852,7 @@ dependencies = [ "migrations_internals", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5922,7 +5922,7 @@ dependencies = [ "cfg-if 1.0.0", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6195,7 +6195,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6275,7 +6275,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6473,7 +6473,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -6505,7 +6505,7 @@ checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", "yansi", ] @@ -6566,7 +6566,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6752,7 +6752,7 @@ checksum = "a043824e29c94169374ac5183ac0ed43f5724dc4556b19568007486bd840fa1f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", 
] [[package]] @@ -6905,7 +6905,7 @@ dependencies = [ "proc-macro2", "quote", "rocket_http", - "syn", + "syn 1.0.109", "unicode-xid", ] @@ -7382,7 +7382,7 @@ checksum = "4fc80d722935453bcafdc2c9a73cd6fac4dc1938f0346035d84bf99fa9e33217" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -7442,7 +7442,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -7467,7 +7467,7 @@ checksum = "079a83df15f85d89a68d64ae1238f142f172b1fa915d0d76b26a7cba1b659a69" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -7788,6 +7788,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21e3787bb71465627110e7d87ed4faaa36c1f61042ee67badb9e2ef173accc40" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "synstructure" version = "0.12.3" @@ -7796,7 +7807,7 @@ checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "unicode-xid", ] @@ -7883,7 +7894,7 @@ checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8017,7 +8028,7 @@ checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8125,7 +8136,7 @@ checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8394,7 +8405,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -8428,7 +8439,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 
1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8729,6 +8740,6 @@ checksum = "81e8f13fef10b63c06356d65d416b070798ddabcadc10d3ece0c5be9b3c7eddb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] diff --git a/util/logger-macros/Cargo.toml b/util/logger-macros/Cargo.toml index 8cc5d7ece2..fd8babac61 100644 --- a/util/logger-macros/Cargo.toml +++ b/util/logger-macros/Cargo.toml @@ -12,4 +12,4 @@ proc-macro = true [dependencies] proc-macro2 = { version = "1.0", features = ["nightly"] } quote = "1.0" -syn = { version = "1.0", features = ["full", "extra-traits"] } +syn = { version = "2.0", features = ["full", "extra-traits"] } From 429d3ea4257ccb097fe9ccb58b98d1facccfe528 Mon Sep 17 00:00:00 2001 From: Varsha <102332078+varsha888@users.noreply.github.com> Date: Mon, 3 Apr 2023 13:28:23 -0700 Subject: [PATCH 27/46] Update log messages (#3301) --- .../src/ledger_sync/ledger_sync_service.rs | 6 ++++- mobilecoind/src/sync.rs | 2 +- watcher/src/bin/main.rs | 2 +- watcher/src/verification_reports_collector.rs | 18 +++++++------- watcher/src/watcher.rs | 24 ++++++++++++++----- 5 files changed, 35 insertions(+), 17 deletions(-) diff --git a/ledger/sync/src/ledger_sync/ledger_sync_service.rs b/ledger/sync/src/ledger_sync/ledger_sync_service.rs index 12313bfc9f..f5bbeec01f 100644 --- a/ledger/sync/src/ledger_sync/ledger_sync_service.rs +++ b/ledger/sync/src/ledger_sync/ledger_sync_service.rs @@ -25,6 +25,7 @@ use mc_util_telemetry::{ use mc_util_uri::ConnectionUri; use retry::delay::Fibonacci; use std::{ + cmp::min, collections::{BTreeMap, HashMap, HashSet}, sync::{Arc, Condvar, Mutex}, thread, @@ -41,6 +42,8 @@ const MAX_CONCURRENT_GET_BLOCK_CONTENTS_CALLS: usize = 50; /// Telemetry metadata: number of blocks appended to the local ledger. 
const TELEMETRY_NUM_BLOCKS_APPENDED: Key = telemetry_static_key!("num-blocks-appended"); +const MAX_SLEEP_INTERVAL: Duration = Duration::from_secs(60); + pub struct LedgerSyncService< L: Ledger, BC: BlockchainConnection + 'static, @@ -719,7 +722,8 @@ fn get_block_contents( // Sleep, with a linearly increasing delay. This prevents // endless retries // as long as the deadline is not exceeded. - thread::sleep(Duration::from_secs(num_attempts + 1)); + let attempts = Duration::from_secs(num_attempts + 1); + thread::sleep(min(attempts, MAX_SLEEP_INTERVAL)); // Put back to queue for a retry thread_sender diff --git a/mobilecoind/src/sync.rs b/mobilecoind/src/sync.rs index d677126e3a..20a692cbd2 100644 --- a/mobilecoind/src/sync.rs +++ b/mobilecoind/src/sync.rs @@ -167,7 +167,7 @@ impl SyncThread { } // This monitor has blocks to process, put it in the queue. - log::info!( + log::debug!( logger, "sync thread noticed monitor {} needs syncing", monitor_id, diff --git a/watcher/src/bin/main.rs b/watcher/src/bin/main.rs index 36f9068015..33a550ef06 100644 --- a/watcher/src/bin/main.rs +++ b/watcher/src/bin/main.rs @@ -173,7 +173,7 @@ impl WatcherSyncThread { // Decide next step before continuing based on sync result match sync_result { SyncResult::AllBlocksSynced => { - log::info!(logger, "sync_blocks indicates we're done"); + log::info!(logger, "Block synchronization done"); break; } SyncResult::BlockSyncError => { diff --git a/watcher/src/verification_reports_collector.rs b/watcher/src/verification_reports_collector.rs index d21a64554b..d9010fea6d 100644 --- a/watcher/src/verification_reports_collector.rs +++ b/watcher/src/verification_reports_collector.rs @@ -308,7 +308,16 @@ impl VerificationReportsCollectorThread { verification_report: &VerificationReport, ) { let verification_report_block_signer = match NC::get_block_signer(verification_report) { - Ok(key) => key, + Ok(key) => { + // TODO: Add encode to key's Display impl + log::info!( + self.logger, + "Verification 
report from {} has block signer {}", + node_url, + hex::encode(key.to_bytes()) + ); + key + } Err(err) => { log::error!( self.logger, @@ -320,13 +329,6 @@ impl VerificationReportsCollectorThread { } }; - log::info!( - self.logger, - "Verification report from {} has block signer {}", - node_url, - hex::encode(verification_report_block_signer.to_bytes()) - ); - // Store the VerificationReport in the database, and also remove // verification_report_block_signer and potential_signers from the polling // queue. diff --git a/watcher/src/watcher.rs b/watcher/src/watcher.rs index d85b0e93db..22773d17b4 100644 --- a/watcher/src/watcher.rs +++ b/watcher/src/watcher.rs @@ -137,12 +137,24 @@ impl Watcher { max_blocks_per_iteration: Option, log_sync_failures: bool, ) -> Result { - log::info!( - self.logger, - "Now syncing signatures from {} to {:?}", - start, - max_block_height, - ); + match max_block_height { + None => { + log::debug!( + self.logger, + "Now syncing signatures from {} to {:?}", + start, + max_block_height, + ); + } + Some(_) => { + log::info!( + self.logger, + "Now syncing signatures from {} to {:?}", + start, + max_block_height, + ); + } + } let mut counter = 0usize; From b130c4aec063a3a1ff97813c4823a5444b9500a9 Mon Sep 17 00:00:00 2001 From: ryan Date: Wed, 5 Apr 2023 10:46:49 +1200 Subject: [PATCH 28/46] move transaction summary computation to separate crate (#3132) * move tx summary computation to separate transaction/summary crate this simplifies building for hardware wallets as we no longer need to include transaction/{core,extra} * bump package versions * lockfile updates for mc-core crate version --- Cargo.lock | 35 ++++- Cargo.toml | 1 + api/Cargo.toml | 1 + api/src/convert/unsigned_tx.rs | 3 +- consensus/enclave/trusted/Cargo.lock | 2 +- core/Cargo.toml | 4 +- crypto/memo-mac/Cargo.toml | 2 +- crypto/ring-signature/Cargo.toml | 2 +- fog/ingest/enclave/trusted/Cargo.lock | 2 +- fog/ledger/enclave/trusted/Cargo.lock | 2 +- 
fog/view/enclave/trusted/Cargo.lock | 2 +- transaction/builder/Cargo.toml | 1 + .../builder/src/transaction_builder.rs | 4 +- transaction/extra/Cargo.toml | 1 + transaction/extra/src/lib.rs | 5 - .../extra/src/tx_summary_unblinding/data.rs | 52 ------- .../extra/src/tx_summary_unblinding/mod.rs | 11 -- transaction/extra/src/unsigned_tx.rs | 4 +- transaction/extra/tests/verifier.rs | 3 +- transaction/signer/Cargo.toml | 3 +- transaction/signer/src/lib.rs | 5 +- transaction/signer/src/types.rs | 2 +- transaction/summary/Cargo.toml | 44 ++++++ transaction/summary/README.md | 8 + transaction/summary/src/data.rs | 142 +++++++++++++++++ .../src}/error.rs | 0 transaction/summary/src/lib.rs | 22 +++ .../src}/report.rs | 15 +- .../src}/verifier.rs | 145 ++++-------------- transaction/types/src/amount/error.rs | 4 +- util/vec-map/Cargo.toml | 2 +- util/vec-map/src/lib.rs | 15 +- util/zip-exact/Cargo.toml | 5 +- util/zip-exact/src/lib.rs | 4 +- 34 files changed, 336 insertions(+), 217 deletions(-) delete mode 100644 transaction/extra/src/tx_summary_unblinding/data.rs delete mode 100644 transaction/extra/src/tx_summary_unblinding/mod.rs create mode 100644 transaction/summary/Cargo.toml create mode 100644 transaction/summary/README.md create mode 100644 transaction/summary/src/data.rs rename transaction/{extra/src/tx_summary_unblinding => summary/src}/error.rs (100%) create mode 100644 transaction/summary/src/lib.rs rename transaction/{extra/src/tx_summary_unblinding => summary/src}/report.rs (89%) rename transaction/{extra/src/tx_summary_unblinding => summary/src}/verifier.rs (74%) diff --git a/Cargo.lock b/Cargo.lock index 99ec6dbad0..4b744f9c1f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2324,6 +2324,7 @@ dependencies = [ "mc-transaction-builder", "mc-transaction-core", "mc-transaction-extra", + "mc-transaction-summary", "mc-util-build-grpc", "mc-util-build-script", "mc-util-from-random", @@ -3037,7 +3038,7 @@ dependencies = [ [[package]] name = "mc-core" -version = "4.0.2" 
+version = "4.1.0-pre0" dependencies = [ "anyhow", "clap 4.1.11", @@ -3204,7 +3205,7 @@ dependencies = [ [[package]] name = "mc-crypto-memo-mac" -version = "4.0.2" +version = "4.1.0-pre0" dependencies = [ "hmac 0.12.1", "mc-crypto-keys", @@ -5189,6 +5190,7 @@ dependencies = [ "mc-fog-report-validation-test-utils", "mc-transaction-core", "mc-transaction-extra", + "mc-transaction-summary", "mc-transaction-types", "mc-util-from-random", "mc-util-serial", @@ -5286,6 +5288,7 @@ dependencies = [ "mc-fog-report-validation-test-utils", "mc-transaction-builder", "mc-transaction-core", + "mc-transaction-summary", "mc-transaction-types", "mc-util-from-random", "mc-util-repr-bytes", @@ -5306,7 +5309,7 @@ dependencies = [ [[package]] name = "mc-transaction-signer" -version = "4.0.1" +version = "4.1.0-pre0" dependencies = [ "anyhow", "clap 4.1.11", @@ -5322,6 +5325,7 @@ dependencies = [ "mc-crypto-ring-signature-signer", "mc-transaction-core", "mc-transaction-extra", + "mc-transaction-summary", "mc-util-repr-bytes", "mc-util-serial", "rand_core", @@ -5332,6 +5336,25 @@ dependencies = [ "zeroize", ] +[[package]] +name = "mc-transaction-summary" +version = "4.1.0-pre0" +dependencies = [ + "displaydoc", + "mc-account-keys", + "mc-core", + "mc-crypto-digestible", + "mc-crypto-keys", + "mc-crypto-ring-signature", + "mc-transaction-types", + "mc-util-vec-map", + "mc-util-zip-exact", + "prost", + "serde", + "subtle", + "zeroize", +] + [[package]] name = "mc-transaction-types" version = "4.1.0-pre0" @@ -5582,7 +5605,7 @@ version = "4.1.0-pre0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.12", ] [[package]] @@ -7790,9 +7813,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e3787bb71465627110e7d87ed4faaa36c1f61042ee67badb9e2ef173accc40" +checksum = "79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" dependencies = [ "proc-macro2", 
"quote", diff --git a/Cargo.toml b/Cargo.toml index 036d5998a2..6a708bc76e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -134,6 +134,7 @@ members = [ "transaction/core/test-utils", "transaction/extra", "transaction/signer", + "transaction/summary", "transaction/types", "util/b58-decoder", "util/build/enclave", diff --git a/api/Cargo.toml b/api/Cargo.toml index bce9552838..026a6ce431 100644 --- a/api/Cargo.toml +++ b/api/Cargo.toml @@ -19,6 +19,7 @@ mc-crypto-multisig = { path = "../crypto/multisig" } mc-crypto-ring-signature-signer = { path = "../crypto/ring-signature/signer" } mc-transaction-core = { path = "../transaction/core" } mc-transaction-extra = { path = "../transaction/extra" } +mc-transaction-summary = { path = "../transaction/summary" } mc-util-repr-bytes = { path = "../util/repr-bytes" } mc-util-serial = { path = "../util/serial" } mc-watcher-api = { path = "../watcher/api" } diff --git a/api/src/convert/unsigned_tx.rs b/api/src/convert/unsigned_tx.rs index 82af667d7b..e473f6c6fb 100644 --- a/api/src/convert/unsigned_tx.rs +++ b/api/src/convert/unsigned_tx.rs @@ -5,7 +5,8 @@ use crate::{external, ConversionError}; use mc_blockchain_types::BlockVersion; use mc_transaction_core::UnmaskedAmount; -use mc_transaction_extra::{TxOutSummaryUnblindingData, UnsignedTx}; +use mc_transaction_extra::UnsignedTx; +use mc_transaction_summary::TxOutSummaryUnblindingData; impl From<&UnsignedTx> for external::UnsignedTx { fn from(source: &UnsignedTx) -> Self { diff --git a/consensus/enclave/trusted/Cargo.lock b/consensus/enclave/trusted/Cargo.lock index 8320ff9555..3e5a204699 100644 --- a/consensus/enclave/trusted/Cargo.lock +++ b/consensus/enclave/trusted/Cargo.lock @@ -943,7 +943,7 @@ dependencies = [ [[package]] name = "mc-core" -version = "4.0.2" +version = "4.1.0-pre0" dependencies = [ "curve25519-dalek", "ed25519-dalek", diff --git a/core/Cargo.toml b/core/Cargo.toml index 9c44f85e33..77b5d57748 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -1,6 +1,6 @@ 
[package] name = "mc-core" -version = "4.0.2" +version = "4.1.0-pre0" authors = ["MobileCoin"] edition = "2021" description = "MobileCoin Core Library" @@ -26,7 +26,7 @@ slip10_ed25519 = { version = "0.1", optional = true } tiny-bip39 = { version = "1.0", optional = true } zeroize = { version = "1.5", default-features = false } -mc-core-types = { path = "./types" } +mc-core-types = { path = "./types", default-features = false } mc-crypto-hashes = { path = "../crypto/hashes", default-features = false } mc-crypto-keys = { path = "../crypto/keys", default-features = false } diff --git a/crypto/memo-mac/Cargo.toml b/crypto/memo-mac/Cargo.toml index 447942c58f..8c9fbb3ddf 100644 --- a/crypto/memo-mac/Cargo.toml +++ b/crypto/memo-mac/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "mc-crypto-memo-mac" -version = "4.0.2" +version = "4.1.0-pre0" authors = ["MobileCoin"] edition = "2021" diff --git a/crypto/ring-signature/Cargo.toml b/crypto/ring-signature/Cargo.toml index f194bdb118..c7ce3abbe5 100644 --- a/crypto/ring-signature/Cargo.toml +++ b/crypto/ring-signature/Cargo.toml @@ -32,7 +32,7 @@ mc-crypto-hashes = { path = "../../crypto/hashes", default-features = false } mc-crypto-keys = { path = "../../crypto/keys", default-features = false } mc-util-from-random = { path = "../../util/from-random" } mc-util-repr-bytes = { path = "../../util/repr-bytes", default-features = false } -mc-util-serial = { path = "../../util/serial", optional = true } +mc-util-serial = { path = "../../util/serial", optional = true, default_features = false } # Enable all default features not known to break code coverage builds proptest = { version = "1.1", default-features = false, features = ["default-code-coverage"], optional = true } diff --git a/fog/ingest/enclave/trusted/Cargo.lock b/fog/ingest/enclave/trusted/Cargo.lock index 13a4287e7e..6d941710a1 100644 --- a/fog/ingest/enclave/trusted/Cargo.lock +++ b/fog/ingest/enclave/trusted/Cargo.lock @@ -880,7 +880,7 @@ dependencies = [ [[package]] 
name = "mc-core" -version = "4.0.2" +version = "4.1.0-pre0" dependencies = [ "curve25519-dalek", "ed25519-dalek", diff --git a/fog/ledger/enclave/trusted/Cargo.lock b/fog/ledger/enclave/trusted/Cargo.lock index 869b70098d..01294b3210 100644 --- a/fog/ledger/enclave/trusted/Cargo.lock +++ b/fog/ledger/enclave/trusted/Cargo.lock @@ -839,7 +839,7 @@ dependencies = [ [[package]] name = "mc-core" -version = "4.0.2" +version = "4.1.0-pre0" dependencies = [ "curve25519-dalek", "ed25519-dalek", diff --git a/fog/view/enclave/trusted/Cargo.lock b/fog/view/enclave/trusted/Cargo.lock index 51d8e6b80f..5b12dea505 100644 --- a/fog/view/enclave/trusted/Cargo.lock +++ b/fog/view/enclave/trusted/Cargo.lock @@ -880,7 +880,7 @@ dependencies = [ [[package]] name = "mc-core" -version = "4.0.2" +version = "4.1.0-pre0" dependencies = [ "curve25519-dalek", "ed25519-dalek", diff --git a/transaction/builder/Cargo.toml b/transaction/builder/Cargo.toml index 0c9078989b..fa574109f6 100644 --- a/transaction/builder/Cargo.toml +++ b/transaction/builder/Cargo.toml @@ -31,6 +31,7 @@ mc-crypto-ring-signature-signer = { path = "../../crypto/ring-signature/signer", mc-fog-report-validation = { path = "../../fog/report/validation" } mc-transaction-core = { path = "../../transaction/core" } mc-transaction-extra = { path = "../../transaction/extra" } +mc-transaction-summary = { path = "../../transaction/summary" } mc-transaction-types = { path = "../../transaction/types" } mc-util-from-random = { path = "../../util/from-random" } mc-util-serial = { path = "../../util/serial" } diff --git a/transaction/builder/src/transaction_builder.rs b/transaction/builder/src/transaction_builder.rs index a5ddd53ff9..04e1bfb71f 100644 --- a/transaction/builder/src/transaction_builder.rs +++ b/transaction/builder/src/transaction_builder.rs @@ -29,9 +29,9 @@ use mc_transaction_core::{ RevealedTxOutError, Token, TokenId, }; use mc_transaction_extra::{ - SignedContingentInput, SignedContingentInputError, 
TxOutConfirmationNumber, - TxOutSummaryUnblindingData, UnsignedTx, + SignedContingentInput, SignedContingentInputError, TxOutConfirmationNumber, UnsignedTx, }; +use mc_transaction_summary::TxOutSummaryUnblindingData; use mc_util_from_random::FromRandom; use mc_util_u64_ratio::U64Ratio; use rand_core::{CryptoRng, RngCore}; diff --git a/transaction/extra/Cargo.toml b/transaction/extra/Cargo.toml index 64c32b5e0c..59a976e443 100644 --- a/transaction/extra/Cargo.toml +++ b/transaction/extra/Cargo.toml @@ -29,6 +29,7 @@ mc-crypto-keys = { path = "../../crypto/keys", default-features = false } mc-crypto-ring-signature = { path = "../../crypto/ring-signature" } mc-crypto-ring-signature-signer = { path = "../../crypto/ring-signature/signer" } mc-transaction-core = { path = "../../transaction/core" } +mc-transaction-summary = { path = "../../transaction/summary" } mc-transaction-types = { path = "../../transaction/types" } mc-util-from-random = { path = "../../util/from-random" } mc-util-repr-bytes = { path = "../../util/repr-bytes" } diff --git a/transaction/extra/src/lib.rs b/transaction/extra/src/lib.rs index 086296bd00..1c4d13e0b2 100644 --- a/transaction/extra/src/lib.rs +++ b/transaction/extra/src/lib.rs @@ -16,7 +16,6 @@ mod memo; mod signed_contingent_input; mod tx_out_confirmation_number; mod tx_out_gift_code; -mod tx_summary_unblinding; mod unsigned_tx; pub use memo::{ @@ -32,10 +31,6 @@ pub use signed_contingent_input::{ }; pub use tx_out_confirmation_number::TxOutConfirmationNumber; pub use tx_out_gift_code::TxOutGiftCode; -pub use tx_summary_unblinding::{ - verify_tx_summary, TransactionEntity, TxOutSummaryUnblindingData, TxSummaryStreamingVerifier, - TxSummaryUnblindingData, TxSummaryUnblindingReport, -}; pub use unsigned_tx::UnsignedTx; // Re-export this to help the exported macros work diff --git a/transaction/extra/src/tx_summary_unblinding/data.rs b/transaction/extra/src/tx_summary_unblinding/data.rs deleted file mode 100644 index 2a297a413e..0000000000 
--- a/transaction/extra/src/tx_summary_unblinding/data.rs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation - -use alloc::vec::Vec; -use mc_account_keys::PublicAddress; -use mc_crypto_digestible::Digestible; -use mc_crypto_keys::RistrettoPrivate; -use mc_transaction_core::UnmaskedAmount; -use prost::Message; -use serde::{Deserialize, Serialize}; -use zeroize::Zeroize; - -/// Unblinding data correpsonding to a TxSummary. This reveals the amounts of -/// all inputs and outputs in a way that can be confirmed against the TxSummary. -#[derive(Clone, Deserialize, Digestible, Eq, Message, PartialEq, Serialize, Zeroize)] -pub struct TxSummaryUnblindingData { - /// The block version targetted by the outputs of this Tx - #[prost(uint32, tag = "1")] - pub block_version: u32, - - /// A TxOutSummaryUnblindingData, one for each transaction output - #[prost(message, repeated, tag = "2")] - pub outputs: Vec, - - /// An unmasked amount, one for each transaction input, corresponding to - /// the pseudo-output commitment - #[prost(message, repeated, tag = "3")] - pub inputs: Vec, -} - -/// Unblinding data corresponding to a TxOutSummary. This reveals the amount -/// and, usually, the Public Address to which this TxOut is addressed. -#[derive(Clone, Deserialize, Digestible, Eq, Message, PartialEq, Serialize, Zeroize)] -pub struct TxOutSummaryUnblindingData { - /// An unmasked amount, corresponding to the MaskedAmount field - /// The block vesion appears in the TxSummaryUnblindingData. - #[prost(message, required, tag = "1")] - pub unmasked_amount: UnmaskedAmount, - - /// The public address to which this TxOut is addressed. - /// If this output comes from an SCI then we may not know the public - /// address. - #[prost(message, optional, tag = "2")] - pub address: Option, - - /// The tx_private_key generated for this TxOut. 
This is an entropy source - /// which introduces randomness into the cryptonote stealth addresses - /// (tx_public_key and tx_target_key) of the TxOut. - /// - /// If this output comes from an SCI then we may not know this. - #[prost(message, optional, tag = "3")] - pub tx_private_key: Option, -} diff --git a/transaction/extra/src/tx_summary_unblinding/mod.rs b/transaction/extra/src/tx_summary_unblinding/mod.rs deleted file mode 100644 index e50fc879a1..0000000000 --- a/transaction/extra/src/tx_summary_unblinding/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation - -mod data; -mod error; -mod report; -mod verifier; - -pub use data::{TxOutSummaryUnblindingData, TxSummaryUnblindingData}; -pub use error::Error; -pub use report::{TransactionEntity, TxSummaryUnblindingReport}; -pub use verifier::{verify_tx_summary, TxSummaryStreamingVerifier}; diff --git a/transaction/extra/src/unsigned_tx.rs b/transaction/extra/src/unsigned_tx.rs index 6480de954f..a9ec143dfb 100644 --- a/transaction/extra/src/unsigned_tx.rs +++ b/transaction/extra/src/unsigned_tx.rs @@ -1,6 +1,5 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation -use crate::{TxOutSummaryUnblindingData, TxSummaryUnblindingData}; use alloc::vec::Vec; use mc_crypto_ring_signature_signer::RingSigner; use mc_transaction_core::{ @@ -11,6 +10,7 @@ use mc_transaction_core::{ tx::{Tx, TxPrefix}, FeeMap, }; +use mc_transaction_summary::{TxOutSummaryUnblindingData, TxSummaryUnblindingData}; use mc_transaction_types::{Amount, BlockVersion, TokenId, TxSummary, UnmaskedAmount}; use rand_core::{CryptoRng, RngCore}; use serde::{Deserialize, Serialize}; diff --git a/transaction/extra/tests/verifier.rs b/transaction/extra/tests/verifier.rs index c17ec518f9..327ba6d5e7 100644 --- a/transaction/extra/tests/verifier.rs +++ b/transaction/extra/tests/verifier.rs @@ -17,7 +17,8 @@ use mc_transaction_core::{ tx::Tx, Amount, BlockVersion, Token, 
TokenId, }; -use mc_transaction_extra::{verify_tx_summary, TransactionEntity, UnsignedTx}; +use mc_transaction_extra::UnsignedTx; +use mc_transaction_summary::{verify_tx_summary, TransactionEntity}; use mc_util_from_random::FromRandom; use mc_util_serial::encode; use rand::{rngs::StdRng, SeedableRng}; diff --git a/transaction/signer/Cargo.toml b/transaction/signer/Cargo.toml index 764f8ba3c4..c70d679074 100644 --- a/transaction/signer/Cargo.toml +++ b/transaction/signer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "mc-transaction-signer" -version = "4.0.1" +version = "4.1.0-pre0" authors = ["MobileCoin"] edition = "2021" license = "Apache-2.0" @@ -33,6 +33,7 @@ mc-crypto-ring-signature = { path = "../../crypto/ring-signature" } mc-crypto-ring-signature-signer = { path = "../../crypto/ring-signature/signer" } mc-transaction-core = { path = "../../transaction/core" } mc-transaction-extra = { path = "../../transaction/extra" } +mc-transaction-summary = { path = "../../transaction/summary" } mc-util-repr-bytes = { path = "../../util/repr-bytes", default-features = false } mc-util-serial = { path = "../../util/serial", default-features = false } diff --git a/transaction/signer/src/lib.rs b/transaction/signer/src/lib.rs index d3d38e42b1..33503358c9 100644 --- a/transaction/signer/src/lib.rs +++ b/transaction/signer/src/lib.rs @@ -8,13 +8,11 @@ use std::path::Path; use clap::Parser; use log::debug; - -use mc_crypto_keys::RistrettoPublic; -use mc_transaction_extra::TxSummaryUnblindingData; use rand_core::{CryptoRng, OsRng, RngCore}; use serde::{de::DeserializeOwned, Serialize}; use mc_core::keys::TxOutPublic; +use mc_crypto_keys::RistrettoPublic; use mc_crypto_ring_signature_signer::RingSigner; use mc_transaction_core::{ ring_ct::{ @@ -24,6 +22,7 @@ use mc_transaction_core::{ tx::Tx, Amount, TokenId, TxSummary, UnmaskedAmount, }; +use mc_transaction_summary::TxSummaryUnblindingData; pub mod types; use types::*; diff --git a/transaction/signer/src/types.rs 
b/transaction/signer/src/types.rs index b10ae8ce72..f65a31423d 100644 --- a/transaction/signer/src/types.rs +++ b/transaction/signer/src/types.rs @@ -11,7 +11,7 @@ use mc_transaction_core::{ tx::{Tx, TxPrefix}, BlockVersion, }; -use mc_transaction_extra::TxOutSummaryUnblindingData; +use mc_transaction_summary::TxOutSummaryUnblindingData; use serde::{Deserialize, Serialize}; /// View account credentials produced by a signer implementation diff --git a/transaction/summary/Cargo.toml b/transaction/summary/Cargo.toml new file mode 100644 index 0000000000..60f80d01c0 --- /dev/null +++ b/transaction/summary/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "mc-transaction-summary" +version = "4.1.0-pre0" +authors = ["MobileCoin"] +edition = "2021" +readme = "README.md" + +[features] +std = ["prost?/std"] +serde = [ + "dep:serde", + "mc-account-keys?/serde", + "mc-crypto-keys/serde", + "mc-crypto-ring-signature/serde", + "mc-transaction-types/serde", +] +prost = [ + "dep:prost", + "mc-account-keys?/prost", + "mc-crypto-keys/prost", + "mc-crypto-ring-signature/prost", + "mc-transaction-types/prost", +] + +default = ["std", "serde", "prost", "mc-account-keys"] + +[dependencies] +# External dependencies +displaydoc = { version = "0.2", default-features = false } + +# MobileCoin dependencies +mc-account-keys = { path = "../../account-keys", optional = true, default-features = false } +mc-core = { path = "../../core", default-features = false } +mc-crypto-digestible = { path = "../../crypto/digestible", default-features = false, features = ["dalek", "derive"] } +mc-crypto-keys = { path = "../../crypto/keys", default-features = false } +mc-crypto-ring-signature = { path = "../../crypto/ring-signature", default-features = false } +mc-transaction-types = { path = "../types", default-features = false, features = [ "alloc" ] } +mc-util-vec-map = { path = "../../util/vec-map" } +mc-util-zip-exact = { path = "../../util/zip-exact", default-features = false } + +prost = { version = 
"0.11", optional = true, default-features = false, features = ["prost-derive"] } +serde = { version = "1.0", optional = true, default-features = false, features = ["derive"] } +subtle = { version = "2.4.1", default-features = false, features = ["i128"] } +zeroize = { version = "1", default-features = false } diff --git a/transaction/summary/README.md b/transaction/summary/README.md new file mode 100644 index 0000000000..e3aa871e23 --- /dev/null +++ b/transaction/summary/README.md @@ -0,0 +1,8 @@ +# mc-transaction-summary + +This crate provides types and implementations for computing transaction summaries and associated reports, used in the calculation of the extended message digest for construction of transactions in block versions >= 3 and allowing hardware devices or offline-signers to generate reports to verify the contents of a transaction prior to signing. + +A high-level `verify_tx_summary` function is provided to check the a summary against a transaction using `TxSummaryUnblindingData` and `TxOutSummaryUnblindingData` types, avaialable under the `default` or `mc-account-keys` features. + +A `no_std` compatible streaming interface `TxSummaryStreamingVerifierCtx` for verifying transaction summaries and generating associated reports, for use in memory-constrained contexts such as hardware wallets. 
+ diff --git a/transaction/summary/src/data.rs b/transaction/summary/src/data.rs new file mode 100644 index 0000000000..04c569af64 --- /dev/null +++ b/transaction/summary/src/data.rs @@ -0,0 +1,142 @@ +// Copyright (c) 2018-2023 The MobileCoin Foundation + +use alloc::vec::Vec; + +use super::{Error, TxSummaryUnblindingReport}; +use crate::TxSummaryStreamingVerifierCtx; +use mc_account_keys::PublicAddress; +use mc_core::account::ShortAddressHash; +use mc_crypto_digestible::Digestible; +use mc_crypto_keys::RistrettoPrivate; +use mc_transaction_types::{Amount, TxSummary, UnmaskedAmount}; +use mc_util_zip_exact::zip_exact; +#[cfg(feature = "prost")] +use prost::Message; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use zeroize::Zeroize; + +/// Unblinding data correpsonding to a TxSummary. This reveals the amounts of +/// all inputs and outputs in a way that can be confirmed against the TxSummary. +#[derive(Clone, Digestible, Eq, PartialEq, Zeroize)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[cfg_attr(feature = "prost", derive(Message))] +#[cfg_attr(not(feature = "prost"), derive(Debug))] +pub struct TxSummaryUnblindingData { + /// The block version targetted by the outputs of this Tx + #[cfg_attr(feature = "prost", prost(uint32, tag = "1"))] + pub block_version: u32, + + /// A TxOutSummaryUnblindingData, one for each transaction output + #[cfg_attr(feature = "prost", prost(message, repeated, tag = "2"))] + pub outputs: Vec, + + /// An unmasked amount, one for each transaction input, corresponding to + /// the pseudo-output commitment + #[cfg_attr(feature = "prost", prost(message, repeated, tag = "3"))] + pub inputs: Vec, +} + +/// Unblinding data corresponding to a TxOutSummary. This reveals the amount +/// and, usually, the Public Address to which this TxOut is addressed. 
+#[derive(Clone, Digestible, Eq, PartialEq, Zeroize)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +#[cfg_attr(feature = "prost", derive(Message))] +#[cfg_attr(not(feature = "prost"), derive(Debug))] +pub struct TxOutSummaryUnblindingData { + /// An unmasked amount, corresponding to the MaskedAmount field + /// The block vesion appears in the TxSummaryUnblindingData. + #[cfg_attr(feature = "prost", prost(message, required, tag = "1"))] + pub unmasked_amount: UnmaskedAmount, + + /// The public address to which this TxOut is addressed. + /// If this output comes from an SCI then we may not know the public + /// address. + #[cfg_attr(feature = "prost", prost(message, optional, tag = "2"))] + pub address: Option, + + /// The tx_private_key generated for this TxOut. This is an entropy source + /// which introduces randomness into the cryptonote stealth addresses + /// (tx_public_key and tx_target_key) of the TxOut. + /// + /// If this output comes from an SCI then we may not know this. + #[cfg_attr(feature = "prost", prost(message, optional, tag = "3"))] + pub tx_private_key: Option, +} + +/// Exercise the functionality of the streaming verifier, and return its +/// results. +/// +/// This is mainly useful for testing / demonstration purposes, since the more +/// interesting use-case is when the streaming verifier is on a small remote +/// device and doesn't have the full TxSummary or TxSummaryUnblindingData on +/// hand. 
+pub fn verify_tx_summary( + extended_message_digest: &[u8; 32], + tx_summary: &TxSummary, + unblinding_data: &TxSummaryUnblindingData, + view_private_key: RistrettoPrivate, +) -> Result<([u8; 32], TxSummaryUnblindingReport), Error> { + let mut verifier = TxSummaryStreamingVerifierCtx::new( + extended_message_digest, + unblinding_data.block_version.try_into()?, + tx_summary.outputs.len(), + tx_summary.inputs.len(), + view_private_key, + ); + let mut report = TxSummaryUnblindingReport::default(); + + for (tx_out_summary, tx_out_unblinding_data) in + zip_exact(tx_summary.outputs.iter(), unblinding_data.outputs.iter())? + { + let TxOutSummaryUnblindingData { + unmasked_amount, + address, + tx_private_key, + } = tx_out_unblinding_data; + let address = address.as_ref().map(|v| (ShortAddressHash::from(v), v)); + + verifier.digest_output( + tx_out_summary, + unmasked_amount, + address, + tx_private_key.as_ref(), + &mut report, + )?; + } + for (tx_in_summary, tx_in_unblinding_data) in + zip_exact(tx_summary.inputs.iter(), unblinding_data.inputs.iter())? 
+ { + verifier.digest_input(tx_in_summary, tx_in_unblinding_data, &mut report)?; + } + + let mut digest = [0u8; 32]; + verifier.finalize( + Amount::new(tx_summary.fee, tx_summary.fee_token_id.into()), + tx_summary.tombstone_block, + &mut digest, + &mut report, + ); + + // In a debug build, confirm the digest by computing it in a non-streaming way + // + // Note: this needs to be kept in sync with the compute_mlsag_signing_digest + // function in transaction_core::ring_ct::rct_bulletproofs + #[cfg(debug)] + { + let mut transcript = + MerlinTranscript::new(EXTENDED_MESSAGE_AND_TX_SUMMARY_DOMAIN_TAG.as_bytes()); + extended_message.append_to_transcript(b"extended_message", &mut transcript); + tx_summary.append_to_transcript(b"tx_summary", &mut transcript); + + // Extract digest + let mut output = [0u8; 32]; + transcript.extract_digest(&mut output); + + assert_eq!( + output, digest, + "streaming verifier did not compute correct digest" + ); + } + Ok((digest, report)) +} diff --git a/transaction/extra/src/tx_summary_unblinding/error.rs b/transaction/summary/src/error.rs similarity index 100% rename from transaction/extra/src/tx_summary_unblinding/error.rs rename to transaction/summary/src/error.rs diff --git a/transaction/summary/src/lib.rs b/transaction/summary/src/lib.rs new file mode 100644 index 0000000000..cf6de2c0ba --- /dev/null +++ b/transaction/summary/src/lib.rs @@ -0,0 +1,22 @@ +// Copyright (c) 2018-2023 The MobileCoin Foundation + +// Copyright (c) 2018-2022 The MobileCoin Foundation + +#![no_std] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +extern crate alloc; + +#[cfg(feature = "mc-account-keys")] +mod data; +mod error; +mod report; +mod verifier; + +#[cfg(feature = "mc-account-keys")] +pub use data::{verify_tx_summary, TxOutSummaryUnblindingData, TxSummaryUnblindingData}; + +pub use error::Error; +pub use report::{TransactionEntity, TxSummaryUnblindingReport}; +pub use verifier::TxSummaryStreamingVerifierCtx; diff --git 
a/transaction/extra/src/tx_summary_unblinding/report.rs b/transaction/summary/src/report.rs similarity index 89% rename from transaction/extra/src/tx_summary_unblinding/report.rs rename to transaction/summary/src/report.rs index 540fd974d8..c81d915864 100644 --- a/transaction/extra/src/tx_summary_unblinding/report.rs +++ b/transaction/summary/src/report.rs @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation //! A TxSummaryUnblindingReport, containing the set of verified information //! about a transaction. @@ -6,6 +6,7 @@ use super::Error; use core::fmt::Display; use displaydoc::Display; + use mc_core::account::ShortAddressHash; use mc_transaction_types::{ constants::{MAX_INPUTS, MAX_OUTPUTS}, @@ -25,23 +26,23 @@ pub enum TransactionEntity { Swap, } -const MAX_RECORDS: usize = MAX_OUTPUTS as usize + MAX_INPUTS as usize; +pub const MAX_RECORDS: usize = MAX_OUTPUTS as usize + MAX_INPUTS as usize; /// A report of the parties and balance changes due to a transaction. /// This can be produced for a given TxSummary and TxSummaryUnblindingData. #[derive(Clone, Debug, Default)] -pub struct TxSummaryUnblindingReport { +pub struct TxSummaryUnblindingReport { /// The set of balance changes that we have observed // Note: We can save about 210 bytes on the stack if we store TokenId as // a [u8; 8] to avoid alignment requirements. TBD if that's worth it. 
- pub balance_changes: VecMap<(TransactionEntity, TokenId), i64, MAX_RECORDS>, + pub balance_changes: VecMap<(TransactionEntity, TokenId), i64, RECORDS>, /// The network fee that we pay pub network_fee: Amount, /// The tombstone block associated to this transaction pub tombstone_block: u64, } -impl TxSummaryUnblindingReport { +impl TxSummaryUnblindingReport { /// Add value to the balance report, for some entity pub fn balance_add( &mut self, @@ -82,7 +83,7 @@ impl TxSummaryUnblindingReport { // This is a proof-of-concept, it doesn't map token id's to their symbol when // displaying. -impl Display for TxSummaryUnblindingReport { +impl Display for TxSummaryUnblindingReport { fn fmt(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result { let mut current_entity = None; for ((entity, tok), val) in self.balance_changes.iter() { @@ -90,7 +91,7 @@ impl Display for TxSummaryUnblindingReport { writeln!(formatter, "{entity}:")?; current_entity = Some(entity.clone()); } - writeln!(formatter, "\t{}: {val}", *tok)?; + writeln!(formatter, "\t{}: {}", *tok, val)?; } writeln!( formatter, diff --git a/transaction/extra/src/tx_summary_unblinding/verifier.rs b/transaction/summary/src/verifier.rs similarity index 74% rename from transaction/extra/src/tx_summary_unblinding/verifier.rs rename to transaction/summary/src/verifier.rs index 4e143f5d6c..2c028f03bb 100644 --- a/transaction/extra/src/tx_summary_unblinding/verifier.rs +++ b/transaction/summary/src/verifier.rs @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation //! This module provides support for a "streaming" verifier which consumes an //! extended-message digest, a TxSummary and a TxSummaryUnblindingData, @@ -10,10 +10,7 @@ //! To take the largest "step" (verifying an output) requires //! 
approximately 300 bytes + Fog url length -use super::{ - Error, TransactionEntity, TxOutSummaryUnblindingData, TxSummaryUnblindingData, - TxSummaryUnblindingReport, -}; +use super::{Error, TransactionEntity, TxSummaryUnblindingReport}; use mc_core::account::{RingCtAddress, ShortAddressHash}; use mc_crypto_digestible::{DigestTranscript, Digestible, MerlinTranscript}; use mc_crypto_keys::{RistrettoPrivate, RistrettoPublic}; @@ -23,81 +20,11 @@ use mc_crypto_ring_signature::{ }; use mc_transaction_types::{ domain_separators::EXTENDED_MESSAGE_AND_TX_SUMMARY_DOMAIN_TAG, Amount, AmountError, - BlockVersion, MaskedAmount, TxInSummary, TxOutSummary, TxSummary, UnmaskedAmount, + BlockVersion, MaskedAmount, TxInSummary, TxOutSummary, UnmaskedAmount, }; -use mc_util_zip_exact::zip_exact; - -/// Exercise the functionality of the streaming verifier, and return its -/// results. -/// -/// This is mainly useful for testing / demonstration purposes, since the more -/// interesting use-case is when the streaming verifier is on a small remote -/// device and doesn't have the full TxSummary or TxSummaryUnblindingData on -/// hand. -pub fn verify_tx_summary( - extended_message_digest: &[u8; 32], - tx_summary: &TxSummary, - unblinding_data: &TxSummaryUnblindingData, - view_private_key: RistrettoPrivate, -) -> Result<([u8; 32], TxSummaryUnblindingReport), Error> { - let mut verifier = TxSummaryStreamingVerifier::new( - extended_message_digest, - unblinding_data.block_version.try_into()?, - tx_summary.outputs.len(), - tx_summary.inputs.len(), - view_private_key, - ); - for (tx_out_summary, tx_out_unblinding_data) in - zip_exact(tx_summary.outputs.iter(), unblinding_data.outputs.iter())? 
- { - let TxOutSummaryUnblindingData { - unmasked_amount, - address, - tx_private_key, - } = tx_out_unblinding_data; - let address = address.as_ref().map(|v| (ShortAddressHash::from(v), v)); - - verifier.digest_output( - tx_out_summary, - unmasked_amount, - address, - tx_private_key.as_ref(), - )?; - } - for (tx_in_summary, tx_in_unblinding_data) in - zip_exact(tx_summary.inputs.iter(), unblinding_data.inputs.iter())? - { - verifier.digest_input(tx_in_summary, tx_in_unblinding_data)?; - } - let (digest, report) = verifier.finalize( - Amount::new(tx_summary.fee, tx_summary.fee_token_id.into()), - tx_summary.tombstone_block, - )?; - - // In a debug build, confirm the digest by computing it in a non-streaming way - // - // Note: this needs to be kept in sync with the compute_mlsag_signing_digest - // function in transaction_core::ring_ct::rct_bulletproofs - #[cfg(debug)] - { - let mut transcript = - MerlinTranscript::new(EXTENDED_MESSAGE_AND_TX_SUMMARY_DOMAIN_TAG.as_bytes()); - extended_message.append_to_transcript(b"extended_message", &mut transcript); - tx_summary.append_to_transcript(b"tx_summary", &mut transcript); - - // Extract digest - let mut output = [0u8; 32]; - transcript.extract_digest(&mut output); - assert_eq!( - output, digest, - "streaming verifier did not compute correct digest" - ); - } - Ok((digest, report)) -} - -/// An object intended for hardware wallets to use, with a dual purpose. +/// A streaming transaction summary verifier for use in hardware wallets, +/// with a dual purpose. /// /// * Compute the "extended-message-and-tx-summary" digest in a "streaming" way /// that does not require sending the entire TxSummary at once. Only one input @@ -108,6 +35,9 @@ pub fn verify_tx_summary( /// change for this party. Return the TxSummaryUnblindingReport along with the /// final digest, which is fully-verified to be accurate. 
/// +/// This streaming interface is provided for use by hardware wallets, +/// most users should use the higher-level [verify_tx_summary] +/// /// The TxSummaryUnblindingReport can be displayed to the hardware wallet user, /// and then if they approve, Ring MLSAGs can be signed over the digest produced /// by this verifier, knowing what the significance of signing these is. @@ -117,7 +47,7 @@ pub fn verify_tx_summary( /// implementation details of the mc-crypto-digestible scheme. /// If TxSummary digestible annotations are changed then this object's /// implementation needs to change also. -pub struct TxSummaryStreamingVerifier { +pub struct TxSummaryStreamingVerifierCtx { // The account view private key of the transaction signer. // This is used to identify outputs addressed to ourselves regardless of subaddress view_private_key: RistrettoPrivate, @@ -126,9 +56,6 @@ pub struct TxSummaryStreamingVerifier { // The merlin transcript which we maintain in order to produce the digest // at the end. transcript: MerlinTranscript, - // The report which we produce about what balance changes occur for what - // parties - report: TxSummaryUnblindingReport, // The total number of outputs expected expected_num_outputs: usize, // The total number of inputs expected @@ -139,7 +66,7 @@ pub struct TxSummaryStreamingVerifier { input_count: usize, } -impl TxSummaryStreamingVerifier { +impl TxSummaryStreamingVerifierCtx { /// Start a new streaming verifier. This takes a few small arguments from /// TxSummary and TxSummaryUnblindingData which are needed before we can /// consume outputs and inputs. 
This also takes the view private key of @@ -170,13 +97,10 @@ impl TxSummaryStreamingVerifier { // Append start of TxSummary.outputs list transcript.append_seq_header(b"outputs", expected_num_outputs); - // Default initialize the report - let report = TxSummaryUnblindingReport::default(); Self { view_private_key, block_version, transcript, - report, expected_num_outputs, expected_num_inputs, output_count: 0, @@ -186,12 +110,13 @@ impl TxSummaryStreamingVerifier { /// Stream the next TxOutSummary and matching unblinding data to the /// streaming verifier, which will verify and then digest it. - pub fn digest_output( + pub fn digest_output( &mut self, tx_out_summary: &TxOutSummary, unmasked_amount: &UnmaskedAmount, address: Option<(ShortAddressHash, impl RingCtAddress)>, tx_private_key: Option<&RistrettoPrivate>, + report: &mut TxSummaryUnblindingReport, ) -> Result<(), Error> { if self.output_count >= self.expected_num_outputs { return Err(Error::UnexpectedOutput); @@ -201,8 +126,7 @@ impl TxSummaryStreamingVerifier { // with the listed address, or this is associated to an SCI. if let Some(amount) = self.view_key_match(tx_out_summary)? 
{ // If we view-key matched the output, then it belongs to one of our subaddresses - self.report - .balance_add(TransactionEntity::Ourself, amount.token_id, amount.value)?; + report.balance_add(TransactionEntity::Ourself, amount.token_id, amount.value)?; } else if let Some((address_hash, address)) = address.as_ref() { let amount = Amount::new(unmasked_amount.value, unmasked_amount.token_id.into()); // In this case, we are given the address of who is supposed to have received @@ -212,7 +136,7 @@ impl TxSummaryStreamingVerifier { let expected = Self::expected_tx_out_summary(self.block_version, amount, address, tx_private_key)?; if &expected == tx_out_summary { - self.report.balance_add( + report.balance_add( TransactionEntity::Address(address_hash.clone()), amount.token_id, amount.value, @@ -241,8 +165,7 @@ impl TxSummaryStreamingVerifier { { return Err(Error::AmountVerificationFailed); } - self.report - .balance_add(TransactionEntity::Swap, token_id.into(), value)?; + report.balance_add(TransactionEntity::Swap, token_id.into(), value)?; } // We've now verified the tx_out_summary and added it to the report. @@ -263,10 +186,11 @@ impl TxSummaryStreamingVerifier { /// Stream the next TxInSummary and matching unblinding data to the /// streaming verifier, which will verify and then digest it. - pub fn digest_input( + pub fn digest_input( &mut self, tx_in_summary: &TxInSummary, tx_in_summary_unblinding_data: &UnmaskedAmount, + report: &mut TxSummaryUnblindingReport, ) -> Result<(), Error> { if self.output_count != self.expected_num_outputs { return Err(Error::StillExpectingMoreOutputs); @@ -293,8 +217,7 @@ impl TxSummaryStreamingVerifier { TransactionEntity::Swap }; - self.report - .balance_subtract(entity, token_id.into(), value)?; + report.balance_subtract(entity, token_id.into(), value)?; // We've now verified the tx_in_summary and added it to the report. 
// Now we need to add it to the digest @@ -316,21 +239,16 @@ impl TxSummaryStreamingVerifier { /// * extended-message-and-tx-summary digest /// * TxSummaryUnblindingReport, which details all balance changes for all /// parties to this Tx. - pub fn finalize( + pub fn finalize( mut self, fee: Amount, tombstone_block: u64, - ) -> Result<([u8; 32], TxSummaryUnblindingReport), Error> { - if self.output_count != self.expected_num_outputs { - return Err(Error::StillExpectingMoreOutputs); - } - if self.input_count != self.expected_num_inputs { - return Err(Error::StillExpectingMoreInputs); - } - - self.report.network_fee = fee; - self.report.tombstone_block = tombstone_block; - self.report.sort(); + digest: &mut [u8; 32], + report: &mut TxSummaryUnblindingReport, + ) { + report.network_fee = fee; + report.tombstone_block = tombstone_block; + report.sort(); fee.value.append_to_transcript(b"fee", &mut self.transcript); (*fee.token_id).append_to_transcript(b"fee_token_id", &mut self.transcript); @@ -341,10 +259,7 @@ impl TxSummaryStreamingVerifier { .append_agg_closer(b"tx_summary", b"TxSummary"); // Extract the digest - let mut digest = [0u8; 32]; - self.transcript.extract_digest(&mut digest); - - Ok((digest, self.report)) + self.transcript.extract_digest(digest); } // Internal: Check if TxOutSummary matches to our view private key @@ -409,7 +324,13 @@ mod tests { // Test the size of the streaming verifier on the stack. This is using heapless. #[test] fn test_streaming_verifier_size() { - assert_eq!(core::mem::size_of::<TxSummaryStreamingVerifier>(), 1600); + let s = core::mem::size_of::<TxSummaryStreamingVerifierCtx>(); + assert!( + s < 512, + "TxSummaryStreamingVerifierCtx exceeds size threshold {}/{}", + s, + 512 + ); } // Note: Most tests are in transaction/extra/tests to avoid build issues. diff --git a/transaction/types/src/amount/error.rs b/transaction/types/src/amount/error.rs index 7b8e0c7af0..c90627d39b 100644 --- a/transaction/types/src/amount/error.rs +++ b/transaction/types/src/amount/error.rs @@ -3,10 +3,12 @@ //!
Errors that can occur when handling an amount commitment. use displaydoc::Display; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; /// An error which can occur when handling an amount commitment. -#[derive(Clone, Debug, Deserialize, Display, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[derive(Clone, Debug, Display, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum AmountError { /** * The masked value, token id, or shared secret are not consistent with diff --git a/util/vec-map/Cargo.toml b/util/vec-map/Cargo.toml index 9d0aa983f3..1ee04b88b1 100644 --- a/util/vec-map/Cargo.toml +++ b/util/vec-map/Cargo.toml @@ -9,5 +9,5 @@ readme = "README.md" rust-version = { workspace = true } [dependencies] -displaydoc = "0.2" +displaydoc = { version = "0.2", default-features = false } heapless = { version = "0.7.16", default-features = false } diff --git a/util/vec-map/src/lib.rs b/util/vec-map/src/lib.rs index e755806aca..6a915b6ce4 100644 --- a/util/vec-map/src/lib.rs +++ b/util/vec-map/src/lib.rs @@ -102,6 +102,19 @@ impl VecMap { pub fn iter(&self) -> IterVecMap { IterVecMap::new(self) } + + /// Fetch a (K, V) item by index + #[inline] + pub fn index(&self, index: usize) -> Option<(&K, &V)> { + if index + 1 > self.len() { + return None; + } + + match (self.keys.get(index), self.values.get(index)) { + (Some(k), Some(v)) => Some((k, v)), + _ => None, + } + } } // Sorting is possible when keys are ordered, and keys and values are cloneable @@ -110,7 +123,7 @@ impl VecMap { pub fn sort(&mut self) { // First compute the order that would sort the set of keys let mut indices: Vec = (0..self.keys.len()).collect(); - indices.sort_by_key(|&i| &self.keys[i]); + indices.sort_unstable_by_key(|&i| &self.keys[i]); // Make new key and val sets let mut new_keys = Vec::::default(); let mut new_vals = Vec::::default(); diff --git a/util/zip-exact/Cargo.toml b/util/zip-exact/Cargo.toml index 
6f1ffa3ba7..8dd468fbc8 100644 --- a/util/zip-exact/Cargo.toml +++ b/util/zip-exact/Cargo.toml @@ -8,5 +8,8 @@ license = "Apache-2.0" readme = "README.md" rust-version = { workspace = true } +[features] +default = ["serde"] + [dependencies] -serde = { version = "1.0", default-features = false } +serde = { version = "1.0", default-features = false, optional = true } diff --git a/util/zip-exact/src/lib.rs b/util/zip-exact/src/lib.rs index 2792ed6cbc..d1df4f62c9 100644 --- a/util/zip-exact/src/lib.rs +++ b/util/zip-exact/src/lib.rs @@ -9,6 +9,7 @@ use core::{ fmt::{self, Debug, Display}, iter::Zip, }; +#[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; /// An alternate version of `Iterator::zip` which returns an error if the two @@ -30,7 +31,8 @@ where } /// An error that occurs when zip_exact fails -#[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ZipExactError(usize, usize); impl Display for ZipExactError { From 31455afa9d2b15c316a5bfb02c1120eaf6601c10 Mon Sep 17 00:00:00 2001 From: Eran Rundstein Date: Tue, 4 Apr 2023 20:28:25 -0700 Subject: [PATCH 29/46] include necessary optional dep when the serde feature is enabled (#3303) --- crypto/keys/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/keys/Cargo.toml b/crypto/keys/Cargo.toml index 2c519af14c..67777ee32b 100644 --- a/crypto/keys/Cargo.toml +++ b/crypto/keys/Cargo.toml @@ -10,9 +10,9 @@ rust-version = { workspace = true } [features] alloc = ["base64/alloc", "curve25519-dalek/alloc", "ed25519-dalek/alloc", "mc-crypto-digestible/alloc", "mc-crypto-digestible-signature/alloc", "mc-util-repr-bytes/alloc"] -serde = ["dep:serde", "ed25519/serde", "curve25519-dalek/serde", "ed25519-dalek/serde", "mc-util-repr-bytes/serde"] +serde = ["dep:serde", "dep:mc-util-serial", "ed25519/serde", 
"curve25519-dalek/serde", "ed25519-dalek/serde", "mc-util-repr-bytes/serde"] prost = ["alloc", "mc-util-repr-bytes/prost"] -default = ["alloc", "serde", "prost", "mc-util-repr-bytes/default", "curve25519-dalek/default", "mc-util-serial"] +default = ["alloc", "serde", "prost", "mc-util-repr-bytes/default", "curve25519-dalek/default", "dep:mc-util-serial"] [dependencies] From 3492fff2cf47983527a0c01c5d4963c664b5d08f Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 6 Apr 2023 21:06:17 -0400 Subject: [PATCH 30/46] Merge in suggested changes --- consensus/service/src/api/client_api_service.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 4a0d97f8d4..ceead36a12 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -64,9 +64,9 @@ impl ClientSessionTracking { /// Remove any transaction proposal failure record that is older than our /// tracking window. - fn clear_stale_records(&mut self, now: &Instant, tracking_window: &Duration) { + fn clear_stale_records(&mut self, now: Instant, tracking_window: Duration) { self.tx_proposal_failures.retain(|past_failure| { - now.saturating_duration_since(*past_failure) <= *tracking_window + now.saturating_duration_since(*past_failure) <= tracking_window }); } @@ -83,9 +83,9 @@ impl ClientSessionTracking { /// of an individual tx proposal failure incident for? Any records which /// have existed for longer than this value will be dropped when this /// method is called. 
- pub fn fail_tx_proposal(&mut self, now: &Instant, tracking_window: &Duration) -> usize { + pub fn fail_tx_proposal(&mut self, now: Instant, tracking_window: Duration) -> usize { self.clear_stale_records(now, tracking_window); - self.tx_proposal_failures.push_back(*now); + self.tx_proposal_failures.push_back(now); self.tx_proposal_failures.len() } } @@ -362,7 +362,7 @@ impl ConsensusClientApi for ClientApiService { .get_mut(&session_id) .expect("Adding session-tracking record should be atomic.") }; - record.fail_tx_proposal(&Instant::now(), &self.config.tx_failure_window); + record.fail_tx_proposal(Instant::now(), self.config.tx_failure_window); } result.or_else(ConsensusGrpcError::into) }; From be4fd1353f149e473c0bc5665012de90b2f4da34 Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 6 Apr 2023 21:27:27 -0400 Subject: [PATCH 31/46] cargo fmt --- consensus/service/src/api/client_api_service.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index ceead36a12..1d1dac2ed4 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -65,9 +65,8 @@ impl ClientSessionTracking { /// Remove any transaction proposal failure record that is older than our /// tracking window. 
fn clear_stale_records(&mut self, now: Instant, tracking_window: Duration) { - self.tx_proposal_failures.retain(|past_failure| { - now.saturating_duration_since(*past_failure) <= tracking_window - }); + self.tx_proposal_failures + .retain(|past_failure| now.saturating_duration_since(*past_failure) <= tracking_window); } /// Push a new failed tx proposal record, clear out samples older than From 765127615628e590b73f34193cbb51cb6174dc50 Mon Sep 17 00:00:00 2001 From: wjuan-mob Date: Thu, 6 Apr 2023 13:02:37 -0400 Subject: [PATCH 32/46] Refactor out a shared compute authenticated sender and computer destination function (#3275) * Refactor compute authenticated sender memo and compute destination memo. Add new AuthenticatedSenderWithData and DestinationWithData memo types per MCIP#60 Clean up match statement and add new memo with data types to sample paykit memo handler Removing with data types due to discussion on mcip/60 Update dependencies to get cd to pass Update dependencies to get cd to pass * Refactor compute authenticated sender memo and compute destination memo. 
Add new AuthenticatedSenderWithData and DestinationWithData memo types per MCIP#60 Clean up match statement and add new memo with data types to sample paykit memo handler Removing with data types due to discussion on mcip/60 Update dependencies to get cd to pass Update dependencies to get cd to pass --- .../src/cached_tx_data/memo_handler.rs | 44 +++++++++---------- transaction/extra/src/lib.rs | 8 ++-- .../extra/src/memo/authenticated_common.rs | 35 ++++++++++++++- .../extra/src/memo/authenticated_sender.rs | 27 ++++-------- ...enticated_sender_with_payment_intent_id.rs | 27 +++++------- ...nticated_sender_with_payment_request_id.rs | 28 +++++------- transaction/extra/src/memo/destination.rs | 34 +++++++++++--- .../destination_with_payment_intent_id.rs | 20 +++++---- .../destination_with_payment_request_id.rs | 20 +++++---- transaction/extra/src/memo/mod.rs | 30 ++++++------- 10 files changed, 152 insertions(+), 121 deletions(-) diff --git a/fog/sample-paykit/src/cached_tx_data/memo_handler.rs b/fog/sample-paykit/src/cached_tx_data/memo_handler.rs index 2ad43a449f..5b950d25aa 100644 --- a/fog/sample-paykit/src/cached_tx_data/memo_handler.rs +++ b/fog/sample-paykit/src/cached_tx_data/memo_handler.rs @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation //! A memo handler object which processes memos, for use in integration tests @@ -86,7 +86,7 @@ impl MemoHandler { Err(MemoHandlerError::UnknownSender) } } - MemoType::AuthenticatedSenderWithPaymentRequestId(memo) => { + MemoType::AuthenticatedSenderWithPaymentIntentId(memo) => { if let Some(addr) = self.contacts.get(&memo.sender_address_hash()) { if bool::from(memo.validate( addr, @@ -101,26 +101,7 @@ impl MemoHandler { Err(MemoHandlerError::UnknownSender) } } - MemoType::Destination(_) => { - if subaddress_matches_tx_out(account_key, CHANGE_SUBADDRESS_INDEX, tx_out)? 
{ - Ok(Some(memo_type)) - } else { - Err(MemoHandlerError::FailedSubaddressValidation) - } - } - MemoType::GiftCodeCancellation(_) => { - // TODO: Add Gift Code Memo Validation - Ok(Some(memo_type)) - } - MemoType::GiftCodeFunding(_) => { - // TODO: Add Gift Code Memo Validation - Ok(Some(memo_type)) - } - MemoType::GiftCodeSender(_) => { - // TODO: Add Gift Code Validation - Ok(Some(memo_type)) - } - MemoType::AuthenticatedSenderWithPaymentIntentId(memo) => { + MemoType::AuthenticatedSenderWithPaymentRequestId(memo) => { if let Some(addr) = self.contacts.get(&memo.sender_address_hash()) { if bool::from(memo.validate( addr, @@ -135,6 +116,13 @@ impl MemoHandler { Err(MemoHandlerError::UnknownSender) } } + MemoType::Destination(_) => { + if subaddress_matches_tx_out(account_key, CHANGE_SUBADDRESS_INDEX, tx_out)? { + Ok(Some(memo_type)) + } else { + Err(MemoHandlerError::FailedSubaddressValidation) + } + } MemoType::DestinationWithPaymentRequestId(_) => { if subaddress_matches_tx_out(account_key, CHANGE_SUBADDRESS_INDEX, tx_out)? 
{ Ok(Some(memo_type)) @@ -156,6 +144,18 @@ impl MemoHandler { Err(MemoHandlerError::FailedSubaddressValidation) } } + MemoType::GiftCodeCancellation(_) => { + // TODO: Add Gift Code Memo Validation + Ok(Some(memo_type)) + } + MemoType::GiftCodeFunding(_) => { + // TODO: Add Gift Code Memo Validation + Ok(Some(memo_type)) + } + MemoType::GiftCodeSender(_) => { + // TODO: Add Gift Code Validation + Ok(Some(memo_type)) + } } } } diff --git a/transaction/extra/src/lib.rs b/transaction/extra/src/lib.rs index 1c4d13e0b2..65157d283f 100644 --- a/transaction/extra/src/lib.rs +++ b/transaction/extra/src/lib.rs @@ -19,10 +19,10 @@ mod tx_out_gift_code; mod unsigned_tx; pub use memo::{ - AuthenticatedSenderMemo, AuthenticatedSenderWithPaymentIntentIdMemo, - AuthenticatedSenderWithPaymentRequestIdMemo, BurnRedemptionMemo, DefragmentationMemo, - DefragmentationMemoError, DestinationMemo, DestinationMemoError, - DestinationWithPaymentIntentIdMemo, DestinationWithPaymentRequestIdMemo, + compute_authenticated_sender_memo, compute_destination_memo, AuthenticatedSenderMemo, + AuthenticatedSenderWithPaymentIntentIdMemo, AuthenticatedSenderWithPaymentRequestIdMemo, + BurnRedemptionMemo, DefragmentationMemo, DefragmentationMemoError, DestinationMemo, + DestinationMemoError, DestinationWithPaymentIntentIdMemo, DestinationWithPaymentRequestIdMemo, GiftCodeCancellationMemo, GiftCodeFundingMemo, GiftCodeSenderMemo, MemoDecodingError, MemoType, RegisteredMemoType, SenderMemoCredential, UnusedMemo, }; diff --git a/transaction/extra/src/memo/authenticated_common.rs b/transaction/extra/src/memo/authenticated_common.rs index 6abb3366af..54bd69fd1a 100644 --- a/transaction/extra/src/memo/authenticated_common.rs +++ b/transaction/extra/src/memo/authenticated_common.rs @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation //! HMAC code shared by all category 0x01 memos. //! 
@@ -7,10 +7,14 @@ use hmac::{Hmac, Mac}; use mc_account_keys::{PublicAddress, ShortAddressHash}; -use mc_crypto_keys::{CompressedRistrettoPublic, KexReusablePrivate, RistrettoPrivate}; +use mc_crypto_keys::{ + CompressedRistrettoPublic, KexReusablePrivate, RistrettoPrivate, RistrettoPublic, +}; use sha2::Sha512; use subtle::{Choice, ConstantTimeEq}; +use crate::SenderMemoCredential; + type HmacSha512 = Hmac; /// Shared code for memo types in category 0x01, whose last 16 bytes is an HMAC @@ -70,3 +74,30 @@ pub fn validate_authenticated_sender( result &= expected_hmac.ct_eq(&found_hmac); result } + +/// Shared code for creation of an authenticated sender memo with additional +/// data +pub fn compute_authenticated_sender_memo( + memo_type_bytes: [u8; 2], + cred: &SenderMemoCredential, + receiving_subaddress_view_public_key: &RistrettoPublic, + tx_out_public_key: &CompressedRistrettoPublic, + data: &[u8], +) -> [u8; 64] { + let mut memo_data = [0u8; 64]; + memo_data[..16].copy_from_slice(cred.address_hash.as_ref()); + memo_data[16..48].copy_from_slice(data); + + let shared_secret = cred + .subaddress_spend_private_key + .key_exchange(receiving_subaddress_view_public_key); + + let hmac_value = compute_category1_hmac( + shared_secret.as_ref(), + tx_out_public_key, + memo_type_bytes, + &memo_data, + ); + memo_data[48..].copy_from_slice(&hmac_value); + memo_data +} diff --git a/transaction/extra/src/memo/authenticated_sender.rs b/transaction/extra/src/memo/authenticated_sender.rs index d7f8b42def..81c77401a3 100644 --- a/transaction/extra/src/memo/authenticated_sender.rs +++ b/transaction/extra/src/memo/authenticated_sender.rs @@ -1,19 +1,17 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation //! Object for 0x0100 Authenticated Sender memo type //! //! 
This was proposed for standardization in mobilecoinfoundation/mcips/pull/4 use super::{ - authenticated_common::{compute_category1_hmac, validate_authenticated_sender}, + authenticated_common::{compute_authenticated_sender_memo, validate_authenticated_sender}, credential::SenderMemoCredential, RegisteredMemoType, }; use crate::impl_memo_type_conversions; use mc_account_keys::{PublicAddress, ShortAddressHash}; -use mc_crypto_keys::{ - CompressedRistrettoPublic, KexReusablePrivate, RistrettoPrivate, RistrettoPublic, -}; +use mc_crypto_keys::{CompressedRistrettoPublic, RistrettoPrivate, RistrettoPublic}; use subtle::Choice; /// A memo that the sender writes to convey their identity in an authenticated @@ -55,21 +53,14 @@ impl AuthenticatedSenderMemo { // [0-16) address hash // [16-48) unused // [48-64) HMAC - - let mut memo_data = [0u8; 64]; - memo_data[..16].copy_from_slice(cred.address_hash.as_ref()); - - let shared_secret = cred - .subaddress_spend_private_key - .key_exchange(receiving_subaddress_view_public_key); - - let hmac_value = compute_category1_hmac( - shared_secret.as_ref(), - tx_out_public_key, + let data = [0u8; (48 - 16)]; + let memo_data = compute_authenticated_sender_memo( Self::MEMO_TYPE_BYTES, - &memo_data, + cred, + receiving_subaddress_view_public_key, + tx_out_public_key, + &data, ); - memo_data[48..].copy_from_slice(&hmac_value); Self { memo_data } } diff --git a/transaction/extra/src/memo/authenticated_sender_with_payment_intent_id.rs b/transaction/extra/src/memo/authenticated_sender_with_payment_intent_id.rs index 33829c55f8..ea54adda3c 100644 --- a/transaction/extra/src/memo/authenticated_sender_with_payment_intent_id.rs +++ b/transaction/extra/src/memo/authenticated_sender_with_payment_intent_id.rs @@ -1,19 +1,17 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation //! Object for 0x0102 Authenticated Sender With Payment Intent Id memo type //! //! 
This was proposed for standardization in mobilecoinfoundation/mcips/pull/54 use super::{ - authenticated_common::{compute_category1_hmac, validate_authenticated_sender}, + authenticated_common::{compute_authenticated_sender_memo, validate_authenticated_sender}, credential::SenderMemoCredential, RegisteredMemoType, }; use crate::impl_memo_type_conversions; use mc_account_keys::{PublicAddress, ShortAddressHash}; -use mc_crypto_keys::{ - CompressedRistrettoPublic, KexReusablePrivate, RistrettoPrivate, RistrettoPublic, -}; +use mc_crypto_keys::{CompressedRistrettoPublic, RistrettoPrivate, RistrettoPublic}; use subtle::Choice; /// A memo that the sender writes to convey their identity in an authenticated @@ -62,21 +60,16 @@ impl AuthenticatedSenderWithPaymentIntentIdMemo { // [24-48) unused // [48-64) HMAC - let mut memo_data = [0u8; 64]; - memo_data[..16].copy_from_slice(cred.address_hash.as_ref()); - memo_data[16..24].copy_from_slice(&payment_intent_id.to_be_bytes()); - - let shared_secret = cred - .subaddress_spend_private_key - .key_exchange(receiving_subaddress_view_public_key); + let mut data = [0u8; (48 - 16)]; + data[0..8].copy_from_slice(&payment_intent_id.to_be_bytes()); - let hmac_value = compute_category1_hmac( - shared_secret.as_ref(), - tx_out_public_key, + let memo_data = compute_authenticated_sender_memo( Self::MEMO_TYPE_BYTES, - &memo_data, + cred, + receiving_subaddress_view_public_key, + tx_out_public_key, + &data, ); - memo_data[48..].copy_from_slice(&hmac_value); Self { memo_data } } diff --git a/transaction/extra/src/memo/authenticated_sender_with_payment_request_id.rs b/transaction/extra/src/memo/authenticated_sender_with_payment_request_id.rs index 5199e510a9..1c446f5aa6 100644 --- a/transaction/extra/src/memo/authenticated_sender_with_payment_request_id.rs +++ b/transaction/extra/src/memo/authenticated_sender_with_payment_request_id.rs @@ -1,19 +1,17 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The 
MobileCoin Foundation //! Object for 0x0101 Authenticated Sender With Payment Request Id memo type //! //! This was proposed for standardization in mobilecoinfoundation/mcips/pull/4 use super::{ - authenticated_common::{compute_category1_hmac, validate_authenticated_sender}, + authenticated_common::{compute_authenticated_sender_memo, validate_authenticated_sender}, credential::SenderMemoCredential, RegisteredMemoType, }; use crate::impl_memo_type_conversions; use mc_account_keys::{PublicAddress, ShortAddressHash}; -use mc_crypto_keys::{ - CompressedRistrettoPublic, KexReusablePrivate, RistrettoPrivate, RistrettoPublic, -}; +use mc_crypto_keys::{CompressedRistrettoPublic, RistrettoPrivate, RistrettoPublic}; use subtle::Choice; /// A memo that the sender writes to convey their identity in an authenticated @@ -60,21 +58,15 @@ impl AuthenticatedSenderWithPaymentRequestIdMemo { // [24-48) unused // [48-64) HMAC - let mut memo_data = [0u8; 64]; - memo_data[..16].copy_from_slice(cred.address_hash.as_ref()); - memo_data[16..24].copy_from_slice(&payment_request_id.to_be_bytes()); - - let shared_secret = cred - .subaddress_spend_private_key - .key_exchange(receiving_subaddress_view_public_key); - - let hmac_value = compute_category1_hmac( - shared_secret.as_ref(), - tx_out_public_key, + let mut data = [0u8; (48 - 16)]; + data[0..8].copy_from_slice(&payment_request_id.to_be_bytes()); + let memo_data = compute_authenticated_sender_memo( Self::MEMO_TYPE_BYTES, - &memo_data, + cred, + receiving_subaddress_view_public_key, + tx_out_public_key, + &data, ); - memo_data[48..].copy_from_slice(&hmac_value); Self { memo_data } } diff --git a/transaction/extra/src/memo/destination.rs b/transaction/extra/src/memo/destination.rs index 8da804b877..f0395a6b7d 100644 --- a/transaction/extra/src/memo/destination.rs +++ b/transaction/extra/src/memo/destination.rs @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation //! 
Object for 0x0200 Destination memo type //! @@ -141,12 +141,14 @@ impl From<&[u8; 64]> for DestinationMemo { impl From for [u8; 64] { fn from(src: DestinationMemo) -> [u8; 64] { - let mut memo_data = [0u8; 64]; - memo_data[0..16].copy_from_slice(src.address_hash.as_ref()); - memo_data[16..24].copy_from_slice(&src.fee.to_be_bytes()); - memo_data[16] = src.num_recipients; - memo_data[24..32].copy_from_slice(&src.total_outlay.to_be_bytes()); - memo_data + let data = [0u8; 32]; + compute_destination_memo( + src.address_hash, + src.fee, + src.num_recipients, + src.total_outlay, + data, + ) } } @@ -157,4 +159,22 @@ pub enum DestinationMemoError { FeeTooLarge, } +/// Shared code for creation of an destination memo with additional +/// data +pub fn compute_destination_memo( + address_hash: ShortAddressHash, + fee: u64, + num_recipients: u8, + total_outlay: u64, + data: [u8; 32], +) -> [u8; 64] { + let mut memo_data = [0u8; 64]; + memo_data[0..16].copy_from_slice(address_hash.as_ref()); + memo_data[16..24].copy_from_slice(&fee.to_be_bytes()); + memo_data[16] = num_recipients; + memo_data[24..32].copy_from_slice(&total_outlay.to_be_bytes()); + memo_data[32..64].copy_from_slice(&data); + memo_data +} + impl_memo_type_conversions! { DestinationMemo } diff --git a/transaction/extra/src/memo/destination_with_payment_intent_id.rs b/transaction/extra/src/memo/destination_with_payment_intent_id.rs index 7f01053ac2..222054a329 100644 --- a/transaction/extra/src/memo/destination_with_payment_intent_id.rs +++ b/transaction/extra/src/memo/destination_with_payment_intent_id.rs @@ -1,10 +1,10 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation //! Object for 0x0204 Destination With Payment Intent Id memo type //! //! 
This was proposed for standardization in mobilecoinfoundation/mcips/pull/54 -use super::{DestinationMemoError, RegisteredMemoType}; +use super::{compute_destination_memo, DestinationMemoError, RegisteredMemoType}; use crate::impl_memo_type_conversions; use mc_account_keys::ShortAddressHash; @@ -165,13 +165,15 @@ impl From<&[u8; 64]> for DestinationWithPaymentIntentIdMemo { impl From for [u8; 64] { fn from(src: DestinationWithPaymentIntentIdMemo) -> [u8; 64] { - let mut memo_data = [0u8; 64]; - memo_data[0..16].copy_from_slice(src.address_hash.as_ref()); - memo_data[16..24].copy_from_slice(&src.fee.to_be_bytes()); - memo_data[16] = src.num_recipients; - memo_data[24..32].copy_from_slice(&src.total_outlay.to_be_bytes()); - memo_data[32..40].copy_from_slice(&src.payment_intent_id.to_be_bytes()); - memo_data + let mut data = [0u8; 32]; + data[0..8].copy_from_slice(&src.payment_intent_id.to_be_bytes()); + compute_destination_memo( + src.address_hash, + src.fee, + src.num_recipients, + src.total_outlay, + data, + ) } } diff --git a/transaction/extra/src/memo/destination_with_payment_request_id.rs b/transaction/extra/src/memo/destination_with_payment_request_id.rs index 13b488e4ad..e5711acf28 100644 --- a/transaction/extra/src/memo/destination_with_payment_request_id.rs +++ b/transaction/extra/src/memo/destination_with_payment_request_id.rs @@ -1,10 +1,10 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation //! Object for 0x0203 Destination With Payment Request Id memo type //! //! 
This was proposed for standardization in mobilecoinfoundation/mcips/pull/54 -use super::{DestinationMemoError, RegisteredMemoType}; +use super::{compute_destination_memo, DestinationMemoError, RegisteredMemoType}; use crate::impl_memo_type_conversions; use mc_account_keys::ShortAddressHash; @@ -165,13 +165,15 @@ impl From<&[u8; 64]> for DestinationWithPaymentRequestIdMemo { impl From for [u8; 64] { fn from(src: DestinationWithPaymentRequestIdMemo) -> [u8; 64] { - let mut memo_data = [0u8; 64]; - memo_data[0..16].copy_from_slice(src.address_hash.as_ref()); - memo_data[16..24].copy_from_slice(&src.fee.to_be_bytes()); - memo_data[16] = src.num_recipients; - memo_data[24..32].copy_from_slice(&src.total_outlay.to_be_bytes()); - memo_data[32..40].copy_from_slice(&src.payment_request_id.to_be_bytes()); - memo_data + let mut data = [0u8; 32]; + data[0..8].copy_from_slice(&src.payment_request_id.to_be_bytes()); + compute_destination_memo( + src.address_hash, + src.fee, + src.num_recipients, + src.total_outlay, + data, + ) } } diff --git a/transaction/extra/src/memo/mod.rs b/transaction/extra/src/memo/mod.rs index 5a6562beb9..38a0e7d9fd 100644 --- a/transaction/extra/src/memo/mod.rs +++ b/transaction/extra/src/memo/mod.rs @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation //! Defines an object for each known high-level memo type, //! and an enum to allow matching recovered memos to one of these types. @@ -48,14 +48,14 @@ //! 
| 0x0204 | Destination With Payment Intent Id Memo | pub use self::{ - authenticated_common::compute_category1_hmac, + authenticated_common::{compute_authenticated_sender_memo, compute_category1_hmac}, authenticated_sender::AuthenticatedSenderMemo, authenticated_sender_with_payment_intent_id::AuthenticatedSenderWithPaymentIntentIdMemo, authenticated_sender_with_payment_request_id::AuthenticatedSenderWithPaymentRequestIdMemo, burn_redemption::BurnRedemptionMemo, credential::SenderMemoCredential, defragmentation::{DefragmentationMemo, DefragmentationMemoError}, - destination::{DestinationMemo, DestinationMemoError}, + destination::{compute_destination_memo, DestinationMemo, DestinationMemoError}, destination_with_payment_intent_id::DestinationWithPaymentIntentIdMemo, destination_with_payment_request_id::DestinationWithPaymentRequestIdMemo, gift_code_cancellation::GiftCodeCancellationMemo, @@ -106,18 +106,18 @@ pub enum MemoDecodingError { } impl_memo_enum! { MemoType, - AuthenticatedSender(AuthenticatedSenderMemo), - AuthenticatedSenderWithPaymentRequestId(AuthenticatedSenderWithPaymentRequestIdMemo), - AuthenticatedSenderWithPaymentIntentId(AuthenticatedSenderWithPaymentIntentIdMemo), - BurnRedemption(BurnRedemptionMemo), - Defragmentation(DefragmentationMemo), - Destination(DestinationMemo), - DestinationWithPaymentRequestId(DestinationWithPaymentRequestIdMemo), - DestinationWithPaymentIntentId(DestinationWithPaymentIntentIdMemo), - GiftCodeCancellation(GiftCodeCancellationMemo), - GiftCodeFunding(GiftCodeFundingMemo), - GiftCodeSender(GiftCodeSenderMemo), - Unused(UnusedMemo), + AuthenticatedSender(AuthenticatedSenderMemo), //[0x01, 0x00] + AuthenticatedSenderWithPaymentRequestId(AuthenticatedSenderWithPaymentRequestIdMemo), //[0x01, 0x01] + AuthenticatedSenderWithPaymentIntentId(AuthenticatedSenderWithPaymentIntentIdMemo), //[0x01, 0x02] + BurnRedemption(BurnRedemptionMemo), //[0x00, 0x01] + Defragmentation(DefragmentationMemo), //[0x00, 0x03] + 
Destination(DestinationMemo), //[0x02, 0x00] + DestinationWithPaymentRequestId(DestinationWithPaymentRequestIdMemo), //[0x02, 0x03] + DestinationWithPaymentIntentId(DestinationWithPaymentIntentIdMemo), //[0x02, 0x04] + GiftCodeCancellation(GiftCodeCancellationMemo), //[0x02, 0x02] + GiftCodeFunding(GiftCodeFundingMemo), //[0x02, 0x01] + GiftCodeSender(GiftCodeSenderMemo), //[0x00, 0x02] + Unused(UnusedMemo), //[0x00, 0x00] } #[cfg(test)] From c1014d0dbb974555af8e7a036a940b14bd18734e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Apr 2023 16:57:49 -0700 Subject: [PATCH 33/46] chore(deps): bump serde from 1.0.154 to 1.0.159 (#3294) Bumps [serde](https://github.com/serde-rs/serde) from 1.0.154 to 1.0.159. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.154...v1.0.159) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 10 +++++----- core/Cargo.toml | 2 +- core/types/Cargo.toml | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b744f9c1f..d7f95dd3b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7371,9 +7371,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.154" +version = "1.0.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cdd151213925e7f1ab45a9bbfb129316bd00799784b174b7cc7bcd16961c49e" +checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065" dependencies = [ "serde_derive", ] @@ -7399,13 +7399,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.154" +version = "1.0.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fc80d722935453bcafdc2c9a73cd6fac4dc1938f0346035d84bf99fa9e33217" +checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.11", ] [[package]] diff --git a/core/Cargo.toml b/core/Cargo.toml index 77b5d57748..539930c5fc 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -37,7 +37,7 @@ clap = { version = "4.1.11", features = [ "derive" ] } hex = { version = "0.4.3", default-features = false } lazy_static = { version = "1.4.0" } rand_core = { version = "0.6.3", default-features = false } -serde = { version = "1.0.154", features = [ "derive" ] } +serde = { version = "1.0.159", features = [ "derive" ] } serde_json = { version = "1.0.94" } mc-test-vectors-definitions = { path = "../test-vectors/definitions" } diff --git a/core/types/Cargo.toml b/core/types/Cargo.toml index 3f768759a9..287463daa9 100644 --- a/core/types/Cargo.toml +++ b/core/types/Cargo.toml @@ -16,7 +16,7 @@ prost = ["dep:prost", "mc-crypto-keys/prost"] # External dependencies curve25519-dalek = { version = 
"4.0.0-rc.1", default-features = false } prost = { version = "0.11", optional = true, default-features = false } -serde = { version = "1.0.154", optional = true, default-features = false, features = [ "derive" ] } +serde = { version = "1.0.159", optional = true, default-features = false, features = [ "derive" ] } subtle = { version = "2.4.1", default-features = false } zeroize = { version = "1.5", default-features = false } From f618719aa424043d684b95daea3b134fffd468fd Mon Sep 17 00:00:00 2001 From: Millie C Date: Tue, 28 Mar 2023 18:42:01 -0400 Subject: [PATCH 34/46] Failure limit on tx proposals --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index d7f95dd3b4..25adc48780 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7405,7 +7405,7 @@ checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585" dependencies = [ "proc-macro2", "quote", - "syn 2.0.11", + "syn 2.0.12", ] [[package]] From 9749e599f24b0e06a6002248f97e2e32697a44d8 Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 30 Mar 2023 19:03:00 -0400 Subject: [PATCH 35/46] Actually close sessions with problem clients --- .../service/src/api/client_api_service.rs | 32 +++++++++++++++++-- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 1d1dac2ed4..6abef5b1b7 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -276,6 +276,8 @@ impl ConsensusClientApi for ClientApiService { let session_id = ClientSession::from(msg.channel_id.clone()); + let session_id = ClientSession::from(msg.channel_id.clone()); + { let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); // Calling get() on the LRU bumps the entry to show up as more @@ -349,9 +351,9 @@ impl ConsensusClientApi for ClientApiService { ConsensusGrpcError::NotServing.into() } } else { - let result = 
self.handle_proposed_tx(msg); + let result = self.handle_proposed_tx(msg); // The block present below rate-limits suspicious behavior. - if let Err(_err) = &result { + if let Err(err) = &result { let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); let record = if let Some(record) = tracker.get_mut(&session_id) { record @@ -361,7 +363,31 @@ impl ConsensusClientApi for ClientApiService { .get_mut(&session_id) .expect("Adding session-tracking record should be atomic.") }; - record.fail_tx_proposal(Instant::now(), self.config.tx_failure_window); + let recent_failure_count = + record.fail_tx_proposal(&Instant::now(), &self.config.tx_failure_window); + + if (recent_failure_count as u32) >= self.config.tx_failure_limit { + log::warn!( + self.logger, + "Client has {} recent failed tx proposals within the \ + last {} seconds - dropping connection. \n\ + Most recent proposal failure reason was: {:?}", + recent_failure_count, + self.config.tx_failure_window.as_secs_f32(), + err + ); + // Rate-limiting is performed at the auth endpoint, so + // merely dropping the connection will be enough. 
+ let close_result = self.enclave.client_close(session_id.clone()); + if let Err(e) = close_result { + log::error!( + self.logger, + "Failed to drop session {:?} due to: {:?}", + &session_id, + e + ); + } + } } result.or_else(ConsensusGrpcError::into) }; From c1c00c6007fd2ec1ec79f03eac44fe4399cd88db Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 6 Apr 2023 22:38:33 -0400 Subject: [PATCH 36/46] Fixes within the process of rebasing --- .../service/src/api/client_api_service.rs | 33 +++---------------- 1 file changed, 5 insertions(+), 28 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 6abef5b1b7..7b374a8ec3 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -65,8 +65,9 @@ impl ClientSessionTracking { /// Remove any transaction proposal failure record that is older than our /// tracking window. fn clear_stale_records(&mut self, now: Instant, tracking_window: Duration) { - self.tx_proposal_failures - .retain(|past_failure| now.saturating_duration_since(*past_failure) <= tracking_window); + self.tx_proposal_failures.retain(|past_failure| { + now.saturating_duration_since(*past_failure) <= tracking_window + }); } /// Push a new failed tx proposal record, clear out samples older than @@ -353,7 +354,7 @@ impl ConsensusClientApi for ClientApiService { } else { let result = self.handle_proposed_tx(msg); // The block present below rate-limits suspicious behavior. 
- if let Err(err) = &result { + if let Err(_err) = &result { let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); let record = if let Some(record) = tracker.get_mut(&session_id) { record @@ -363,31 +364,7 @@ impl ConsensusClientApi for ClientApiService { .get_mut(&session_id) .expect("Adding session-tracking record should be atomic.") }; - let recent_failure_count = - record.fail_tx_proposal(&Instant::now(), &self.config.tx_failure_window); - - if (recent_failure_count as u32) >= self.config.tx_failure_limit { - log::warn!( - self.logger, - "Client has {} recent failed tx proposals within the \ - last {} seconds - dropping connection. \n\ - Most recent proposal failure reason was: {:?}", - recent_failure_count, - self.config.tx_failure_window.as_secs_f32(), - err - ); - // Rate-limiting is performed at the auth endpoint, so - // merely dropping the connection will be enough. - let close_result = self.enclave.client_close(session_id.clone()); - if let Err(e) = close_result { - log::error!( - self.logger, - "Failed to drop session {:?} due to: {:?}", - &session_id, - e - ); - } - } + record.fail_tx_proposal(Instant::now(), self.config.tx_failure_window); } result.or_else(ConsensusGrpcError::into) }; From 81c621d52c6adb1647934e3c8d921d1de94f0f71 Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 10 Apr 2023 13:22:33 -0400 Subject: [PATCH 37/46] cargo fmt --- consensus/service/src/api/client_api_service.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 978cfa4d02..d41cbe1259 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -65,9 +65,8 @@ impl ClientSessionTracking { /// Remove any transaction proposal failure record that is older than our /// tracking window. 
fn clear_stale_records(&mut self, now: Instant, tracking_window: Duration) { - self.tx_proposal_failures.retain(|past_failure| { - now.saturating_duration_since(*past_failure) <= tracking_window - }); + self.tx_proposal_failures + .retain(|past_failure| now.saturating_duration_since(*past_failure) <= tracking_window); } /// Push a new failed tx proposal record, clear out samples older than @@ -367,7 +366,7 @@ impl ConsensusClientApi for ClientApiService { ConsensusGrpcError::NotServing.into() } } else { - let result = self.handle_proposed_tx(msg); + let result = self.handle_proposed_tx(msg); // The block present below rate-limits suspicious behavior. if let Err(_err) = &result { let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); From cf078af2417883ba71fb6e8765e1b5633fbcbe82 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 19:53:54 +0000 Subject: [PATCH 38/46] chore(deps): bump bitflags from 1.3.2 to 2.0.1 (#3250) * chore(deps): bump bitflags from 1.3.2 to 2.0.1 Bumps [bitflags](https://github.com/bitflags/bitflags) from 1.3.2 to 2.0.1. - [Release notes](https://github.com/bitflags/bitflags/releases) - [Changelog](https://github.com/bitflags/bitflags/blob/main/CHANGELOG.md) - [Commits](https://github.com/bitflags/bitflags/compare/1.3.2...2.0.1) --- updated-dependencies: - dependency-name: bitflags dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Update bitflag usage for the 1.0->2.0 changes Per this changes the serialized format of items containing bitflags. 
* Update enclave lock files * Rust fmt --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Nick Santana --- Cargo.lock | 5 ++++- attest/core/Cargo.toml | 2 +- attest/core/src/error.rs | 2 +- attest/core/src/ias/verify.rs | 1 + consensus/enclave/trusted/Cargo.lock | 15 ++++++++++++--- fog/ingest/enclave/trusted/Cargo.lock | 15 ++++++++++++--- fog/ledger/enclave/trusted/Cargo.lock | 15 ++++++++++++--- fog/view/enclave/trusted/Cargo.lock | 15 ++++++++++++--- 8 files changed, 55 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab2d229ede..4f827eb505 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -388,6 +388,9 @@ name = "bitflags" version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487f1e0fcbe47deb8b0574e646def1c903389d95241dd1bbcc6ce4a715dfc0c1" +dependencies = [ + "serde", +] [[package]] name = "bitvec" @@ -2392,7 +2395,7 @@ version = "4.1.0-pre0" dependencies = [ "base64 0.21.0", "bincode", - "bitflags 1.3.2", + "bitflags 2.0.2", "cargo-emit", "chrono", "digest 0.10.6", diff --git a/attest/core/Cargo.toml b/attest/core/Cargo.toml index 8f5b947fdf..9db6cadf52 100644 --- a/attest/core/Cargo.toml +++ b/attest/core/Cargo.toml @@ -34,7 +34,7 @@ mc-util-encodings = { path = "../../util/encodings" } mc-util-repr-bytes = { path = "../../util/repr-bytes", features = ["hex_fmt"] } base64 = { version = "0.21", default-features = false, features = ["alloc"] } -bitflags = "1.3" +bitflags = { version = "2.0", default-features = false, features = ["serde"] } chrono = { version = "0.4.24", default-features = false, features = ["alloc"] } digest = "0.10" displaydoc = { version = "0.2", default-features = false } diff --git a/attest/core/src/error.rs b/attest/core/src/error.rs index 8411764e17..72bd9ded28 100644 --- a/attest/core/src/error.rs +++ b/attest/core/src/error.rs @@ -390,7 +390,7 @@ pub enum ReportDetailsError { bitflags! 
{ /// Revocation cause flags - #[derive(Deserialize, Serialize)] + #[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] pub struct RevocationCause: u64 { /// Cause reason was not given (but still revoked) const UNSPECIFIED = 0; diff --git a/attest/core/src/ias/verify.rs b/attest/core/src/ias/verify.rs index 2661191dfa..7f8fdb1039 100644 --- a/attest/core/src/ias/verify.rs +++ b/attest/core/src/ias/verify.rs @@ -336,6 +336,7 @@ impl<'src> TryFrom<&'src VerificationReport> for VerificationReportData { "SIGNATURE_INVALID" => Err(IasQuoteError::SignatureInvalid), "GROUP_REVOKED" => Err(IasQuoteError::GroupRevoked( revocation_reason + .clone() .ok_or_else(|| JsonError::FieldMissing("revocationReason".to_string()))?, platform_info_blob .ok_or_else(|| JsonError::FieldMissing("platformInfoBlob".to_string()))?, diff --git a/consensus/enclave/trusted/Cargo.lock b/consensus/enclave/trusted/Cargo.lock index 3e5a204699..746b8674cb 100644 --- a/consensus/enclave/trusted/Cargo.lock +++ b/consensus/enclave/trusted/Cargo.lock @@ -88,7 +88,7 @@ version = "0.64.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4243e6031260db77ede97ad86c27e501d646a27ab57b59a574f725d98ab1fb4" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", @@ -116,6 +116,15 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5dd14596c0e5b954530d0e6f1fd99b89c03e313aa2086e8da4303701a09e1cf" +dependencies = [ + "serde", +] + [[package]] name = "blake2" version = "0.10.2" @@ -621,7 +630,7 @@ name = "mbedtls" version = "0.8.1" source = "git+https://github.com/mobilecoinfoundation/rust-mbedtls.git?rev=98d3af413c1e23ea89cc5f41ab4dddb1944405af#98d3af413c1e23ea89cc5f41ab4dddb1944405af" 
dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "cc", "cfg-if 1.0.0", @@ -705,7 +714,7 @@ name = "mc-attest-core" version = "4.1.0-pre0" dependencies = [ "base64", - "bitflags", + "bitflags 2.0.1", "cargo-emit", "chrono", "digest", diff --git a/fog/ingest/enclave/trusted/Cargo.lock b/fog/ingest/enclave/trusted/Cargo.lock index 6d941710a1..dd5cfe8f3f 100644 --- a/fog/ingest/enclave/trusted/Cargo.lock +++ b/fog/ingest/enclave/trusted/Cargo.lock @@ -118,7 +118,7 @@ version = "0.64.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4243e6031260db77ede97ad86c27e501d646a27ab57b59a574f725d98ab1fb4" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", @@ -146,6 +146,15 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5dd14596c0e5b954530d0e6f1fd99b89c03e313aa2086e8da4303701a09e1cf" +dependencies = [ + "serde", +] + [[package]] name = "blake2" version = "0.10.6" @@ -651,7 +660,7 @@ name = "mbedtls" version = "0.8.1" source = "git+https://github.com/mobilecoinfoundation/rust-mbedtls.git?rev=98d3af413c1e23ea89cc5f41ab4dddb1944405af#98d3af413c1e23ea89cc5f41ab4dddb1944405af" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "cc", "cfg-if 1.0.0", @@ -735,7 +744,7 @@ name = "mc-attest-core" version = "4.1.0-pre0" dependencies = [ "base64", - "bitflags", + "bitflags 2.0.1", "cargo-emit", "chrono", "digest", diff --git a/fog/ledger/enclave/trusted/Cargo.lock b/fog/ledger/enclave/trusted/Cargo.lock index 01294b3210..669b1884c2 100644 --- a/fog/ledger/enclave/trusted/Cargo.lock +++ b/fog/ledger/enclave/trusted/Cargo.lock @@ -118,7 +118,7 @@ version = "0.64.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c4243e6031260db77ede97ad86c27e501d646a27ab57b59a574f725d98ab1fb4" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", @@ -146,6 +146,15 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5dd14596c0e5b954530d0e6f1fd99b89c03e313aa2086e8da4303701a09e1cf" +dependencies = [ + "serde", +] + [[package]] name = "blake2" version = "0.10.6" @@ -645,7 +654,7 @@ name = "mbedtls" version = "0.8.1" source = "git+https://github.com/mobilecoinfoundation/rust-mbedtls.git?rev=98d3af413c1e23ea89cc5f41ab4dddb1944405af#98d3af413c1e23ea89cc5f41ab4dddb1944405af" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "cc", "cfg-if", @@ -729,7 +738,7 @@ name = "mc-attest-core" version = "4.1.0-pre0" dependencies = [ "base64", - "bitflags", + "bitflags 2.0.1", "cargo-emit", "chrono", "digest", diff --git a/fog/view/enclave/trusted/Cargo.lock b/fog/view/enclave/trusted/Cargo.lock index 5b12dea505..e41718098a 100644 --- a/fog/view/enclave/trusted/Cargo.lock +++ b/fog/view/enclave/trusted/Cargo.lock @@ -118,7 +118,7 @@ version = "0.64.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4243e6031260db77ede97ad86c27e501d646a27ab57b59a574f725d98ab1fb4" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cexpr", "clang-sys", "lazy_static", @@ -146,6 +146,15 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5dd14596c0e5b954530d0e6f1fd99b89c03e313aa2086e8da4303701a09e1cf" +dependencies = [ + "serde", +] + [[package]] name = "blake2" version = 
"0.10.6" @@ -651,7 +660,7 @@ name = "mbedtls" version = "0.8.1" source = "git+https://github.com/mobilecoinfoundation/rust-mbedtls.git?rev=98d3af413c1e23ea89cc5f41ab4dddb1944405af#98d3af413c1e23ea89cc5f41ab4dddb1944405af" dependencies = [ - "bitflags", + "bitflags 1.3.2", "byteorder", "cc", "cfg-if 1.0.0", @@ -735,7 +744,7 @@ name = "mc-attest-core" version = "4.1.0-pre0" dependencies = [ "base64", - "bitflags", + "bitflags 2.0.1", "cargo-emit", "chrono", "digest", From 766fa3328dfd7d0b5ea561dbeb09cb30d99e371a Mon Sep 17 00:00:00 2001 From: Chris Beck <5683852+cbeck88@users.noreply.github.com> Date: Mon, 10 Apr 2023 14:50:19 -0600 Subject: [PATCH 39/46] refactor mobilecoind api to not depend on consensus-api (#3307) * refactor mobilecoind api to not depend on consensus-api I am having a lot of build problems in another project due to mc-mobilecoind-api pulling in mbedtls, which exports some symbols that conflicts with another C library that some rust code depends on. I determined that the only reason I have mbedtls in my build is that `mobilecoind-api` depends on `consensus-api` and this pulls in `mc-attest-core` etc. This change allows me to break this dependency. * remove stuff from build.rs that isn't needed anymore * cargo lock * fixup * fixup * restore deprecated field (avoid compatibility break) * add changelog entry * fixup --- CHANGELOG.md | 6 +++++ Cargo.lock | 1 - mobilecoind/api/Cargo.toml | 1 - mobilecoind/api/build.rs | 5 ---- mobilecoind/api/proto/mobilecoind_api.proto | 26 ++++++++++++++++++--- mobilecoind/api/src/lib.rs | 1 - mobilecoind/src/service.rs | 20 +++++++++++++++- 7 files changed, 48 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cb58db6132..00c4c090a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). 
The crates in this repository do not adhere to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) at this time. +## [5.0.0] + +### Changed + +- mobilecoind now has its own version of the `LastBlockInfo` proto message. ([#3307]) + ## [4.1.0] ### Added diff --git a/Cargo.lock b/Cargo.lock index 4f827eb505..2635b1df73 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4804,7 +4804,6 @@ dependencies = [ "hex_fmt", "mc-api", "mc-common", - "mc-consensus-api", "mc-transaction-builder", "mc-util-build-grpc", "mc-util-build-script", diff --git a/mobilecoind/api/Cargo.toml b/mobilecoind/api/Cargo.toml index 7df7c1f21d..b5ddbe1149 100644 --- a/mobilecoind/api/Cargo.toml +++ b/mobilecoind/api/Cargo.toml @@ -11,7 +11,6 @@ rust-version = { workspace = true } [dependencies] mc-api = { path = "../../api" } -mc-consensus-api = { path = "../../consensus/api" } mc-util-uri = { path = "../../util/uri" } futures = "0.3" diff --git a/mobilecoind/api/build.rs b/mobilecoind/api/build.rs index 8d3c89df80..f4e95d95c8 100644 --- a/mobilecoind/api/build.rs +++ b/mobilecoind/api/build.rs @@ -16,12 +16,7 @@ fn main() { .depvar("MC_API_PROTOS_PATH") .expect("Could not read api's protos path") .to_owned(); - let consensus_api_proto_path = env - .depvar("MC_CONSENSUS_API_PROTOS_PATH") - .expect("Could not read api's protos path") - .to_owned(); let mut all_proto_dirs = api_proto_path.split(':').collect::>(); - all_proto_dirs.extend(consensus_api_proto_path.split(':')); all_proto_dirs.push(proto_str); mc_util_build_grpc::compile_protos_and_generate_mod_rs( diff --git a/mobilecoind/api/proto/mobilecoind_api.proto b/mobilecoind/api/proto/mobilecoind_api.proto index a4095b4d57..0791510d29 100644 --- a/mobilecoind/api/proto/mobilecoind_api.proto +++ b/mobilecoind/api/proto/mobilecoind_api.proto @@ -8,7 +8,6 @@ syntax = "proto3"; import "google/protobuf/empty.proto"; import "external.proto"; import "blockchain.proto"; -import "consensus_common.proto"; package mobilecoind_api; @@ -961,10 +960,31 @@ 
message GetNetworkStatusResponse { // Whether we are behind. bool is_behind = 4; - // The latest block info reported by a consensus node - consensus_common.LastBlockInfoResponse last_block_info = 5; + // The latest block info data reported by a consensus node + LastBlockInfo last_block_info = 5; } +// Data about the network state and last block processed by the consensus network +message LastBlockInfo { + // Block index + uint64 index = 1; + + // Current MOB minimum fee (kept for backwards compatibility) + uint64 mob_minimum_fee = 2 [deprecated = true]; + + // A map of token id -> minimum fee + map minimum_fees = 3; + + // Current network_block version, appropriate for new transactions. + // + // Note that if the server was just reconfigured, this may be HIGHER than + // the highest block version in the ledger, so for clients this is a better + // source of truth than the local ledger, if the client might possibly be + // creating the first transaction after a reconfigure / redeploy. + uint32 network_block_version = 4; +} + + // // Database encryption // diff --git a/mobilecoind/api/src/lib.rs b/mobilecoind/api/src/lib.rs index 3ad6db5344..3423899659 100644 --- a/mobilecoind/api/src/lib.rs +++ b/mobilecoind/api/src/lib.rs @@ -7,7 +7,6 @@ use mc_util_uri::{Uri, UriScheme}; mod autogenerated_code { // Expose proto data types from included third-party/external proto files. pub use mc_api::{blockchain, external, printable}; - pub use mc_consensus_api::consensus_common; pub use protobuf::well_known_types::Empty; // Needed due to how to the auto-generated code references the Empty message. 
diff --git a/mobilecoind/src/service.rs b/mobilecoind/src/service.rs index f2b3efe771..622f37edd7 100644 --- a/mobilecoind/src/service.rs +++ b/mobilecoind/src/service.rs @@ -2158,6 +2158,24 @@ impl Date: Mon, 10 Apr 2023 14:12:27 -0700 Subject: [PATCH 40/46] unforked diesel and updated necessary code (#3304) --- Cargo.lock | 32 ++-- Cargo.toml | 3 - fog/sql_recovery_db/Cargo.toml | 10 +- .../src/bin/fog_sql_recovery_db_migrations.rs | 12 +- fog/sql_recovery_db/src/lib.rs | 164 +++++++++--------- fog/sql_recovery_db/src/models.rs | 10 +- fog/sql_recovery_db/src/sql_types.rs | 27 +-- fog/sql_recovery_db/src/test_utils.rs | 20 +-- 8 files changed, 138 insertions(+), 140 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2635b1df73..4bb06dbd31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1084,22 +1084,24 @@ dependencies = [ [[package]] name = "diesel" -version = "1.4.8" -source = "git+https://github.com/mobilecoinofficial/diesel?rev=026f6379715d27c8be48396e5ca9059f4a263198#026f6379715d27c8be48396e5ca9059f4a263198" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4391a22b19c916e50bec4d6140f29bdda3e3bb187223fe6e3ea0b6e4d1021c04" dependencies = [ "bitflags 1.3.2", "byteorder", "chrono", "diesel_derives", + "itoa 1.0.1", "pq-sys", "r2d2", ] [[package]] name = "diesel-derive-enum" -version = "1.1.2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8910921b014e2af16298f006de12aa08af894b71f0f49a486ab6d74b17bbed" +checksum = "6b10c03b954333d05bfd5be1d8a74eae2c9ca77b86e0f1c3a1ea29c49da1d6c2" dependencies = [ "heck", "proc-macro2", @@ -1109,10 +1111,11 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "1.4.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45f5098f628d02a7a0f68ddba586fb61e80edec3bdc1be3b921f4ceec60858d3" +checksum = "0ad74fdcf086be3d4fdd142f67937678fe60ed431c3b2f08599e7687269410c4" dependencies 
= [ + "proc-macro-error", "proc-macro2", "quote", "syn 1.0.109", @@ -1120,10 +1123,11 @@ dependencies = [ [[package]] name = "diesel_migrations" -version = "1.4.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf3cde8413353dc7f5d72fa8ce0b99a560a359d2c5ef1e5817ca731cd9008f4c" +checksum = "e9ae22beef5e9d6fab9225ddb073c1c6c1a7a6ded5019d5da11d1e5c5adc34e2" dependencies = [ + "diesel", "migrations_internals", "migrations_macros", ] @@ -5861,23 +5865,23 @@ dependencies = [ [[package]] name = "migrations_internals" -version = "1.4.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b4fc84e4af020b837029e017966f86a1c2d5e83e64b589963d5047525995860" +checksum = "c493c09323068c01e54c685f7da41a9ccf9219735c3766fbfd6099806ea08fbc" dependencies = [ - "diesel", + "serde", + "toml 0.5.11", ] [[package]] name = "migrations_macros" -version = "1.4.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9753f12909fd8d923f75ae5c3258cae1ed3c8ec052e1b38c93c21a6d157f789c" +checksum = "8a8ff27a350511de30cdabb77147501c36ef02e0451d957abea2f30caffb2b58" dependencies = [ "migrations_internals", "proc-macro2", "quote", - "syn 1.0.109", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 6a708bc76e..a267625fdd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,8 +242,5 @@ schnorrkel-og = { git = "https://github.com/mobilecoinfoundation/schnorrkel.git" # See: https://github.com/pyfisch/cbor/pull/198 serde_cbor = { git = "https://github.com/mobilecoinofficial/cbor", rev = "4c886a7c1d523aae1ec4aa7386f402cb2f4341b5" } -# Override diesel dependency with our fork, to statically link SQLite. 
-diesel = { git = "https://github.com/mobilecoinofficial/diesel", rev = "026f6379715d27c8be48396e5ca9059f4a263198" } - # Fix issues with recent nightlies, bump curve25519-dalek version x25519-dalek = { git = "https://github.com/mobilecoinfoundation/x25519-dalek.git", rev = "4fbaa3343301c62cfdbc3023c9f485257e6b718a" } diff --git a/fog/sql_recovery_db/Cargo.toml b/fog/sql_recovery_db/Cargo.toml index 6742f0e0d3..10c1755a27 100644 --- a/fog/sql_recovery_db/Cargo.toml +++ b/fog/sql_recovery_db/Cargo.toml @@ -34,9 +34,9 @@ mc-fog-types = { path = "../types" } chrono = "0.4" clap = { version = "4.1", features = ["derive", "env"] } -diesel = { version = "1.4", features = ["chrono", "postgres", "r2d2"] } -diesel-derive-enum = { version = "1", features = ["postgres"] } -diesel_migrations = { version = "1.4", features = ["postgres"] } +diesel = { version = "2.0.3", features = ["chrono", "postgres", "r2d2"] } +diesel-derive-enum = { version = "2.0.1", features = ["postgres"] } +diesel_migrations = { version = "2.0.0", features = ["postgres"] } displaydoc = { version = "0.2", default-features = false } prost = "0.11" r2d2 = "0.8.10" @@ -56,7 +56,3 @@ mc-util-test-helper = { path = "../../util/test-helper" } pem = "2.0" rand = "0.8" - -[build-dependencies] -# clippy fails to run without this. -diesel = { version = "1.4.8", features = ["chrono", "postgres", "r2d2"] } diff --git a/fog/sql_recovery_db/src/bin/fog_sql_recovery_db_migrations.rs b/fog/sql_recovery_db/src/bin/fog_sql_recovery_db_migrations.rs index eacb22c44a..f01e8a8255 100644 --- a/fog/sql_recovery_db/src/bin/fog_sql_recovery_db_migrations.rs +++ b/fog/sql_recovery_db/src/bin/fog_sql_recovery_db_migrations.rs @@ -3,22 +3,20 @@ //! A helper utility for running migrations on a database configured via //! DATABASE_URL. 
-#[macro_use] -extern crate diesel_migrations; - use diesel::{prelude::*, PgConnection}; -use diesel_migrations::embed_migrations; +use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; use std::env; -embed_migrations!("migrations/"); +const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations"); fn main() { let database_url = env::var("DATABASE_URL").expect("Missing DATABASE_URL environment variable"); - let conn = PgConnection::establish(&database_url) + let conn = &mut PgConnection::establish(&database_url) .expect("fog-sql-recovery-db-migrations cannot connect to PG database"); - embedded_migrations::run(&conn).expect("Failed running migrations"); + conn.run_pending_migrations(MIGRATIONS) + .expect("Failed running migrations"); println!("Done migrating Fog recovery DB!"); } diff --git a/fog/sql_recovery_db/src/lib.rs b/fog/sql_recovery_db/src/lib.rs index 1d927096e5..245575cb0c 100644 --- a/fog/sql_recovery_db/src/lib.rs +++ b/fog/sql_recovery_db/src/lib.rs @@ -5,7 +5,6 @@ #[macro_use] extern crate diesel; -#[macro_use] extern crate diesel_migrations; pub use error::Error; @@ -162,7 +161,7 @@ impl SqlRecoveryDb { /// Mark a given ingest invocation as decommissioned. fn decommission_ingest_invocation_impl( &self, - conn: &PgConnection, + conn: &mut PgConnection, ingest_invocation_id: &IngestInvocationId, ) -> Result<(), Error> { // Mark the ingest invocation as decommissioned. @@ -172,7 +171,7 @@ impl SqlRecoveryDb { ) .set(( schema::ingest_invocations::dsl::decommissioned.eq(true), - schema::ingest_invocations::dsl::last_active_at.eq(diesel::expression::dsl::now), + schema::ingest_invocations::dsl::last_active_at.eq(diesel::dsl::now), )) .execute(conn)?; @@ -190,21 +189,24 @@ impl SqlRecoveryDb { /// Mark a given ingest invocation as still being alive. 
fn update_last_active_at_impl( &self, - conn: &PgConnection, + conn: &mut PgConnection, ingest_invocation_id: &IngestInvocationId, ) -> Result<(), Error> { diesel::update( schema::ingest_invocations::dsl::ingest_invocations .filter(schema::ingest_invocations::dsl::id.eq(**ingest_invocation_id)), ) - .set(schema::ingest_invocations::dsl::last_active_at.eq(diesel::expression::dsl::now)) + .set(schema::ingest_invocations::dsl::last_active_at.eq(diesel::dsl::now)) .execute(conn)?; Ok(()) } /// Get missed block ranges. - fn get_missed_block_ranges_impl(&self, conn: &PgConnection) -> Result, Error> { + fn get_missed_block_ranges_impl( + &self, + conn: &mut PgConnection, + ) -> Result, Error> { let query = schema::user_events::dsl::user_events .filter(schema::user_events::dsl::event_type.eq(UserEventType::MissingBlocks)) .select(( @@ -231,7 +233,7 @@ impl SqlRecoveryDb { fn get_ingress_key_status_impl( &self, - conn: &PgConnection, + conn: &mut PgConnection, key: &CompressedRistrettoPublic, ) -> Result, Error> { let key_bytes: &[u8] = key.as_ref(); @@ -256,7 +258,7 @@ impl SqlRecoveryDb { } } - fn get_highest_known_block_index_impl(conn: &PgConnection) -> Result, Error> { + fn get_highest_known_block_index_impl(conn: &mut PgConnection) -> Result, Error> { Ok(schema::ingested_blocks::dsl::ingested_blocks .select(diesel::dsl::max(schema::ingested_blocks::dsl::block_number)) .first::>(conn)? 
@@ -265,7 +267,7 @@ impl SqlRecoveryDb { fn get_expired_invocations_impl( &self, - conn: &PgConnection, + conn: &mut PgConnection, expiration: NaiveDateTime, ) -> Result, Error> { use schema::ingest_invocations::dsl; @@ -310,8 +312,8 @@ impl SqlRecoveryDb { &self, key: &CompressedRistrettoPublic, ) -> Result, Error> { - let conn = self.pool.get()?; - self.get_ingress_key_status_impl(&conn, key) + let conn = &mut self.pool.get()?; + self.get_ingress_key_status_impl(conn, key) } fn new_ingress_key_retriable( @@ -319,12 +321,12 @@ impl SqlRecoveryDb { key: &CompressedRistrettoPublic, start_block_count: u64, ) -> Result { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; conn.build_transaction() .read_write() - .run(|| -> Result { + .run(|conn| -> Result { let highest_known_block_count: u64 = - SqlRecoveryDb::get_highest_known_block_index_impl(&conn)? + SqlRecoveryDb::get_highest_known_block_index_impl(conn)? .map(|index| index + 1) .unwrap_or(0); @@ -340,7 +342,7 @@ impl SqlRecoveryDb { let inserted_row_count = diesel::insert_into(schema::ingress_keys::table) .values(&obj) .on_conflict_do_nothing() - .execute(&conn)?; + .execute(conn)?; if inserted_row_count > 0 { Ok(accepted_start_block_count) @@ -359,11 +361,11 @@ impl SqlRecoveryDb { ) -> Result<(), Error> { let key_bytes: &[u8] = key.as_ref(); - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; use schema::ingress_keys::dsl; diesel::update(dsl::ingress_keys.filter(dsl::ingress_public_key.eq(key_bytes))) .set(dsl::retired.eq(set_retired)) - .execute(&conn)?; + .execute(conn)?; Ok(()) } @@ -373,13 +375,13 @@ impl SqlRecoveryDb { ) -> Result, Error> { let key_bytes: &[u8] = key.as_ref(); - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; use schema::ingested_blocks::dsl; let maybe_index: Option = dsl::ingested_blocks .filter(dsl::ingress_public_key.eq(key_bytes)) .select(diesel::dsl::max(dsl::block_number)) - .first(&conn)?; + .first(conn)?; Ok(maybe_index.map(|val| val 
as u64)) } @@ -389,7 +391,7 @@ impl SqlRecoveryDb { start_block_at_least: u64, ingress_public_key_record_filters: &IngressPublicKeyRecordFilters, ) -> Result, Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; use schema::ingress_keys::dsl; let last_scanned_block = diesel::dsl::sql::( @@ -433,7 +435,7 @@ impl SqlRecoveryDb { bool, bool, Option, - )>(&conn)? + )>(conn)? .into_iter() .map( |( @@ -468,15 +470,15 @@ impl SqlRecoveryDb { egress_public_key: &KexRngPubkey, start_block: u64, ) -> Result { - let conn = self.pool.get()?; - conn.build_transaction().read_write().run(|| { + let conn = &mut self.pool.get()?; + conn.build_transaction().read_write().run(|conn| { // Optionally decommission old invocation. if let Some(prev_ingest_invocation_id) = prev_ingest_invocation_id { - self.decommission_ingest_invocation_impl(&conn, &prev_ingest_invocation_id)?; + self.decommission_ingest_invocation_impl(conn, &prev_ingest_invocation_id)?; } // Write new invocation. - let now = diesel::select(diesel::dsl::now).get_result::(&conn)?; + let now = diesel::select(diesel::dsl::now).get_result::(conn)?; let obj = models::NewIngestInvocation { ingress_public_key: (*ingress_public_key).into(), @@ -490,14 +492,14 @@ impl SqlRecoveryDb { let inserted_obj: models::IngestInvocation = diesel::insert_into(schema::ingest_invocations::table) .values(&obj) - .get_result(&conn)?; + .get_result(conn)?; // Write a user event. let new_event = models::NewUserEvent::new_ingest_invocation(inserted_obj.id); diesel::insert_into(schema::user_events::table) .values(&new_event) - .execute(&conn)?; + .execute(conn)?; // Success. 
Ok(IngestInvocationId::from(inserted_obj.id)) @@ -507,7 +509,7 @@ impl SqlRecoveryDb { fn get_ingestable_ranges_retriable( &self, ) -> Result, Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; // For each ingest invocation we are aware of get its id, start block, is // decommissioned and the max block number it has ingested (if @@ -524,7 +526,7 @@ impl SqlRecoveryDb { .order_by(schema::ingest_invocations::dsl::id); // The list of fields here must match the .select() clause above. - let data = query.load::<(i64, i64, bool, Option)>(&conn)?; + let data = query.load::<(i64, i64, bool, Option)>(conn)?; Ok(data .into_iter() .map(|row| { @@ -551,11 +553,11 @@ impl SqlRecoveryDb { &self, ingest_invocation_id: &IngestInvocationId, ) -> Result<(), Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; conn.build_transaction() .read_write() - .run(|| self.decommission_ingest_invocation_impl(&conn, ingest_invocation_id)) + .run(|conn| self.decommission_ingest_invocation_impl(conn, ingest_invocation_id)) } fn add_block_data_retriable( @@ -565,12 +567,12 @@ impl SqlRecoveryDb { block_signature_timestamp: u64, txs: &[mc_fog_types::ETxOutRecord], ) -> Result { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; match conn .build_transaction() .read_write() - .run(|| -> Result<(), Error> { + .run(|conn| -> Result<(), Error> { // Get ingress pubkey of this ingest invocation id, which is also stored in the // ingested_block record // @@ -581,7 +583,7 @@ impl SqlRecoveryDb { let ingress_key_bytes: Vec = schema::ingest_invocations::table .filter(schema::ingest_invocations::dsl::id.eq(**ingest_invocation_id)) .select(schema::ingest_invocations::ingress_public_key) - .first(&conn)?; + .first(conn)?; // Get bytes of encoded proto ingested block data let proto_bytes = { @@ -606,10 +608,10 @@ impl SqlRecoveryDb { diesel::insert_into(schema::ingested_blocks::table) .values(&new_ingested_block) - .execute(&conn)?; + 
.execute(conn)?; // Update last active at. - self.update_last_active_at_impl(&conn, ingest_invocation_id)?; + self.update_last_active_at_impl(conn, ingest_invocation_id)?; // Success. Ok(()) @@ -638,16 +640,16 @@ impl SqlRecoveryDb { &self, lost_ingress_key: CompressedRistrettoPublic, ) -> Result<(), Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; - conn.build_transaction().read_write().run(|| { + conn.build_transaction().read_write().run(|conn| { // Find the ingress key and update it to be marked lost let key_bytes: &[u8] = lost_ingress_key.as_ref(); use schema::ingress_keys::dsl; let key_records: Vec = diesel::update(dsl::ingress_keys.filter(dsl::ingress_public_key.eq(key_bytes))) .set(dsl::lost.eq(true)) - .get_results(&conn)?; + .get_results(conn)?; // Compute a missed block range based on looking at the key status, // which is correct if no blocks have actually been scanned using the key. @@ -671,7 +673,7 @@ impl SqlRecoveryDb { dsl::ingested_blocks .filter(dsl::ingress_public_key.eq(key_bytes)) .select(diesel::dsl::max(dsl::block_number)) - .first(&conn)? + .first(conn)? 
}; if let Some(block_index) = maybe_block_index { @@ -699,15 +701,15 @@ impl SqlRecoveryDb { diesel::insert_into(schema::user_events::table) .values(&new_event) - .execute(&conn)?; + .execute(conn)?; Ok(()) }) } fn get_missed_block_ranges_retriable(&self) -> Result, Error> { - let conn = self.pool.get()?; - self.get_missed_block_ranges_impl(&conn) + let conn = &mut self.pool.get()?; + self.get_missed_block_ranges_impl(conn) } fn search_user_events_retriable( @@ -719,7 +721,7 @@ impl SqlRecoveryDb { return Ok((Default::default(), i64::MAX)); } - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; let mut events: Vec<(i64, FogUserEvent)> = Vec::new(); // Collect all events of interest @@ -771,7 +773,7 @@ impl SqlRecoveryDb { // For MissingBlocks events Option, // user_events.missing_blocks_start Option, // user_events.missing_blocks_end - )>(&conn)?; + )>(conn)?; // If no events are found, return start_from_user_event_id and not 0 let mut max_user_event_id = start_from_user_event_id; @@ -889,14 +891,14 @@ impl SqlRecoveryDb { start_block: u64, search_keys: &[Vec], ) -> Result, Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; let query = schema::ingested_blocks::dsl::ingested_blocks .filter(schema::ingested_blocks::dsl::block_number.ge(start_block as i64)) .select(schema::ingested_blocks::dsl::proto_ingested_block_data); let mut search_key_to_payload = HashMap::, Vec>::default(); - for proto_bytes in query.load::>(&conn)? { + for proto_bytes in query.load::>(conn)? 
{ let proto = ProtoIngestedBlockData::decode(&*proto_bytes)?; for e_tx_out_record in proto.e_tx_out_records { search_key_to_payload.insert(e_tx_out_record.search_key, e_tx_out_record.payload); @@ -928,8 +930,8 @@ impl SqlRecoveryDb { &self, ingest_invocation_id: &IngestInvocationId, ) -> Result<(), Error> { - let conn = self.pool.get()?; - self.update_last_active_at_impl(&conn, ingest_invocation_id) + let conn = &mut self.pool.get()?; + self.update_last_active_at_impl(conn, ingest_invocation_id) } /// Get any ETxOutRecords produced by a given ingress key for a given @@ -947,7 +949,7 @@ impl SqlRecoveryDb { ingress_key: CompressedRistrettoPublic, block_index: u64, ) -> Result>, Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; let key_bytes: &[u8] = ingress_key.as_ref(); let query = schema::ingested_blocks::dsl::ingested_blocks @@ -957,7 +959,7 @@ impl SqlRecoveryDb { // The result of load should be 0 or 1, since there is a database constraint // around ingress keys and block indices - let protos: Vec> = query.load::>(&conn)?; + let protos: Vec> = query.load::>(conn)?; if protos.is_empty() { Ok(None) @@ -988,7 +990,7 @@ impl SqlRecoveryDb { block_index: u64, block_count: usize, ) -> Result>, Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; // The idea is: // Similar to get_tx_outs_by_block_and_key_retriable, but now @@ -1009,7 +1011,7 @@ impl SqlRecoveryDb { }; // We will get one row for each hit in the table we found - let rows: Vec<(i64, Vec)> = query.load(&conn)?; + let rows: Vec<(i64, Vec)> = query.load(conn)?; if rows.len() > block_count { log::warn!( @@ -1048,7 +1050,7 @@ impl SqlRecoveryDb { ingress_key: CompressedRistrettoPublic, block_index: u64, ) -> Result, Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; let key_bytes: &[u8] = ingress_key.as_ref(); let query = schema::ingested_blocks::dsl::ingested_blocks @@ -1058,7 +1060,7 @@ impl SqlRecoveryDb { // The result of load should 
be 0 or 1, since there is a database constraint // around ingress keys and block indices - let iids: Vec = query.load::(&conn)?; + let iids: Vec = query.load::(conn)?; if iids.is_empty() { Ok(None) @@ -1082,13 +1084,13 @@ impl SqlRecoveryDb { &self, block_index: u64, ) -> Result, Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; let query = schema::ingested_blocks::dsl::ingested_blocks .filter(schema::ingested_blocks::dsl::block_number.eq(block_index as i64)) .select(schema::ingested_blocks::dsl::cumulative_txo_count); - let data = query.load::(&conn)?; + let data = query.load::(conn)?; if data.is_empty() { Ok(None) } else { @@ -1118,20 +1120,20 @@ impl SqlRecoveryDb { &self, block_index: u64, ) -> Result, Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; let query = schema::ingested_blocks::dsl::ingested_blocks .filter(schema::ingested_blocks::dsl::block_number.eq(block_index as i64)) .select(schema::ingested_blocks::dsl::block_signature_timestamp); - let data = query.load::(&conn)?; + let data = query.load::(conn)?; Ok(data.first().map(|val| *val as u64)) } /// Get the highest block index for which we have any data at all. fn get_highest_known_block_index_retriable(&self) -> Result, Error> { - let conn = self.pool.get()?; - SqlRecoveryDb::get_highest_known_block_index_impl(&conn) + let conn = &mut self.pool.get()?; + SqlRecoveryDb::get_highest_known_block_index_impl(conn) } //// @@ -1140,7 +1142,7 @@ impl SqlRecoveryDb { //// fn get_all_reports_retriable(&self) -> Result, Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; let query = schema::reports::dsl::reports .select(( @@ -1152,7 +1154,7 @@ impl SqlRecoveryDb { .order_by(schema::reports::dsl::id); query - .load::<(Option, String, Vec, i64)>(&conn)? + .load::<(Option, String, Vec, i64)>(conn)? 
.into_iter() .map(|(ingest_invocation_id, report_id, report, pubkey_expiry)| { let report = VerificationReport::decode(&*report)?; @@ -1175,11 +1177,11 @@ impl SqlRecoveryDb { report_id: &str, data: &ReportData, ) -> Result { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; conn.build_transaction() .read_write() - .run(|| -> Result { + .run(|conn| -> Result { // First, try to update the pubkey_expiry value on this ingress key, only // allowing it to increase, and only if it is not retired let result: IngressPublicKeyStatus = { @@ -1193,7 +1195,7 @@ impl SqlRecoveryDb { .filter(dsl::pubkey_expiry.lt(data.pubkey_expiry as i64)), ) .set(dsl::pubkey_expiry.eq(data.pubkey_expiry as i64)) - .get_results(&conn)?; + .get_results(conn)?; if key_records.is_empty() { // If the result is empty, the key might not exist, or it might have had a @@ -1201,7 +1203,7 @@ impl SqlRecoveryDb { // so we need to make another query to find which is the case log::info!(self.logger, "update was a no-op"); let maybe_key_status = - self.get_ingress_key_status_impl(&conn, ingress_key)?; + self.get_ingress_key_status_impl(conn, ingress_key)?; log::info!(self.logger, "check ingress key passed"); maybe_key_status.ok_or(Error::MissingIngressKey(*ingress_key))? } else if key_records.len() > 1 { @@ -1245,18 +1247,18 @@ impl SqlRecoveryDb { schema::reports::dsl::report.eq(report_bytes.clone()), schema::reports::dsl::pubkey_expiry.eq(report.pubkey_expiry), )) - .execute(&conn)?; + .execute(conn)?; Ok(result) }) } /// Remove report data associated with a given report id. 
fn remove_report_retriable(&self, report_id: &str) -> Result<(), Error> { - let conn = self.pool.get()?; + let conn = &mut self.pool.get()?; diesel::delete( schema::reports::dsl::reports.filter(schema::reports::dsl::fog_report_id.eq(report_id)), ) - .execute(&conn)?; + .execute(conn)?; Ok(()) } @@ -1264,8 +1266,8 @@ impl SqlRecoveryDb { &self, expiration: NaiveDateTime, ) -> Result, Error> { - let conn = self.pool.get()?; - self.get_expired_invocations_impl(&conn, expiration) + let conn = &mut self.pool.get()?; + self.get_expired_invocations_impl(conn, expiration) } } @@ -1661,11 +1663,11 @@ mod tests { assert_ne!(invoc_id1, invoc_id2); // Both ingest invocations should appear in the ingest_invocations table - let conn = db_test_context.new_conn(); + let conn = &mut db_test_context.new_conn(); let ingest_invocations: Vec = schema::ingest_invocations::dsl::ingest_invocations .order_by(schema::ingest_invocations::dsl::id) - .load(&conn) + .load(conn) .expect("failed getting ingest invocations"); assert_eq!(ingest_invocations.len(), 2); @@ -1927,7 +1929,7 @@ mod tests { let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let db_test_context = test_utils::SqlRecoveryDbTestContext::new(logger); let db = db_test_context.get_db_instance(); - let conn = db_test_context.new_conn(); + let conn = &mut db_test_context.new_conn(); let ingress_key = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); db.new_ingress_key(&ingress_key, 20).unwrap(); @@ -1952,7 +1954,7 @@ mod tests { schema::ingest_invocations::dsl::ingest_invocations .select(schema::ingest_invocations::dsl::last_active_at) .order_by(schema::ingest_invocations::dsl::id) - .load(&conn) + .load(conn) .unwrap(); let mut invoc1_orig_last_active_at = invocs_last_active_at[0]; let invoc2_orig_last_active_at = invocs_last_active_at[1]; @@ -1967,7 +1969,7 @@ mod tests { let blocks: Vec = schema::ingested_blocks::dsl::ingested_blocks .order_by(schema::ingested_blocks::dsl::id) - .load(&conn) + 
.load(conn) .unwrap(); assert_eq!(blocks.len(), 1); assert_eq!( @@ -1996,7 +1998,7 @@ mod tests { schema::ingest_invocations::dsl::ingest_invocations .select(schema::ingest_invocations::dsl::last_active_at) .order_by(schema::ingest_invocations::dsl::id) - .load(&conn) + .load(conn) .unwrap(); assert!(invocs_last_active_at[0] > invoc1_orig_last_active_at); assert_eq!(invocs_last_active_at[1], invoc2_orig_last_active_at); @@ -2027,7 +2029,7 @@ mod tests { schema::ingest_invocations::dsl::ingest_invocations .select(schema::ingest_invocations::dsl::last_active_at) .order_by(schema::ingest_invocations::dsl::id) - .load(&conn) + .load(conn) .unwrap(); assert_eq!(invocs_last_active_at[0], invoc1_orig_last_active_at); assert_eq!(invocs_last_active_at[1], invoc2_orig_last_active_at); @@ -2045,7 +2047,7 @@ mod tests { let blocks: Vec = schema::ingested_blocks::dsl::ingested_blocks .order_by(schema::ingested_blocks::dsl::id) - .load(&conn) + .load(conn) .unwrap(); assert_eq!(blocks.len(), 2); assert_eq!( @@ -2085,7 +2087,7 @@ mod tests { schema::ingest_invocations::dsl::ingest_invocations .select(schema::ingest_invocations::dsl::last_active_at) .order_by(schema::ingest_invocations::dsl::id) - .load(&conn) + .load(conn) .unwrap(); assert_eq!(invocs_last_active_at[0], invoc1_orig_last_active_at); assert!(invocs_last_active_at[1] > invoc2_orig_last_active_at); diff --git a/fog/sql_recovery_db/src/models.rs b/fog/sql_recovery_db/src/models.rs index 05d82bfbcd..125d42a7da 100644 --- a/fog/sql_recovery_db/src/models.rs +++ b/fog/sql_recovery_db/src/models.rs @@ -14,7 +14,7 @@ pub struct IngressKey { } #[derive(Debug, Insertable)] -#[table_name = "ingress_keys"] +#[diesel(table_name = ingress_keys)] pub struct NewIngressKey { pub ingress_public_key: Vec, pub start_block: i64, @@ -35,7 +35,7 @@ pub struct IngestInvocation { } #[derive(Debug, Insertable)] -#[table_name = "ingest_invocations"] +#[diesel(table_name = ingest_invocations)] pub struct NewIngestInvocation { pub 
ingress_public_key: Vec, pub egress_public_key: Vec, @@ -57,7 +57,7 @@ pub struct IngestedBlock { } #[derive(Debug, Insertable)] -#[table_name = "ingested_blocks"] +#[diesel(table_name = ingested_blocks)] pub struct NewIngestedBlock { pub ingress_public_key: Vec, pub ingest_invocation_id: i64, @@ -68,7 +68,7 @@ pub struct NewIngestedBlock { } #[derive(Debug, Insertable)] -#[table_name = "user_events"] +#[diesel(table_name = user_events)] pub struct NewUserEvent { pub event_type: UserEventType, pub new_ingest_invocation_id: Option, @@ -112,7 +112,7 @@ impl NewUserEvent { } #[derive(Debug, Insertable)] -#[table_name = "reports"] +#[diesel(table_name = reports)] pub struct NewReport<'a> { pub ingress_public_key: &'a [u8], pub ingest_invocation_id: Option, diff --git a/fog/sql_recovery_db/src/sql_types.rs b/fog/sql_recovery_db/src/sql_types.rs index fbcdb1caf7..c280de3cad 100644 --- a/fog/sql_recovery_db/src/sql_types.rs +++ b/fog/sql_recovery_db/src/sql_types.rs @@ -1,15 +1,13 @@ // Copyright (c) 2018-2022 The MobileCoin Foundation use diesel::{ - backend::Backend, deserialize::{self, FromSql}, - pg::Pg, + pg::{Pg, PgValue}, serialize::{self, Output, ToSql}, }; use diesel_derive_enum::DbEnum; use mc_crypto_keys::CompressedRistrettoPublic; -use mc_util_repr_bytes::ReprBytes; -use std::{fmt, io::Write, ops::Deref}; +use std::{fmt, ops::Deref}; #[derive(Debug, PartialEq, DbEnum)] #[DieselType = "User_event_type"] @@ -20,7 +18,7 @@ pub enum UserEventType { } #[derive(AsExpression, FromSqlRow, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] -#[sql_type = "diesel::sql_types::Binary"] +#[diesel(sql_type = diesel::sql_types::Binary)] pub struct SqlCompressedRistrettoPublic(CompressedRistrettoPublic); impl Deref for SqlCompressedRistrettoPublic { @@ -49,13 +47,13 @@ impl fmt::Display for SqlCompressedRistrettoPublic { } } -impl> FromSql - for SqlCompressedRistrettoPublic -{ - fn from_sql(bytes: Option<&DB::RawValue>) -> deserialize::Result { - let vec = as 
FromSql>::from_sql(bytes)?; +impl FromSql for SqlCompressedRistrettoPublic { + fn from_sql(value: PgValue) -> deserialize::Result { + let vec = >::from_sql(value)?; if vec.len() != 32 { - return Err("SqlCompressedRistrettoPublic: Invalid array length".into()); + return Err("SqlCompressedRistrettoPublic: Invalid array + length" + .into()); } let mut key = [0; 32]; @@ -69,7 +67,10 @@ impl> FromSql } impl ToSql for SqlCompressedRistrettoPublic { - fn to_sql(&self, out: &mut Output) -> serialize::Result { - as ToSql>::to_sql(&self.0.to_bytes().to_vec(), out) + fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { + as ToSql>::to_sql( + &self.0.as_bytes().to_vec(), + &mut out.reborrow(), + ) } } diff --git a/fog/sql_recovery_db/src/test_utils.rs b/fog/sql_recovery_db/src/test_utils.rs index aa55329097..bed5f1813f 100644 --- a/fog/sql_recovery_db/src/test_utils.rs +++ b/fog/sql_recovery_db/src/test_utils.rs @@ -4,7 +4,7 @@ use crate::{SqlRecoveryDb, SqlRecoveryDbConnectionConfig}; use diesel::{prelude::*, PgConnection}; -use diesel_migrations::embed_migrations; +use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness}; use mc_common::logger::{log, Logger}; use rand::{distributions::Alphanumeric, thread_rng, Rng}; use retry::{ @@ -13,8 +13,7 @@ use retry::{ }; use std::time::Duration; -embed_migrations!("migrations/"); - +const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations"); const DB_CONNECTION_SLEEP_PERIOD: Duration = Duration::from_secs(3); const TOTAL_RETRY_COUNT: usize = 5; @@ -44,19 +43,20 @@ impl SqlRecoveryDbTestContext { // database. 
let postgres_url = format!("{base_url}/postgres"); log::info!(&logger, "Connecting to root PG DB {}", postgres_url); - let conn = SqlRecoveryDbTestContext::establish_connection(&postgres_url); + let mut conn = SqlRecoveryDbTestContext::establish_connection(&postgres_url); // Create a new database for the test let query = diesel::sql_query(format!("CREATE DATABASE {db_name};").as_str()); let _ = query - .execute(&conn) + .execute(&mut conn) .unwrap_or_else(|err| panic!("Could not create database {db_name}: {err:?}")); // Now we can connect to the database and run the migrations let db_url = format!("{base_url}/{db_name}"); log::info!(&logger, "Connecting to newly created PG DB '{}'", db_url); - let conn = SqlRecoveryDbTestContext::establish_connection(&db_url); - embedded_migrations::run(&conn).expect("failed running migrations"); + let conn = &mut SqlRecoveryDbTestContext::establish_connection(&db_url); + conn.run_pending_migrations(MIGRATIONS) + .expect("failed running migrations"); // Success Self { @@ -107,7 +107,7 @@ impl SqlRecoveryDbTestContext { impl Drop for SqlRecoveryDbTestContext { fn drop(&mut self) { let postgres_url = format!("{}/postgres", self.base_url); - let conn = + let mut conn = PgConnection::establish(&postgres_url).expect("Cannot connect to postgres database."); let disconnect_users = format!( @@ -116,12 +116,12 @@ impl Drop for SqlRecoveryDbTestContext { ); diesel::sql_query(disconnect_users.as_str()) - .execute(&conn) + .execute(&mut conn) .unwrap(); let query = diesel::sql_query(format!("DROP DATABASE {};", self.db_name).as_str()); query - .execute(&conn) + .execute(&mut conn) .unwrap_or_else(|err| panic!("Couldn't drop database {}: {:?}", self.db_name, err)); } } From 9562735bbdd281090c8f8a334f7e8b50aec35a40 Mon Sep 17 00:00:00 2001 From: Millie C Date: Tue, 28 Mar 2023 18:42:01 -0400 Subject: [PATCH 41/46] Failure limit on tx proposals --- .../service/src/api/client_api_service.rs | 25 +++++++++++-------- 1 file changed, 14 
insertions(+), 11 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index d41cbe1259..17b4356655 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -152,21 +152,24 @@ impl ClientApiService { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); - // This will become a proper config option, already implemented - // in pull request #3296 "Failure limit on tx proposals" - let tracking_window = Duration::from_secs(60); let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); if !tracker.contains(&session_id) { tracker.put(session_id.clone(), ClientSessionTracking::new()); + tracker + .get_mut(&session_id) + .expect("Adding session-tracking record should be atomic.") + }; + let recent_failure_count = + record.fail_tx_proposal(Instant::now(), self.config.tx_failure_window); + + if (recent_failure_count as u32) >= self.config.tx_failure_limit { + // TODO: Some action to take to counter the harmful traffic + log::warn!(self.logger, + "Client has {} recent failed tx proposals within the last {}", + recent_failure_count, + self.config.tx_failure_window.as_secs_f32() + ); } - let record = tracker - .get_mut(&session_id) - .expect("Session id {session_id} should be tracked."); - - let _recent_failure_count = - record.fail_tx_proposal(Instant::now(), tracking_window); - // Dropping the client after a limit has been reached will be - // implemented in a future pull request. } err })?; From 5bd1b981b0d6a39a7957e7f80e0458b86328804f Mon Sep 17 00:00:00 2001 From: Millie C Date: Wed, 29 Mar 2023 21:59:58 -0400 Subject: [PATCH 42/46] Cargo fmt. 
--- consensus/service/src/api/client_api_service.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 17b4356655..f4d3266f3b 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -162,9 +162,10 @@ impl ClientApiService { let recent_failure_count = record.fail_tx_proposal(Instant::now(), self.config.tx_failure_window); - if (recent_failure_count as u32) >= self.config.tx_failure_limit { + if (recent_failure_count as u32) >= self.config.tx_failure_limit { // TODO: Some action to take to counter the harmful traffic - log::warn!(self.logger, + log::warn!( + self.logger, "Client has {} recent failed tx proposals within the last {}", recent_failure_count, self.config.tx_failure_window.as_secs_f32() From 2b0c4efd12015e0747df3920622da39f861acefa Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 30 Mar 2023 19:03:00 -0400 Subject: [PATCH 43/46] Actually close sessions with problem clients --- .../service/src/api/client_api_service.rs | 21 ------------------- 1 file changed, 21 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index f4d3266f3b..1d1dac2ed4 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -144,33 +144,12 @@ impl ClientApiService { msg: Message, ) -> Result { counters::ADD_TX_INITIATED.inc(); - let session_id = ClientSession::from(msg.channel_id.clone()); let tx_context = self.enclave.client_tx_propose(msg.into())?; // Cache the transaction. This performs the well-formedness checks. 
let tx_hash = self.tx_manager.insert(tx_context).map_err(|err| { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); - - let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); - if !tracker.contains(&session_id) { - tracker.put(session_id.clone(), ClientSessionTracking::new()); - tracker - .get_mut(&session_id) - .expect("Adding session-tracking record should be atomic.") - }; - let recent_failure_count = - record.fail_tx_proposal(Instant::now(), self.config.tx_failure_window); - - if (recent_failure_count as u32) >= self.config.tx_failure_limit { - // TODO: Some action to take to counter the harmful traffic - log::warn!( - self.logger, - "Client has {} recent failed tx proposals within the last {}", - recent_failure_count, - self.config.tx_failure_window.as_secs_f32() - ); - } } err })?; From 67c22844808bd4aea7ba9fca7284de37dc1ebe1f Mon Sep 17 00:00:00 2001 From: Millie C Date: Wed, 8 Mar 2023 21:28:18 -0500 Subject: [PATCH 44/46] Push an Instant::now() for every time validation fails on a tx proposal --- consensus/service/src/api/client_api_service.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 1d1dac2ed4..93d36ceb9f 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -144,12 +144,26 @@ impl ClientApiService { msg: Message, ) -> Result { counters::ADD_TX_INITIATED.inc(); + let session_id = ClientSession::from(msg.channel_id.clone()); let tx_context = self.enclave.client_tx_propose(msg.into())?; // Cache the transaction. This performs the well-formedness checks. 
let tx_hash = self.tx_manager.insert(tx_context).map_err(|err| { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); + + let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. + let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); + let record = if let Some(record) = tracker.get_mut(&session_id) { + record + } else { + tracker.put(session_id.clone(), ClientSessionTracking::new()); + tracker.get_mut(&session_id) + .expect("Adding session-tracking record should be atomic.") + + }; + let _recent_failure_count = record.fail_tx_proposal(&Instant::now(), &tracking_window); + // TODO: drop session when recent_failure_count reaches some number } err })?; From 65bd6ad1864eee02998d86223a34c9e42bb1877f Mon Sep 17 00:00:00 2001 From: Millie C Date: Mon, 13 Mar 2023 22:35:31 -0400 Subject: [PATCH 45/46] Cargo fmt --- consensus/service/src/api/client_api_service.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index 93d36ceb9f..ceb8aae289 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -152,18 +152,20 @@ impl ClientApiService { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); - let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. + let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. 
let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); let record = if let Some(record) = tracker.get_mut(&session_id) { record } else { tracker.put(session_id.clone(), ClientSessionTracking::new()); - tracker.get_mut(&session_id) + tracker + .get_mut(&session_id) .expect("Adding session-tracking record should be atomic.") - }; - let _recent_failure_count = record.fail_tx_proposal(&Instant::now(), &tracking_window); - // TODO: drop session when recent_failure_count reaches some number + let _recent_failure_count = + record.fail_tx_proposal(&Instant::now(), &tracking_window); + // TODO: drop session when recent_failure_count reaches some + // number } err })?; From 7be77286cf6f7768f1ab44a818f30f9c95eae584 Mon Sep 17 00:00:00 2001 From: Millie C Date: Thu, 6 Apr 2023 21:05:01 -0400 Subject: [PATCH 46/46] Respond to suggestions on the pull request, no longer using references for Instant and Duration --- consensus/service/src/api/client_api_service.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/consensus/service/src/api/client_api_service.rs b/consensus/service/src/api/client_api_service.rs index ceb8aae289..386d9b8ccd 100644 --- a/consensus/service/src/api/client_api_service.rs +++ b/consensus/service/src/api/client_api_service.rs @@ -152,7 +152,9 @@ impl ClientApiService { if let TxManagerError::TransactionValidation(cause) = &err { counters::TX_VALIDATION_ERROR_COUNTER.inc(&format!("{cause:?}")); - let tracking_window = Duration::from_secs(5); // TODO, placeholder before draft PR gets published. 
+ // This will become a proper config option, already implemented + // in pull request #3296 "Failure limit on tx proposals" + let tracking_window = Duration::from_secs(60); let mut tracker = self.tracked_sessions.lock().expect("Mutex poisoned"); let record = if let Some(record) = tracker.get_mut(&session_id) { record @@ -163,9 +165,9 @@ impl ClientApiService { .expect("Adding session-tracking record should be atomic.") }; let _recent_failure_count = - record.fail_tx_proposal(&Instant::now(), &tracking_window); - // TODO: drop session when recent_failure_count reaches some - // number + record.fail_tx_proposal(Instant::now(), tracking_window); + // Dropping the client after a limit has been reached will be + // implemented in a future pull request. } err })?;