From 5d3b10f924f9ffecd997f3cf7af2a528efb38fab Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:31:34 -0600 Subject: [PATCH 01/50] Refactor pruning proof validation to many functions Co-authored-by: Ori Newman --- consensus/src/consensus/services.rs | 1 + consensus/src/processes/pruning_proof/mod.rs | 194 ++++++++++++++----- 2 files changed, 145 insertions(+), 50 deletions(-) diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 4afb5938a..3db1e8d38 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -185,6 +185,7 @@ impl ConsensusServices { parents_manager.clone(), reachability_service.clone(), ghostdag_managers.clone(), + ghostdag_primary_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), params.max_block_level, diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 6324aa4ee..3dfed8660 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -26,7 +26,10 @@ use kaspa_consensus_core::{ BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher, KType, }; use kaspa_core::{debug, info, trace}; -use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}; +use kaspa_database::{ + prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}, + utils::DbLifetime, +}; use kaspa_hashes::Hash; use kaspa_pow::calc_block_level; use kaspa_utils::{binary_heap::BinaryHeapExtensions, vec::VecExtensions}; @@ -41,7 +44,7 @@ use crate::{ services::reachability::{MTReachabilityService, ReachabilityService}, stores::{ depth::DbDepthStore, - ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, + ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, 
headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, @@ -88,6 +91,16 @@ impl Clone for CachedPruningPointData { } } +struct TempProofContext { + headers_store: Arc, + ghostdag_stores: Vec>, + relations_stores: Vec, + reachability_stores: Vec>>, + ghostdag_managers: + Vec, DbHeadersStore>>, + db_lifetime: DbLifetime, +} + pub struct PruningProofManager { db: Arc, @@ -96,6 +109,7 @@ pub struct PruningProofManager { reachability_relations_store: Arc>, reachability_service: MTReachabilityService, ghostdag_stores: Arc>>, + ghostdag_primary_store: Arc, relations_stores: Arc>>, pruning_point_store: Arc>, past_pruning_points_store: Arc, @@ -106,6 +120,7 @@ pub struct PruningProofManager { selected_chain_store: Arc>, ghostdag_managers: Arc>, + ghostdag_primary_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, parents_manager: DbParentsManager, @@ -130,6 +145,7 @@ impl PruningProofManager { parents_manager: DbParentsManager, reachability_service: MTReachabilityService, ghostdag_managers: Arc>, + ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, max_block_level: BlockLevel, @@ -146,6 +162,7 @@ impl PruningProofManager { reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_primary_store: storage.ghostdag_primary_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), past_pruning_points_store: storage.past_pruning_points_store.clone(), @@ -168,6 +185,7 @@ impl PruningProofManager { pruning_proof_m, anticone_finalization_depth, ghostdag_k, + ghostdag_primary_manager: ghostdag_manager, is_consensus_exiting, } @@ -244,8 +262,12 @@ impl PruningProofManager { 
self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); let gd = if header.hash == self.genesis_hash { self.ghostdag_managers[level].genesis_ghostdag_data() - } else if level == 0 { - if let Some(gd) = trusted_gd_map.get(&header.hash) { + } else { + self.ghostdag_managers[level].ghostdag(&parents) + }; + + if level == 0 { + let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { let calculated_gd = self.ghostdag_managers[level].ghostdag(&parents); @@ -258,18 +280,18 @@ impl PruningProofManager { mergeset_reds: calculated_gd.mergeset_reds.clone(), blues_anticone_sizes: calculated_gd.blues_anticone_sizes.clone(), } - } + }; + self.ghostdag_primary_store.insert(header.hash, Arc::new(gd)).unwrap(); } else { - self.ghostdag_managers[level].ghostdag(&parents) - }; - self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); + self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); + } } } let virtual_parents = vec![pruning_point]; let virtual_state = Arc::new(VirtualState { parents: virtual_parents.clone(), - ghostdag_data: self.ghostdag_managers[0].ghostdag(&virtual_parents), + ghostdag_data: self.ghostdag_primary_manager.ghostdag(&virtual_parents), ..VirtualState::default() }); self.virtual_stores.write().state.set(virtual_state).unwrap(); @@ -387,18 +409,16 @@ impl PruningProofManager { } } - pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { - if proof.len() != self.max_block_level as usize + 1 { - return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); - } + fn init_validate_pruning_point_proof_stores_and_processes( + &self, + proof: &PruningPointProof, + ) -> PruningImportResult { if proof[0].is_empty() { return Err(PruningImportError::PruningProofNotEnoughHeaders); } let headers_estimate = self.estimate_proof_unique_size(proof); - let proof_pp_header = proof[0].last().expect("checked if empty"); - let 
proof_pp = proof_pp_header.hash; - let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); let headers_store = @@ -444,6 +464,23 @@ impl PruningProofManager { db.write(batch).unwrap(); } + Ok(TempProofContext { db_lifetime, headers_store, ghostdag_stores, relations_stores, reachability_stores, ghostdag_managers }) + } + + fn populate_stores_for_validate_pruning_point_proof( + &self, + proof: &PruningPointProof, + stores_and_processes: &mut TempProofContext, + ) -> PruningImportResult> { + let headers_store = &stores_and_processes.headers_store; + let ghostdag_stores = &stores_and_processes.ghostdag_stores; + let mut relations_stores = stores_and_processes.relations_stores.clone(); + let reachability_stores = &stores_and_processes.reachability_stores; + let ghostdag_managers = &stores_and_processes.ghostdag_managers; + + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; for level in (0..=self.max_block_level).rev() { // Before processing this level, check if the process is exiting so we can end early @@ -533,45 +570,91 @@ impl PruningProofManager { selected_tip_by_level[level_idx] = selected_tip; } + Ok(selected_tip_by_level.into_iter().map(|selected_tip| selected_tip.unwrap()).collect()) + } + + fn validate_proof_selected_tip( + &self, + proof_selected_tip: Hash, + level: BlockLevel, + proof_pp_level: BlockLevel, + proof_pp: Hash, + proof_pp_header: &Header, + ) -> PruningImportResult<()> { + // A proof selected tip of some level has to be the proof suggested prunint point itself if its level + // is lower or equal to the pruning point level, or a parent of the pruning point on the relevant level + // otherwise. 
+ if level <= proof_pp_level { + if proof_selected_tip != proof_pp { + return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(proof_selected_tip, level)); + } + } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&proof_selected_tip) { + return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(proof_selected_tip, level)); + } + + Ok(()) + } + + // find_proof_and_consensus_common_chain_ancestor_ghostdag_data returns an option of a tuple + // that contains the ghostdag data of the proof and current consensus common ancestor. If no + // such ancestor exists, it returns None. + fn find_proof_and_consensus_common_ancestor_ghostdag_data( + &self, + ghostdag_stores: &[Arc], + proof_selected_tip: Hash, + level: BlockLevel, + proof_selected_tip_gd: CompactGhostdagData, + ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { + let mut proof_current = proof_selected_tip; + let mut proof_current_gd = proof_selected_tip_gd; + loop { + match self.ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { + Some(current_gd) => { + break Some((proof_current_gd, current_gd)); + } + None => { + proof_current = proof_current_gd.selected_parent; + if proof_current.is_origin() { + break None; + } + proof_current_gd = ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); + } + }; + } + } + + pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { + if proof.len() != self.max_block_level as usize + 1 { + return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); + } + + let proof_pp_header = proof[0].last().expect("checked if empty"); + let proof_pp = proof_pp_header.hash; + let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); + let mut stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(&proof)?; + let selected_tip_by_level = 
self.populate_stores_for_validate_pruning_point_proof(proof, &mut stores_and_processes)?; + let ghostdag_stores = stores_and_processes.ghostdag_stores; + let pruning_read = self.pruning_point_store.read(); let relations_read = self.relations_stores.read(); let current_pp = pruning_read.get().unwrap().pruning_point; let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); - for (level_idx, selected_tip) in selected_tip_by_level.into_iter().enumerate() { + for (level_idx, selected_tip) in selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; - let selected_tip = selected_tip.unwrap(); - if level <= proof_pp_level { - if selected_tip != proof_pp { - return Err(PruningImportError::PruningProofSelectedTipIsNotThePruningPoint(selected_tip, level)); - } - } else if !self.parents_manager.parents_at_level(proof_pp_header, level).contains(&selected_tip) { - return Err(PruningImportError::PruningProofSelectedTipNotParentOfPruningPoint(selected_tip, level)); - } + self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { continue; } - let mut proof_current = selected_tip; - let mut proof_current_gd = proof_selected_tip_gd; - let common_ancestor_data = loop { - match self.ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap_option() { - Some(current_gd) => { - break Some((proof_current_gd, current_gd)); - } - None => { - proof_current = proof_current_gd.selected_parent; - if proof_current.is_origin() { - break None; - } - proof_current_gd = ghostdag_stores[level_idx].get_compact_data(proof_current).unwrap(); - } - }; - }; - - if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = common_ancestor_data { + if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = 
self.find_proof_and_consensus_common_ancestor_ghostdag_data( + &ghostdag_stores, + selected_tip, + level, + proof_selected_tip_gd, + ) { let selected_tip_blue_work_diff = SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { @@ -593,8 +676,19 @@ impl PruningProofManager { return Ok(()); } + // If we got here it means there's no level with shared blocks + // between the proof and the current consensus. In this case we + // consider the proof to be better if it has at least one level + // with 2*self.pruning_proof_m blue blocks where consensus doesn't. for level in (0..=self.max_block_level).rev() { let level_idx = level as usize; + + let proof_selected_tip = selected_tip_by_level[level_idx]; + let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(proof_selected_tip).unwrap(); + if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { + continue; + } + match relations_read[level_idx].get_parents(current_pp).unwrap_option() { Some(parents) => { if parents @@ -614,7 +708,7 @@ impl PruningProofManager { drop(pruning_read); drop(relations_read); - drop(db_lifetime); + drop(stores_and_processes.db_lifetime); Err(PruningImportError::PruningProofNotEnoughHeaders) } @@ -816,7 +910,7 @@ impl PruningProofManager { let mut current = hash; for _ in 0..=self.ghostdag_k { hashes.push(current); - let Some(parent) = self.ghostdag_stores[0].get_selected_parent(current).unwrap_option() else { + let Some(parent) = self.ghostdag_primary_store.get_selected_parent(current).unwrap_option() else { break; }; if parent == self.genesis_hash || parent == blockhash::ORIGIN { @@ -836,7 +930,7 @@ impl PruningProofManager { .traversal_manager .anticone(pruning_point, virtual_parents, None) .expect("no error is expected when max_traversal_allowed is None"); - let mut anticone = self.ghostdag_managers[0].sort_blocks(anticone); + 
let mut anticone = self.ghostdag_primary_manager.sort_blocks(anticone); anticone.insert(0, pruning_point); let mut daa_window_blocks = BlockHashMap::new(); @@ -847,14 +941,14 @@ impl PruningProofManager { for anticone_block in anticone.iter().copied() { let window = self .window_manager - .block_window(&self.ghostdag_stores[0].get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) + .block_window(&self.ghostdag_primary_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) .unwrap(); for hash in window.deref().iter().map(|block| block.0.hash) { if let Entry::Vacant(e) = daa_window_blocks.entry(hash) { e.insert(TrustedHeader { header: self.headers_store.get_header(hash).unwrap(), - ghostdag: (&*self.ghostdag_stores[0].get_data(hash).unwrap()).into(), + ghostdag: (&*self.ghostdag_primary_store.get_data(hash).unwrap()).into(), }); } } @@ -862,7 +956,7 @@ impl PruningProofManager { let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block); for hash in ghostdag_chain { if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) { - let ghostdag = self.ghostdag_stores[0].get_data(hash).unwrap(); + let ghostdag = self.ghostdag_primary_store.get_data(hash).unwrap(); e.insert((&*ghostdag).into()); // We fill `ghostdag_blocks` only for kaspad-go legacy reasons, but the real set we @@ -894,7 +988,7 @@ impl PruningProofManager { if header.blue_work < min_blue_work { continue; } - let ghostdag = (&*self.ghostdag_stores[0].get_data(current).unwrap()).into(); + let ghostdag = (&*self.ghostdag_primary_store.get_data(current).unwrap()).into(); e.insert(TrustedHeader { header, ghostdag }); } let parents = self.relations_stores.read()[0].get_parents(current).unwrap(); From 40f1cc9bbe24031f9aefaae0db9bef7e1897bbb2 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:32:26 -0600 Subject: [PATCH 02/50] Use blue score as work for higher levels Co-authored-by: Ori Newman --- 
consensus/src/consensus/services.rs | 1 + consensus/src/processes/ghostdag/protocol.rs | 24 +++++++++++++------- consensus/src/processes/pruning_proof/mod.rs | 1 + 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 3db1e8d38..b5617ea76 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -126,6 +126,7 @@ impl ConsensusServices { relations_services[level].clone(), storage.headers_store.clone(), reachability_service.clone(), + level != 0, ) }) .collect_vec(), diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 87beeb565..ac9ae41d7 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -5,6 +5,7 @@ use kaspa_consensus_core::{ BlockHashMap, BlueWorkType, HashMapCustomHasher, }; use kaspa_hashes::Hash; +use kaspa_math::Uint192; use kaspa_utils::refs::Refs; use crate::{ @@ -29,6 +30,7 @@ pub struct GhostdagManager, pub(super) reachability_service: U, + use_score_as_work: bool, } impl GhostdagManager { @@ -39,8 +41,9 @@ impl, reachability_service: U, + use_score_as_work: bool, ) -> Self { - Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store } + Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, use_score_as_work } } pub fn genesis_ghostdag_data(&self) -> GhostdagData { @@ -115,14 +118,19 @@ impl Date: Mon, 3 Jun 2024 23:34:17 -0600 Subject: [PATCH 03/50] Remove pruning processor dependency on gd managers Co-authored-by: Ori Newman --- consensus/src/pipeline/pruning_processor/processor.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 8cded745a..bee46834a 100644 --- 
a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -2,7 +2,7 @@ use crate::{ consensus::{ - services::{ConsensusServices, DbGhostdagManager, DbPruningPointManager}, + services::{ConsensusServices, DbPruningPointManager}, storage::ConsensusStorage, }, model::{ @@ -69,7 +69,6 @@ pub struct PruningProcessor { // Managers and Services reachability_service: MTReachabilityService, - ghostdag_managers: Arc>, pruning_point_manager: DbPruningPointManager, pruning_proof_manager: Arc, @@ -106,7 +105,6 @@ impl PruningProcessor { db, storage: storage.clone(), reachability_service: services.reachability_service.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), pruning_point_manager: services.pruning_point_manager.clone(), pruning_proof_manager: services.pruning_proof_manager.clone(), pruning_lock, From 1df5a22e2c5afb8b45a7cc59ae2579ac640cf238 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:35:17 -0600 Subject: [PATCH 04/50] Consistency renaming Co-authored-by: Ori Newman --- .../pipeline/body_processor/body_validation_in_context.rs | 2 +- consensus/src/pipeline/body_processor/processor.rs | 6 +++--- consensus/src/pipeline/header_processor/processor.rs | 7 ++++--- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index 2425556d0..b437f1f13 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -25,7 +25,7 @@ impl BlockBodyProcessor { } fn check_block_transactions_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { - let (pmt, _) = self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap())?; + let (pmt, _) = 
self.window_manager.calc_past_median_time(&self.ghostdag_primary_store.get_data(block.hash()).unwrap())?; for tx in block.transactions.iter() { if let Err(e) = self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, pmt) { return Err(RuleError::TxInContextFailed(tx.id(), e)); diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 1ea674263..8b6d35e19 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -55,7 +55,7 @@ pub struct BlockBodyProcessor { // Stores pub(super) statuses_store: Arc>, - pub(super) ghostdag_store: Arc, + pub(super) ghostdag_primary_store: Arc, pub(super) headers_store: Arc, pub(super) block_transactions_store: Arc, pub(super) body_tips_store: Arc>, @@ -92,7 +92,7 @@ impl BlockBodyProcessor { db: Arc, statuses_store: Arc>, - ghostdag_store: Arc, + ghostdag_primary_store: Arc, headers_store: Arc, block_transactions_store: Arc, body_tips_store: Arc>, @@ -116,7 +116,7 @@ impl BlockBodyProcessor { db, statuses_store, reachability_service, - ghostdag_store, + ghostdag_primary_store, headers_store, block_transactions_store, body_tips_store, diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index d1b74aeb5..a90e67c50 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -356,13 +356,13 @@ impl HeaderProcessor { .unwrap_or_else(|| Arc::new(self.ghostdag_managers[level].ghostdag(&ctx.known_parents[level]))) }) .collect_vec(); - self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } fn commit_header(&self, ctx: HeaderProcessingContext, header: &Header) { let ghostdag_data = ctx.ghostdag_data.as_ref().unwrap(); + let ghostdag_primary_data = &ghostdag_data[0]; let pp 
= ctx.pruning_point(); // Create a DB batch writer @@ -375,6 +375,7 @@ impl HeaderProcessor { for (level, datum) in ghostdag_data.iter().enumerate() { self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap(); } + if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); } @@ -395,8 +396,8 @@ impl HeaderProcessor { // time, and thus serializing this part will do no harm. However this should be benchmarked. The // alternative is to create a separate ReachabilityProcessor and to manage things more tightly. let mut staging = StagingReachabilityStore::new(self.reachability_store.upgradable_read()); - let selected_parent = ghostdag_data[0].selected_parent; - let mut reachability_mergeset = ghostdag_data[0].unordered_mergeset_without_selected_parent(); + let selected_parent = ghostdag_primary_data.selected_parent; + let mut reachability_mergeset = ghostdag_primary_data.unordered_mergeset_without_selected_parent(); reachability::add_block(&mut staging, ctx.hash, selected_parent, &mut reachability_mergeset).unwrap(); // Non-append only stores need to use write locks. 
From d12592c34f3055fd474c3ae8847c4c83c853aff1 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:35:37 -0600 Subject: [PATCH 05/50] Update db version Co-authored-by: Ori Newman --- consensus/src/consensus/factory.rs | 2 +- database/src/registry.rs | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f3ee51d9c..f34aa54f9 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 3; +const LATEST_DB_VERSION: u32 = 4; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { diff --git a/database/src/registry.rs b/database/src/registry.rs index 9e1b129d6..981af729d 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -36,10 +36,12 @@ pub enum DatabaseStorePrefixes { UtxoMultisets = 26, VirtualUtxoset = 27, VirtualState = 28, + GhostdagProof = 29, + GhostdagCompactProof = 30, // ---- Decomposed reachability stores ---- - ReachabilityTreeChildren = 30, - ReachabilityFutureCoveringSet = 31, + ReachabilityTreeChildren = 31, + ReachabilityFutureCoveringSet = 32, // ---- Metadata ---- MultiConsensusMetadata = 124, From 2bea765a2228f2c003589c89193ce66307087c62 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 22:44:21 -0600 Subject: [PATCH 06/50] GD Optimizations Co-authored-by: Ori Newman --- consensus/src/model/stores/ghostdag.rs | 21 ++ .../pipeline/pruning_processor/processor.rs | 3 +- consensus/src/processes/pruning_proof/mod.rs | 296 ++++++++++++++++-- database/src/registry.rs | 2 + simpa/src/main.rs | 7 + 5 files changed, 304 insertions(+), 25 deletions(-) diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index 89c4686c5..3ffe23e7e 
100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -271,6 +271,27 @@ impl DbGhostdagStore { } } + pub fn new_temp( + db: Arc, + level: BlockLevel, + cache_policy: CachePolicy, + compact_cache_policy: CachePolicy, + temp_index: u8, + ) -> Self { + assert_ne!(SEPARATOR, level, "level {} is reserved for the separator", level); + let lvl_bytes = level.to_le_bytes(); + let temp_index_bytes = temp_index.to_le_bytes(); + let prefix = DatabaseStorePrefixes::TempGhostdag.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + let compact_prefix = + DatabaseStorePrefixes::TempGhostdagCompact.into_iter().chain(lvl_bytes).chain(temp_index_bytes).collect_vec(); + Self { + db: Arc::clone(&db), + level, + access: CachedDbAccess::new(db.clone(), cache_policy, prefix), + compact_access: CachedDbAccess::new(db, compact_cache_policy, compact_prefix), + } + } + pub fn clone_with_new_cache(&self, cache_policy: CachePolicy, compact_cache_policy: CachePolicy) -> Self { Self::new(Arc::clone(&self.db), self.level, cache_policy, compact_cache_policy) } diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index bee46834a..cd9026565 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -455,7 +455,8 @@ impl PruningProcessor { ); if self.config.enable_sanity_checks { - self.assert_proof_rebuilding(proof, new_pruning_point); + // self.assert_proof_rebuilding(proof, new_pruning_point); + self.pruning_proof_manager.validate_pruning_point_proof(&proof).unwrap(); self.assert_data_rebuilding(data, new_pruning_point); } diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 31b1df833..8b4b3e299 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -25,7 +25,7 @@ use 
kaspa_consensus_core::{ trusted::{TrustedBlock, TrustedGhostdagData, TrustedHeader}, BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher, KType, }; -use kaspa_core::{debug, info, trace}; +use kaspa_core::{debug, info, trace, warn}; use kaspa_database::{ prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}, utils::DbLifetime, @@ -41,11 +41,14 @@ use crate::{ storage::ConsensusStorage, }, model::{ - services::reachability::{MTReachabilityService, ReachabilityService}, + services::{ + reachability::{MTReachabilityService, ReachabilityService}, + relations::MTRelationsService, + }, stores::{ depth::DbDepthStore, ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, - headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, + headers::{DbHeadersStore, HeaderStore, HeaderStoreReader, HeaderWithBlockLevel}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, pruning::{DbPruningStore, PruningStoreReader}, @@ -78,7 +81,11 @@ enum PruningProofManagerInternalError { #[error("cannot find a common ancestor: {0}")] NoCommonAncestor(String), + + #[error("missing headers to build proof: {0}")] + NotEnoughHeadersToBuildProof(String), } +type PruningProofManagerInternalResult = std::result::Result; struct CachedPruningPointData { pruning_point: Hash, @@ -714,40 +721,280 @@ impl PruningProofManager { Err(PruningImportError::PruningProofNotEnoughHeaders) } + // TODO: Find a better name + fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { + let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); + pp_header + .parents_by_level + .iter() + .enumerate() + .skip(1) + .find_map(|(level, parents)| { + if BlockHashSet::from_iter(parents.iter().copied()) == direct_parents { + None + } else { + Some((level - 1) as BlockLevel) + } + }) + .unwrap_or(self.max_block_level) + } + + fn 
estimated_blue_depth_at_level_0(&self, level: BlockLevel, level_depth: u64, current_dag_level: BlockLevel) -> u64 { + level_depth << current_dag_level.saturating_sub(level) + } + + fn find_selected_parent_header_at_level( + &self, + header: &Header, + level: BlockLevel, + ) -> PruningProofManagerInternalResult> { + let parents = self.parents_manager.parents_at_level(header, level); + let mut sp = SortableBlock { hash: parents[0], blue_work: self.headers_store.get_blue_score(parents[0]).unwrap_or(0).into() }; + for parent in parents.iter().copied().skip(1) { + let sblock = SortableBlock { + hash: parent, + blue_work: self + .headers_store + .get_blue_score(parent) + .unwrap_option() + .ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(format!( + "find_selected_parent_header_at_level (level {level}) couldn't find the header for block {parent}" + )))? + .into(), + }; + if sblock > sp { + sp = sblock; + } + } + // TODO: For higher levels the chance of having more than two parents is very small, so it might make sense to fetch the whole header for the SortableBlock instead of blue_score (which will probably come from a compact header). + self.headers_store.get_header(sp.hash).unwrap_option().ok_or(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof( + format!("find_selected_parent_header_at_level (level {level}) couldn't find the header for block {}", sp.hash,), + )) + // Ok(self.headers_store.get_header(sp.hash).unwrap_option().expect("already checked if compact header exists above")) + } + + fn find_sufficient_root( + &self, + pp_header: &HeaderWithBlockLevel, + level: BlockLevel, + current_dag_level: BlockLevel, + required_block: Option, + temp_db: Arc, + ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { + let selected_tip_header = if pp_header.block_level >= level { + pp_header.header.clone() + } else { + self.find_selected_parent_header_at_level(&pp_header.header, level)? 
+ }; + let selected_tip = selected_tip_header.hash; + let pp = pp_header.header.hash; + let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); + let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size + let required_level_depth = 2 * self.pruning_proof_m; + let mut required_level_0_depth = if level == 0 { + required_level_depth + } else { + self.estimated_blue_depth_at_level_0( + level, + required_level_depth * 5 / 4, // We take a safety margin + current_dag_level, + ) + }; + + let mut tries = 0; + loop { + let required_block = if let Some(required_block) = required_block { + // TODO: We can change it to skip related checks if `None` + required_block + } else { + selected_tip + }; + + let mut finished_headers = false; + let mut finished_headers_for_required_block_chain = false; + let mut current_header = selected_tip_header.clone(); + let mut required_block_chain = BlockHashSet::new(); + let mut selected_chain = BlockHashSet::new(); + let mut intersected_with_required_block_chain = false; + let mut current_required_chain_block = self.headers_store.get_header(required_block).unwrap(); + let root_header = loop { + if !intersected_with_required_block_chain { + required_block_chain.insert(current_required_chain_block.hash); + selected_chain.insert(current_header.hash); + if required_block_chain.contains(¤t_header.hash) + || required_block_chain.contains(¤t_required_chain_block.hash) + { + intersected_with_required_block_chain = true; + } + } + + if current_header.direct_parents().is_empty() // Stop at genesis + || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth + && intersected_with_required_block_chain) + { + break current_header; + } + current_header = match self.find_selected_parent_header_at_level(¤t_header, level) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + if 
!intersected_with_required_block_chain {
+ warn!("it's unknown if the selected root for level {level} ( {} ) is in the chain of the required block {required_block}", current_header.hash)
+ }
+ finished_headers = true; // We want to give this root a shot if all its past is pruned
+ break current_header;
+ }
+ Err(e) => return Err(e),
+ };
+
+ if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain {
+ current_required_chain_block =
+ match self.find_selected_parent_header_at_level(&current_required_chain_block, level) {
+ Ok(header) => header,
+ Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => {
+ finished_headers_for_required_block_chain = true;
+ current_required_chain_block
+ }
+ Err(e) => return Err(e),
+ };
+ }
+ };
+ let root = root_header.hash;
+
+ if level == 0 {
+ return Ok((self.ghostdag_primary_store.clone(), selected_tip, root));
+ }
+
+ let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries));
+ let gd_manager = GhostdagManager::new(
+ root,
+ self.ghostdag_k,
+ ghostdag_store.clone(),
+ relations_service.clone(),
+ self.headers_store.clone(),
+ self.reachability_service.clone(),
+ true,
+ );
+ ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap();
+ let mut topological_heap: BinaryHeap<_> = Default::default();
+ let mut visited = BlockHashSet::new();
+ for child in relations_service.get_children(root).unwrap().read().iter().copied() {
+ topological_heap.push(Reverse(SortableBlock {
+ hash: child,
+ // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology
+ blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? 
+ })); + } + + let mut has_required_block = root == required_block; + loop { + let Some(current) = topological_heap.pop() else { + break; + }; + let current_hash = current.0.hash; + if !visited.insert(current_hash) { + continue; + } + + if !self.reachability_service.is_dag_ancestor_of(current_hash, pp) { + // We don't care about blocks in the antipast of the pruning point + continue; + } + + if !has_required_block && current_hash == required_block { + has_required_block = true; + } + + let relevant_parents: Box<[Hash]> = relations_service + .get_parents(current_hash) + .unwrap() + .iter() + .copied() + .filter(|parent| self.reachability_service.is_dag_ancestor_of(root, *parent)) + .collect(); + let current_gd = gd_manager.ghostdag(&relevant_parents); + ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap(); + for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? + })); + } + } + + if has_required_block + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) + { + break Ok((ghostdag_store, selected_tip, root)); + } + + tries += 1; + if finished_headers { + panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned") + } + required_level_0_depth <<= 1; + warn!("Failed to find sufficient root for level {level} after {tries} tries. 
Retrying again to find with depth {required_level_0_depth}"); + } + } + + fn calc_gd_for_all_levels( + &self, + pp_header: &HeaderWithBlockLevel, + temp_db: Arc, + ) -> (Vec>, Vec, Vec) { + let current_dag_level = self.find_current_dag_level(&pp_header.header); + let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; + let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; + let mut root_by_level = vec![None; self.max_block_level as usize + 1]; + for level in (0..=self.max_block_level).rev() { + let level_usize = level as usize; + let required_block = if level != self.max_block_level { + let next_level_store = ghostdag_stores[level_usize + 1].as_ref().unwrap().clone(); + let block_at_depth_m_at_next_level = self + .block_at_depth(&*next_level_store, selected_tip_by_level[level_usize + 1].unwrap(), self.pruning_proof_m) + .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) + .unwrap(); + Some(block_at_depth_m_at_next_level) + } else { + None + }; + let (store, selected_tip, root) = self + .find_sufficient_root(&pp_header, level, current_dag_level, required_block, temp_db.clone()) + .expect(&format!("find_sufficient_root failed for level {level}")); + ghostdag_stores[level_usize] = Some(store); + selected_tip_by_level[level_usize] = Some(selected_tip); + root_by_level[level_usize] = Some(root); + } + + ( + ghostdag_stores.into_iter().map(Option::unwrap).collect_vec(), + selected_tip_by_level.into_iter().map(Option::unwrap).collect_vec(), + root_by_level.into_iter().map(Option::unwrap).collect_vec(), + ) + } + pub(crate) fn build_pruning_point_proof(&self, pp: Hash) -> PruningPointProof { if pp == self.genesis_hash { return vec![]; } + let (_db_lifetime, temp_db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); let pp_header = self.headers_store.get_header_with_block_level(pp).unwrap(); - let selected_tip_by_level = (0..=self.max_block_level) - .map(|level| { - if level <= 
pp_header.block_level { - pp - } else { - self.ghostdag_managers[level as usize].find_selected_parent( - self.parents_manager - .parents_at_level(&pp_header.header, level) - .iter() - .filter(|parent| self.ghostdag_stores[level as usize].has(**parent).unwrap()) - .cloned(), - ) - } - }) - .collect_vec(); + let (ghostdag_stores, selected_tip_by_level, roots_by_level) = self.calc_gd_for_all_levels(&pp_header, temp_db); (0..=self.max_block_level) .map(|level| { let level = level as usize; let selected_tip = selected_tip_by_level[level]; let block_at_depth_2m = self - .block_at_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) + .block_at_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m) .map_err(|err| format!("level: {}, err: {}", level, err)) .unwrap(); - let root = if level != self.max_block_level as usize { + let root = roots_by_level[level]; + let old_root = if level != self.max_block_level as usize { let block_at_depth_m_at_next_level = self - .block_at_depth(&*self.ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) + .block_at_depth(&*ghostdag_stores[level + 1], selected_tip_by_level[level + 1], self.pruning_proof_m) .map_err(|err| format!("level + 1: {}, err: {}", level + 1, err)) .unwrap(); if self.reachability_service.is_dag_ancestor_of(block_at_depth_m_at_next_level, block_at_depth_2m) { @@ -756,7 +1003,7 @@ impl PruningProofManager { block_at_depth_2m } else { self.find_common_ancestor_in_chain_of_a( - &*self.ghostdag_stores[level], + &*ghostdag_stores[level], block_at_depth_m_at_next_level, block_at_depth_2m, ) @@ -766,11 +1013,12 @@ impl PruningProofManager { } else { block_at_depth_2m }; + // assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); let mut visited = BlockHashSet::new(); - queue.push(Reverse(SortableBlock::new(root, 
self.ghostdag_stores[level].get_blue_work(root).unwrap()))); + queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); while let Some(current) = queue.pop() { let current = current.0.hash; if !visited.insert(current) { @@ -783,7 +1031,7 @@ impl PruningProofManager { headers.push(self.headers_store.get_header(current).unwrap()); for child in self.relations_stores.read()[level].get_children(current).unwrap().read().iter().copied() { - queue.push(Reverse(SortableBlock::new(child, self.ghostdag_stores[level].get_blue_work(child).unwrap()))); + queue.push(Reverse(SortableBlock::new(child, self.headers_store.get_header(child).unwrap().blue_work))); } } diff --git a/database/src/registry.rs b/database/src/registry.rs index 981af729d..0b4f6e5d0 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -38,6 +38,8 @@ pub enum DatabaseStorePrefixes { VirtualState = 28, GhostdagProof = 29, GhostdagCompactProof = 30, + TempGhostdag = 33, + TempGhostdagCompact = 34, // ---- Decomposed reachability stores ---- ReachabilityTreeChildren = 31, diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 1baecc3e7..8975e974a 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -222,6 +222,11 @@ fn main_impl(mut args: Args) { Default::default(), unix_now(), )); + + // TODO: Remove the call to get_pruning_point_proof + // let the_hash = Hash::from_str("45d0bb998ab8c3513d18fef3f70d9c686539da7cbe4fab8021e55be1b3a0f8df").unwrap(); + // assert!(topologically_ordered_hashes(&consensus, config.params.genesis.hash).into_iter().contains(&the_hash)); + let _ = consensus.get_pruning_point_proof(); (consensus, lifetime) } else { let until = if args.target_blocks.is_none() { config.genesis.timestamp + args.sim_time * 1000 } else { u64::MAX }; // milliseconds @@ -441,6 +446,8 @@ mod tests { args.target_blocks = Some(5000); args.tpb = 1; args.test_pruning = true; + // args.output_dir = Some("/tmp/simpa".into()); + // args.input_dir = 
Some("/tmp/simpa".into()); kaspa_core::log::try_init_logger(&args.log_level); // As we log the panic, we want to set it up after the logger From 902b2172528982fe61c3a5ac2d41396960188d48 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Mon, 3 Jun 2024 23:52:38 -0600 Subject: [PATCH 07/50] Remove remnant of old impl. optimize db prefixes --- database/src/registry.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/database/src/registry.rs b/database/src/registry.rs index 0b4f6e5d0..87e89a491 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -36,14 +36,14 @@ pub enum DatabaseStorePrefixes { UtxoMultisets = 26, VirtualUtxoset = 27, VirtualState = 28, - GhostdagProof = 29, - GhostdagCompactProof = 30, - TempGhostdag = 33, - TempGhostdagCompact = 34, // ---- Decomposed reachability stores ---- - ReachabilityTreeChildren = 31, - ReachabilityFutureCoveringSet = 32, + ReachabilityTreeChildren = 30, + ReachabilityFutureCoveringSet = 31, + + // ---- Ghostdag Proof + TempGhostdag = 40, + TempGhostdagCompact = 41, // ---- Metadata ---- MultiConsensusMetadata = 124, From 7f1f412a7abc3b60fe8148483617ae9b187c6a44 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:13:31 -0600 Subject: [PATCH 08/50] Ensure parents are in relations; Add comments apply_proof only inserts parent entries for a header from the proof into the relations store for a level if there was GD data in the old stores for that header. 
This adds a check to filter out parent records not in relations store --- consensus/src/processes/pruning_proof/mod.rs | 62 +++++++++++++------- 1 file changed, 41 insertions(+), 21 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 8b4b3e299..c2aca9f49 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -721,7 +721,8 @@ impl PruningProofManager { Err(PruningImportError::PruningProofNotEnoughHeaders) } - // TODO: Find a better name + /// Looks for the first level whose parents are different from the direct parents of the pp_header + /// The current DAG level is the one right below that. fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); pp_header @@ -743,12 +744,26 @@ impl PruningProofManager { level_depth << current_dag_level.saturating_sub(level) } + /// selected parent at level = the parent of the header at the level + /// with the highest blue_work (using score as work in this case) fn find_selected_parent_header_at_level( &self, header: &Header, level: BlockLevel, + relations_service: MTRelationsService, ) -> PruningProofManagerInternalResult> { - let parents = self.parents_manager.parents_at_level(header, level); + // Logic of apply_proof only inserts parent entries for a header from the proof + // into the relations store for a level if there was GD data in the old stores for that + // header. 
To mimic that logic here, we need to filter out parents that are NOT in the relations_service
+ let parents = self
+ .parents_manager
+ .parents_at_level(header, level)
+ .iter()
+ .copied()
+ .filter(|parent| relations_service.has(*parent).unwrap())
+ .collect_vec()
+ .push_if_empty(ORIGIN);
+
 let mut sp = SortableBlock { hash: parents[0], blue_work: self.headers_store.get_blue_score(parents[0]).unwrap_or(0).into() };
 for parent in parents.iter().copied().skip(1) {
 let sblock = SortableBlock {
@@ -781,14 +796,16 @@ impl PruningProofManager {
 required_block: Option,
 temp_db: Arc,
 ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> {
+ let relations_service = MTRelationsService::new(self.relations_stores.clone(), level);
 let selected_tip_header = if pp_header.block_level >= level {
 pp_header.header.clone()
 } else {
- self.find_selected_parent_header_at_level(&pp_header.header, level)?
+ self.find_selected_parent_header_at_level(&pp_header.header, level, relations_service.clone())?
 };
+
 let selected_tip = selected_tip_header.hash;
 let pp = pp_header.header.hash;
- let relations_service = MTRelationsService::new(self.relations_stores.clone(), level);
+
 let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size
 let required_level_depth = 2 * self.pruning_proof_m;
 let mut required_level_0_depth = if level == 0 {
@@ -822,7 +839,7 @@ impl PruningProofManager {
 required_block_chain.insert(current_required_chain_block.hash);
 selected_chain.insert(current_header.hash);
 if required_block_chain.contains(&current_header.hash)
- || required_block_chain.contains(&current_required_chain_block.hash)
+ || selected_chain.contains(&current_required_chain_block.hash)
 {
 intersected_with_required_block_chain = true;
 }
@@ -834,7 +851,7 @@ impl PruningProofManager {
 {
 break current_header;
 }
- current_header = match 
self.find_selected_parent_header_at_level(&current_header, level, relations_service.clone()) {
 Ok(header) => header,
 Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => {
 if !intersected_with_required_block_chain {
@@ -847,15 +864,18 @@ impl PruningProofManager {
 };

 if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain {
- current_required_chain_block =
- match self.find_selected_parent_header_at_level(&current_required_chain_block, level) {
- Ok(header) => header,
- Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => {
- finished_headers_for_required_block_chain = true;
- current_required_chain_block
- }
- Err(e) => return Err(e),
- };
+ current_required_chain_block = match self.find_selected_parent_header_at_level(
+ &current_required_chain_block,
+ level,
+ relations_service.clone(),
+ ) {
+ Ok(header) => header,
+ Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => {
+ finished_headers_for_required_block_chain = true;
+ current_required_chain_block
+ }
+ Err(e) => return Err(e),
+ };
 }
 };
 let root = root_header.hash;
@@ -1038,7 +1058,7 @@ impl PruningProofManager {
 // Temp assertion for verifying a bug fix: assert that the full 2M chain is actually contained in the composed level proof
 let set = BlockHashSet::from_iter(headers.iter().map(|h| h.hash));
 let chain_2m = self
- .chain_up_to_depth(&*self.ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m)
+ .chain_up_to_depth(&*ghostdag_stores[level], selected_tip, 2 * self.pruning_proof_m)
 .map_err(|err| {
 dbg!(level, selected_tip, block_at_depth_2m, root);
 format!("Assert 2M chain -- level: {}, err: {}", level, err)
 })
@@ -1049,13 +1069,13 @@ impl PruningProofManager {
 if !set.contains(&chain_hash) {
 let next_level_tip = selected_tip_by_level[level + 1];
 let next_level_chain_m =
- 
self.chain_up_to_depth(&*ghostdag_stores[level + 1], next_level_tip, self.pruning_proof_m).unwrap(); let next_level_block_m = next_level_chain_m.last().copied().unwrap(); dbg!(next_level_chain_m.len()); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); - dbg!(self.ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level + 1].get_compact_data(next_level_block_m).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(selected_tip).unwrap().blue_score); + dbg!(ghostdag_stores[level].get_compact_data(block_at_depth_2m).unwrap().blue_score); dbg!(level, selected_tip, block_at_depth_2m, root); panic!("Assert 2M chain -- missing block {} at index {} out of {} chain blocks", chain_hash, i, chain_2m_len); } From f49478af23674a30fe8b53f4a3332942e1a17603 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:18:24 -0600 Subject: [PATCH 09/50] Match depth check to block_at_depth logic --- consensus/src/processes/pruning_proof/mod.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index c2aca9f49..c03c29449 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -721,8 +721,12 @@ impl PruningProofManager { Err(PruningImportError::PruningProofNotEnoughHeaders) } - /// Looks for the first level whose parents are different from the direct parents of the pp_header - /// The current DAG level is the one right below that. 
+ // The "current dag level" is the level right before the level whose parents are + // not the same as our header's direct parents + // + // Find the current DAG level by going through all the parents at each level, + // starting from the bottom level and see which is the first level that has + // parents that are NOT our current pp_header's direct parents. fn find_current_dag_level(&self, pp_header: &Header) -> BlockLevel { let direct_parents = BlockHashSet::from_iter(pp_header.direct_parents().iter().copied()); pp_header @@ -846,7 +850,8 @@ impl PruningProofManager { } if current_header.direct_parents().is_empty() // Stop at genesis - || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth + // Need to ensure this does the same 2M+1 depth that block_at_depth does + || (pp_header.header.blue_score > current_header.blue_score + required_level_0_depth && intersected_with_required_block_chain) { break current_header; @@ -942,8 +947,9 @@ impl PruningProofManager { } } + // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block - && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() > required_level_depth) { break Ok((ghostdag_store, selected_tip, root)); } From 879c135bd09edf7f9bd5feadbd75c23f8f603519 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:21:52 -0600 Subject: [PATCH 10/50] Use singular GD store for header processing --- .../pipeline/header_processor/processor.rs | 28 ++++++++----------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index a90e67c50..141c15418 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ 
b/consensus/src/pipeline/header_processor/processor.rs @@ -127,7 +127,7 @@ pub struct HeaderProcessor { pub(super) relations_stores: Arc>>, pub(super) reachability_store: Arc>, pub(super) reachability_relations_store: Arc>, - pub(super) ghostdag_stores: Arc>>, + pub(super) ghostdag_primary_store: Arc, pub(super) statuses_store: Arc>, pub(super) pruning_point_store: Arc>, pub(super) block_window_cache_for_difficulty: Arc, @@ -138,7 +138,7 @@ pub struct HeaderProcessor { pub(super) depth_store: Arc, // Managers and services - pub(super) ghostdag_managers: Arc>, + pub(super) ghostdag_primary_manager: DbGhostdagManager, pub(super) dag_traversal_manager: DbDagTraversalManager, pub(super) window_manager: DbWindowManager, pub(super) depth_manager: DbBlockDepthManager, @@ -178,7 +178,7 @@ impl HeaderProcessor { relations_stores: storage.relations_stores.clone(), reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), - ghostdag_stores: storage.ghostdag_stores.clone(), + ghostdag_primary_store: storage.ghostdag_primary_store.clone(), statuses_store: storage.statuses_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), @@ -188,7 +188,7 @@ impl HeaderProcessor { block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), - ghostdag_managers: services.ghostdag_managers.clone(), + ghostdag_primary_manager: services.ghostdag_primary_manager.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), window_manager: services.window_manager.clone(), reachability_service: services.reachability_service.clone(), @@ -348,14 +348,11 @@ impl HeaderProcessor { /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) fn ghostdag(&self, ctx: &mut 
HeaderProcessingContext) { - let ghostdag_data = (0..=ctx.block_level as usize) - .map(|level| { - self.ghostdag_stores[level] - .get_data(ctx.hash) - .unwrap_option() - .unwrap_or_else(|| Arc::new(self.ghostdag_managers[level].ghostdag(&ctx.known_parents[level]))) - }) - .collect_vec(); + let ghostdag_data = vec![self + .ghostdag_primary_store + .get_data(ctx.hash) + .unwrap_option() + .unwrap_or_else(|| Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0])))]; self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } @@ -373,7 +370,7 @@ impl HeaderProcessor { // for (level, datum) in ghostdag_data.iter().enumerate() { - self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap(); + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap(); } if let Some(window) = ctx.block_window_for_difficulty { @@ -454,7 +451,7 @@ impl HeaderProcessor { for (level, datum) in ghostdag_data.iter().enumerate() { // This data might have been already written when applying the pruning proof. 
- self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); } let mut relations_write = self.relations_stores.write(); @@ -495,8 +492,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = - Some(self.ghostdag_managers.iter().map(|manager_by_level| Arc::new(manager_by_level.genesis_ghostdag_data())).collect()); + ctx.ghostdag_data = Some(vec![Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())]); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); From 56c0b40505b70c3cb2a98a50f8628d5b3888e5e4 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 11 Jun 2024 21:52:11 -0600 Subject: [PATCH 11/50] Relax the panic to warn when finished_headers and couldn't find sufficient root This happens when there's not enough headers in the pruning proof but it satisfies validation --- consensus/src/processes/pruning_proof/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index c03c29449..35b502e33 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -956,7 +956,8 @@ impl PruningProofManager { tries += 1; if finished_headers { - panic!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned") + warn!("Failed to find sufficient root for level {level} after {tries} tries. Headers below the current depth of {required_level_0_depth} are already pruned. 
Trying anyway."); + break Ok((ghostdag_store, selected_tip, root)); } required_level_0_depth <<= 1; warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); From 43e9f9e82b85c352ce01d5173c38a0dd3bd8233d Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 22:26:17 -0600 Subject: [PATCH 12/50] Error handling for gd on higher levels relations.get_parents on GD gets extra parents that aren't in the current GD store. so get_blue_work throws an error next, ORIGIN was mising from the GD so add that --- consensus/src/processes/ghostdag/ordering.rs | 12 ++++++++++-- consensus/src/processes/pruning_proof/mod.rs | 1 + 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/consensus/src/processes/ghostdag/ordering.rs b/consensus/src/processes/ghostdag/ordering.rs index 88b648b8c..21306e5b8 100644 --- a/consensus/src/processes/ghostdag/ordering.rs +++ b/consensus/src/processes/ghostdag/ordering.rs @@ -44,8 +44,16 @@ impl Ord for SortableBlock { impl GhostdagManager { pub fn sort_blocks(&self, blocks: impl IntoIterator) -> Vec { let mut sorted_blocks: Vec = blocks.into_iter().collect(); - sorted_blocks - .sort_by_cached_key(|block| SortableBlock { hash: *block, blue_work: self.ghostdag_store.get_blue_work(*block).unwrap() }); + sorted_blocks.sort_by_cached_key(|block| SortableBlock { + hash: *block, + // Since we're only calculating GD at all levels on-demand, we may get blocks from the relations + // store in the mergeset that are not on our level + // Options for fixes: + // - do this + // - guarantee that we're only getting parents that are in this store + // - make relations store only return parents at the same or higher level + blue_work: self.ghostdag_store.get_blue_work(*block).unwrap_or_default(), + }); sorted_blocks } } diff --git a/consensus/src/processes/pruning_proof/mod.rs 
b/consensus/src/processes/pruning_proof/mod.rs index 35b502e33..26b011134 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -900,6 +900,7 @@ impl PruningProofManager { true, ); ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); let mut topological_heap: BinaryHeap<_> = Default::default(); let mut visited = BlockHashSet::new(); for child in relations_service.get_children(root).unwrap().read().iter().copied() { From 34f20abd64c913ebe66d007eefdc2ddbc603ad3c Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sat, 15 Jun 2024 09:57:43 -0600 Subject: [PATCH 13/50] remove using deeper requirements in lower levels --- consensus/src/processes/pruning_proof/mod.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 26b011134..313063172 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -812,15 +812,16 @@ impl PruningProofManager { let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; - let mut required_level_0_depth = if level == 0 { - required_level_depth - } else { - self.estimated_blue_depth_at_level_0( - level, - required_level_depth * 5 / 4, // We take a safety margin - current_dag_level, - ) - }; + let mut required_level_0_depth = required_level_depth; + // let mut required_level_0_depth = if level == 0 { + // required_level_depth + // } else { + // self.estimated_blue_depth_at_level_0( + // level, + // required_level_depth * 5 / 4, // We take a safety margin + // current_dag_level, + // ) + // }; let mut tries = 0; loop { From 
2654b254b97144cdf758e121f66b261987fd7b19 Mon Sep 17 00:00:00 2001
From: coderofstuff <114628839+coderofstuff@users.noreply.github.com>
Date: Tue, 18 Jun 2024 00:04:44 -0600
Subject: [PATCH 14/50] Fix missed references to self.ghostdag_stores in validate_pruning_point_proof

---
 consensus/src/processes/pruning_proof/mod.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs
index 313063172..6f7840ea6 100644
--- a/consensus/src/processes/pruning_proof/mod.rs
+++ b/consensus/src/processes/pruning_proof/mod.rs
@@ -666,7 +666,7 @@ impl PruningProofManager {
 let selected_tip_blue_work_diff = SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work);
 for parent in self.parents_manager.parents_at_level(&current_pp_header, level).iter().copied() {
- let parent_blue_work = self.ghostdag_stores[level_idx].get_blue_work(parent).unwrap();
+ let parent_blue_work = ghostdag_stores[level_idx].get_blue_work(parent).unwrap();
 let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work);
 if parent_blue_work_diff >= selected_tip_blue_work_diff {
@@ -702,7 +702,7 @@ impl PruningProofManager {
 if parents
 .iter()
 .copied()
- .any(|parent| self.ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m)
+ .any(|parent| ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m)
 {
 return Ok(());
 }

From ba049296b978577b17868fa854c3ab9b5ece362a Mon Sep 17 00:00:00 2001
From: coderofstuff <114628839+coderofstuff@users.noreply.github.com>
Date: Tue, 18 Jun 2024 23:03:17 -0600
Subject: [PATCH 15/50] Refactoring for single GD header processing

---
 .../pipeline/header_processor/processor.rs | 24 +++++++------------
 1 file changed, 9 insertions(+), 15 deletions(-)

diff --git a/consensus/src/pipeline/header_processor/processor.rs 
b/consensus/src/pipeline/header_processor/processor.rs index 141c15418..c4ccc8bae 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -55,7 +55,7 @@ pub struct HeaderProcessingContext { pub known_parents: Vec, // Staging data - pub ghostdag_data: Option>>, + pub ghostdag_data: Option>, pub block_window_for_difficulty: Option>, pub block_window_for_past_median_time: Option>, pub mergeset_non_daa: Option, @@ -99,7 +99,7 @@ impl HeaderProcessingContext { /// Returns the primary (level 0) GHOSTDAG data of this header. /// NOTE: is expected to be called only after GHOSTDAG computation was pushed into the context pub fn ghostdag_data(&self) -> &Arc { - &self.ghostdag_data.as_ref().unwrap()[0] + &self.ghostdag_data.as_ref().unwrap() } } @@ -348,18 +348,17 @@ impl HeaderProcessor { /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) fn ghostdag(&self, ctx: &mut HeaderProcessingContext) { - let ghostdag_data = vec![self + let ghostdag_data = self .ghostdag_primary_store .get_data(ctx.hash) .unwrap_option() - .unwrap_or_else(|| Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0])))]; - self.counters.mergeset_counts.fetch_add(ghostdag_data[0].mergeset_size() as u64, Ordering::Relaxed); + .unwrap_or_else(|| Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0]))); + self.counters.mergeset_counts.fetch_add(ghostdag_data.mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } fn commit_header(&self, ctx: HeaderProcessingContext, header: &Header) { - let ghostdag_data = ctx.ghostdag_data.as_ref().unwrap(); - let ghostdag_primary_data = &ghostdag_data[0]; + let ghostdag_primary_data = ctx.ghostdag_data.as_ref().unwrap(); let pp = ctx.pruning_point(); // Create a DB batch writer @@ -369,9 +368,7 @@ impl HeaderProcessor { // Append-only stores: these require no lock and hence done 
first in order to reduce locking time // - for (level, datum) in ghostdag_data.iter().enumerate() { - self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap(); - } + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); @@ -449,10 +446,7 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); - for (level, datum) in ghostdag_data.iter().enumerate() { - // This data might have been already written when applying the pruning proof. - self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, datum).unwrap_or_exists(); - } + self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); ctx.known_parents.into_iter().enumerate().for_each(|(level, parents_by_level)| { @@ -492,7 +486,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = Some(vec![Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())]); + ctx.ghostdag_data = Some(Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); From a45b57122be5f9e8bec551d894f859dcae16d303 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:03:40 -0600 Subject: [PATCH 16/50] Add assertion to check root vs old_root --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 6f7840ea6..8fbcb8b3c 100644 --- 
a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1042,7 +1042,7 @@ impl PruningProofManager { } else { block_at_depth_2m }; - // assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); + assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); From edb5cd3d9300f93f6b8e3f3abf9e11ff8bc627c7 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:04:57 -0600 Subject: [PATCH 17/50] Lint fix current_dag_level --- consensus/src/processes/pruning_proof/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 8fbcb8b3c..e92cc6772 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -796,7 +796,6 @@ impl PruningProofManager { &self, pp_header: &HeaderWithBlockLevel, level: BlockLevel, - current_dag_level: BlockLevel, required_block: Option, temp_db: Arc, ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { @@ -971,7 +970,6 @@ impl PruningProofManager { pp_header: &HeaderWithBlockLevel, temp_db: Arc, ) -> (Vec>, Vec, Vec) { - let current_dag_level = self.find_current_dag_level(&pp_header.header); let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; let mut root_by_level = vec![None; self.max_block_level as usize + 1]; @@ -988,7 +986,7 @@ impl PruningProofManager { None }; let (store, selected_tip, root) = self - .find_sufficient_root(&pp_header, level, current_dag_level, required_block, temp_db.clone()) + .find_sufficient_root(&pp_header, level, required_block, temp_db.clone()) .expect(&format!("find_sufficient_root failed for level 
{level}")); ghostdag_stores[level_usize] = Some(store); selected_tip_by_level[level_usize] = Some(selected_tip); From e81394fe48bf797066407f53b220f02293472e83 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:19:46 -0600 Subject: [PATCH 18/50] Keep DB Version at 3 The new prefixes added are compatible with the old version. We don't want to trigger a db delete with this change --- consensus/src/consensus/factory.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f34aa54f9..f3ee51d9c 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 4; +const LATEST_DB_VERSION: u32 = 3; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { From 0e8c788c8e9b936df6327f4888ed3fb12400a008 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:38:47 -0600 Subject: [PATCH 19/50] Cleanup apply_proof logic and handle more ghostdag_stores logic --- .../pipeline/pruning_processor/processor.rs | 4 ++- consensus/src/processes/pruning_proof/mod.rs | 27 ++++++++++--------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index cd9026565..a6f3edf65 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -411,7 +411,9 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - 
self.ghostdag_stores[level].delete_batch(&mut batch, current).unwrap_option(); + if level == 0 { + self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); + } }); // Remove additional header related data diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e92cc6772..5db4708be 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -1,7 +1,9 @@ use std::{ cmp::{max, Reverse}, - collections::{hash_map::Entry, BinaryHeap}, - collections::{hash_map::Entry::Vacant, VecDeque}, + collections::{ + hash_map::Entry::{self, Vacant}, + BinaryHeap, HashSet, VecDeque, + }, ops::{Deref, DerefMut}, sync::{ atomic::{AtomicBool, Ordering}, @@ -254,30 +256,29 @@ impl PruningProofManager { for (level, headers) in proof.iter().enumerate() { trace!("Applying level {} from the pruning point proof", level); - self.ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + let mut level_ancestors: HashSet = HashSet::new(); + level_ancestors.insert(ORIGIN); + for header in headers.iter() { let parents = Arc::new( self.parents_manager .parents_at_level(header, level as BlockLevel) .iter() .copied() - .filter(|parent| self.ghostdag_stores[level].has(*parent).unwrap()) + .filter(|parent| level_ancestors.contains(parent)) .collect_vec() .push_if_empty(ORIGIN), ); self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); - let gd = if header.hash == self.genesis_hash { - self.ghostdag_managers[level].genesis_ghostdag_data() - } else { - self.ghostdag_managers[level].ghostdag(&parents) - }; if level == 0 { + self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); + let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { - let calculated_gd = self.ghostdag_managers[level].ghostdag(&parents); + let calculated_gd = 
self.ghostdag_primary_manager.ghostdag(&parents); // Override the ghostdag data with the real blue score and blue work GhostdagData { blue_score: header.blue_score, @@ -289,9 +290,9 @@ impl PruningProofManager { } }; self.ghostdag_primary_store.insert(header.hash, Arc::new(gd)).unwrap(); - } else { - self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap(); } + + level_ancestors.insert(header.hash); } } @@ -616,7 +617,7 @@ impl PruningProofManager { let mut proof_current = proof_selected_tip; let mut proof_current_gd = proof_selected_tip_gd; loop { - match self.ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { + match ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { Some(current_gd) => { break Some((proof_current_gd, current_gd)); } From 56f9dab2059d59541bb5ebdb8df69f0814c2a2a5 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 18 Jun 2024 23:40:27 -0600 Subject: [PATCH 20/50] remove simpa changes --- simpa/src/main.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 8975e974a..1baecc3e7 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -222,11 +222,6 @@ fn main_impl(mut args: Args) { Default::default(), unix_now(), )); - - // TODO: Remove the call to get_pruning_point_proof - // let the_hash = Hash::from_str("45d0bb998ab8c3513d18fef3f70d9c686539da7cbe4fab8021e55be1b3a0f8df").unwrap(); - // assert!(topologically_ordered_hashes(&consensus, config.params.genesis.hash).into_iter().contains(&the_hash)); - let _ = consensus.get_pruning_point_proof(); (consensus, lifetime) } else { let until = if args.target_blocks.is_none() { config.genesis.timestamp + args.sim_time * 1000 } else { u64::MAX }; // milliseconds @@ -446,8 +441,6 @@ mod tests { args.target_blocks = Some(5000); args.tpb = 1; args.test_pruning = true; - // args.output_dir = Some("/tmp/simpa".into()); - // args.input_dir 
= Some("/tmp/simpa".into()); kaspa_core::log::try_init_logger(&args.log_level); // As we log the panic, we want to set it up after the logger From c5be8ad40aaf0db3c75042bcdb8043aaf1c306d1 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 19 Jun 2024 00:45:09 -0600 Subject: [PATCH 21/50] Remove rewriting origin to primary GD It's already on there --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 5db4708be..472e5f130 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -273,7 +273,7 @@ impl PruningProofManager { self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); if level == 0 { - self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); + // self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() From 8d15e27a39baf53c4b6e6e529ff8efd577fbe4fb Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 19 Jun 2024 17:04:57 -0600 Subject: [PATCH 22/50] More refactoring to use single GD store/manager --- consensus/src/consensus/services.rs | 30 ++++++-------------- consensus/src/consensus/storage.rs | 21 ++++---------- consensus/src/processes/pruning_proof/mod.rs | 7 +---- 3 files changed, 15 insertions(+), 43 deletions(-) diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index b5617ea76..41478580c 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -53,7 +53,6 @@ pub struct ConsensusServices { pub reachability_service: MTReachabilityService, pub window_manager: 
DbWindowManager, pub dag_traversal_manager: DbDagTraversalManager, - pub ghostdag_managers: Arc>, pub ghostdag_primary_manager: DbGhostdagManager, pub coinbase_manager: CoinbaseManager, pub pruning_point_manager: DbPruningPointManager, @@ -112,26 +111,15 @@ impl ConsensusServices { reachability_service.clone(), storage.ghostdag_primary_store.clone(), ); - let ghostdag_managers = Arc::new( - storage - .ghostdag_stores - .iter() - .cloned() - .enumerate() - .map(|(level, ghostdag_store)| { - GhostdagManager::new( - params.genesis.hash, - params.ghostdag_k, - ghostdag_store, - relations_services[level].clone(), - storage.headers_store.clone(), - reachability_service.clone(), - level != 0, - ) - }) - .collect_vec(), + let ghostdag_primary_manager = GhostdagManager::new( + params.genesis.hash, + params.ghostdag_k, + storage.ghostdag_primary_store.clone(), + relations_services[0].clone(), + storage.headers_store.clone(), + reachability_service.clone(), + false, ); - let ghostdag_primary_manager = ghostdag_managers[0].clone(); let coinbase_manager = CoinbaseManager::new( params.coinbase_payload_script_public_key_max_len, @@ -185,7 +173,6 @@ impl ConsensusServices { &storage, parents_manager.clone(), reachability_service.clone(), - ghostdag_managers.clone(), ghostdag_primary_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), @@ -215,7 +202,6 @@ impl ConsensusServices { reachability_service, window_manager, dag_traversal_manager, - ghostdag_managers, ghostdag_primary_manager, coinbase_manager, pruning_point_manager, diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index d53324fc6..4b9646ec2 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -50,7 +50,6 @@ pub struct ConsensusStorage { pub selected_chain_store: Arc>, // Append-only stores - pub ghostdag_stores: Arc>>, pub ghostdag_primary_store: Arc, pub headers_store: Arc, pub block_transactions_store: Arc, @@ -193,19 +192,12 
@@ impl ConsensusStorage { children_builder.build(), ))); - let ghostdag_stores = Arc::new( - (0..=params.max_block_level) - .map(|level| { - Arc::new(DbGhostdagStore::new( - db.clone(), - level, - ghostdag_builder.downscale(level).build(), - ghostdag_compact_builder.downscale(level).build(), - )) - }) - .collect_vec(), - ); - let ghostdag_primary_store = ghostdag_stores[0].clone(); + let ghostdag_primary_store = Arc::new(DbGhostdagStore::new( + db.clone(), + 0, + ghostdag_builder.downscale(0).build(), + ghostdag_compact_builder.downscale(0).build(), + )); let daa_excluded_store = Arc::new(DbDaaStore::new(db.clone(), daa_excluded_builder.build())); let headers_store = Arc::new(DbHeadersStore::new(db.clone(), headers_builder.build(), headers_compact_builder.build())); let depth_store = Arc::new(DbDepthStore::new(db.clone(), header_data_builder.build())); @@ -245,7 +237,6 @@ impl ConsensusStorage { relations_stores, reachability_relations_store, reachability_store, - ghostdag_stores, ghostdag_primary_store, pruning_point_store, headers_selected_tip_store, diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 472e5f130..fb0eceb77 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -117,7 +117,6 @@ pub struct PruningProofManager { reachability_store: Arc>, reachability_relations_store: Arc>, reachability_service: MTReachabilityService, - ghostdag_stores: Arc>>, ghostdag_primary_store: Arc, relations_stores: Arc>>, pruning_point_store: Arc>, @@ -128,7 +127,6 @@ pub struct PruningProofManager { depth_store: Arc, selected_chain_store: Arc>, - ghostdag_managers: Arc>, ghostdag_primary_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, @@ -153,7 +151,6 @@ impl PruningProofManager { storage: &Arc, parents_manager: DbParentsManager, reachability_service: MTReachabilityService, - ghostdag_managers: Arc>, 
ghostdag_manager: DbGhostdagManager, traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, @@ -170,7 +167,6 @@ impl PruningProofManager { reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, - ghostdag_stores: storage.ghostdag_stores.clone(), ghostdag_primary_store: storage.ghostdag_primary_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), @@ -181,7 +177,6 @@ impl PruningProofManager { selected_chain_store: storage.selected_chain_store.clone(), depth_store: storage.depth_store.clone(), - ghostdag_managers, traversal_manager, window_manager, parents_manager, @@ -467,7 +462,7 @@ impl PruningProofManager { let level = level as usize; reachability::init(reachability_stores[level].write().deref_mut()).unwrap(); relations_stores[level].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap(); - ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap(); + ghostdag_stores[level].insert(ORIGIN, ghostdag_managers[level].origin_ghostdag_data()).unwrap(); } db.write(batch).unwrap(); From 1c6b585d69d5cf85759d4ac65ab1c7fc0644a3f3 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 21 Jun 2024 17:01:26 -0600 Subject: [PATCH 23/50] Lint fixes --- consensus/src/pipeline/header_processor/processor.rs | 2 +- consensus/src/processes/pruning_proof/mod.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index c4ccc8bae..a04af90e6 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -99,7 +99,7 @@ impl HeaderProcessingContext { /// Returns the primary (level 0) GHOSTDAG data of this 
header. /// NOTE: is expected to be called only after GHOSTDAG computation was pushed into the context pub fn ghostdag_data(&self) -> &Arc { - &self.ghostdag_data.as_ref().unwrap() + self.ghostdag_data.as_ref().unwrap() } } diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index fb0eceb77..0058408fa 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -635,7 +635,7 @@ impl PruningProofManager { let proof_pp_header = proof[0].last().expect("checked if empty"); let proof_pp = proof_pp_header.hash; let proof_pp_level = calc_block_level(proof_pp_header, self.max_block_level); - let mut stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(&proof)?; + let mut stores_and_processes = self.init_validate_pruning_point_proof_stores_and_processes(proof)?; let selected_tip_by_level = self.populate_stores_for_validate_pruning_point_proof(proof, &mut stores_and_processes)?; let ghostdag_stores = stores_and_processes.ghostdag_stores; @@ -982,8 +982,8 @@ impl PruningProofManager { None }; let (store, selected_tip, root) = self - .find_sufficient_root(&pp_header, level, required_block, temp_db.clone()) - .expect(&format!("find_sufficient_root failed for level {level}")); + .find_sufficient_root(pp_header, level, required_block, temp_db.clone()) + .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}")); ghostdag_stores[level_usize] = Some(store); selected_tip_by_level[level_usize] = Some(selected_tip); root_by_level[level_usize] = Some(root); From 273aa81fdd9c9a13edf2941bc8bfec0650486f30 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 21 Jun 2024 17:04:39 -0600 Subject: [PATCH 24/50] warn to trace for common retry --- consensus/src/processes/pruning_proof/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 0058408fa..39bf8d756 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -957,7 +957,7 @@ impl PruningProofManager { break Ok((ghostdag_store, selected_tip, root)); } required_level_0_depth <<= 1; - warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); + trace!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); } } From f9b3fda63bab6d8b7e8c7bf62e72fda5f544041f Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 25 Jun 2024 23:11:06 -0600 Subject: [PATCH 25/50] Address initial comments --- .../pipeline/header_processor/processor.rs | 1 + .../pipeline/pruning_processor/processor.rs | 7 ++-- consensus/src/processes/pruning_proof/mod.rs | 41 +++++++++---------- 3 files changed, 24 insertions(+), 25 deletions(-) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index a04af90e6..2214d0881 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -368,6 +368,7 @@ impl HeaderProcessor { // Append-only stores: these require no lock and hence done first in order to reduce locking time // + // This data might have been already written when applying the pruning proof. 
self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); if let Some(window) = ctx.block_window_for_difficulty { diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index a6f3edf65..b7f46f3b0 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -411,11 +411,10 @@ impl PruningProcessor { let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[level]); relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); staging_level_relations.commit(&mut batch).unwrap(); - if level == 0 { - self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); - } }); + self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); + // Remove additional header related data self.daa_excluded_store.delete_batch(&mut batch, current).unwrap(); self.depth_store.delete_batch(&mut batch, current).unwrap(); @@ -457,7 +456,7 @@ impl PruningProcessor { ); if self.config.enable_sanity_checks { - // self.assert_proof_rebuilding(proof, new_pruning_point); + self.assert_proof_rebuilding(proof.clone(), new_pruning_point); self.pruning_proof_manager.validate_pruning_point_proof(&proof).unwrap(); self.assert_data_rebuilding(data, new_pruning_point); } diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 39bf8d756..ccc8f81ff 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -268,8 +268,6 @@ impl PruningProofManager { self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap(); if level == 0 { - // self.ghostdag_primary_store.insert(ORIGIN, self.ghostdag_primary_manager.origin_ghostdag_data()).unwrap(); - let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { 
gd.clone() } else { @@ -474,13 +472,13 @@ impl PruningProofManager { fn populate_stores_for_validate_pruning_point_proof( &self, proof: &PruningPointProof, - stores_and_processes: &mut TempProofContext, + ctx: &mut TempProofContext, ) -> PruningImportResult> { - let headers_store = &stores_and_processes.headers_store; - let ghostdag_stores = &stores_and_processes.ghostdag_stores; - let mut relations_stores = stores_and_processes.relations_stores.clone(); - let reachability_stores = &stores_and_processes.reachability_stores; - let ghostdag_managers = &stores_and_processes.ghostdag_managers; + let headers_store = &ctx.headers_store; + let ghostdag_stores = &ctx.ghostdag_stores; + let mut relations_stores = ctx.relations_stores.clone(); + let reachability_stores = &ctx.reachability_stores; + let ghostdag_managers = &ctx.ghostdag_managers; let proof_pp_header = proof[0].last().expect("checked if empty"); let proof_pp = proof_pp_header.hash; @@ -741,7 +739,7 @@ impl PruningProofManager { } fn estimated_blue_depth_at_level_0(&self, level: BlockLevel, level_depth: u64, current_dag_level: BlockLevel) -> u64 { - level_depth << current_dag_level.saturating_sub(level) + level_depth.checked_shl(level.saturating_sub(current_dag_level) as u32).unwrap_or(level_depth) } /// selected parent at level = the parent of the header at the level @@ -792,6 +790,7 @@ impl PruningProofManager { &self, pp_header: &HeaderWithBlockLevel, level: BlockLevel, + current_dag_level: BlockLevel, required_block: Option, temp_db: Arc, ) -> PruningProofManagerInternalResult<(Arc, Hash, Hash)> { @@ -807,16 +806,15 @@ impl PruningProofManager { let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; - let mut required_level_0_depth = required_level_depth; - // let mut required_level_0_depth = if level == 0 { - // required_level_depth - // } else { - // self.estimated_blue_depth_at_level_0( - // 
level, - // required_level_depth * 5 / 4, // We take a safety margin - // current_dag_level, - // ) - // }; + let mut required_level_0_depth = if level == 0 { + required_level_depth + } else { + self.estimated_blue_depth_at_level_0( + level, + required_level_depth * 5 / 4, // We take a safety margin + current_dag_level, + ) + }; let mut tries = 0; loop { @@ -957,7 +955,7 @@ impl PruningProofManager { break Ok((ghostdag_store, selected_tip, root)); } required_level_0_depth <<= 1; - trace!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); + warn!("Failed to find sufficient root for level {level} after {tries} tries. Retrying again to find with depth {required_level_0_depth}"); } } @@ -966,6 +964,7 @@ impl PruningProofManager { pp_header: &HeaderWithBlockLevel, temp_db: Arc, ) -> (Vec>, Vec, Vec) { + let current_dag_level = self.find_current_dag_level(&pp_header.header); let mut ghostdag_stores: Vec>> = vec![None; self.max_block_level as usize + 1]; let mut selected_tip_by_level = vec![None; self.max_block_level as usize + 1]; let mut root_by_level = vec![None; self.max_block_level as usize + 1]; @@ -982,7 +981,7 @@ impl PruningProofManager { None }; let (store, selected_tip, root) = self - .find_sufficient_root(pp_header, level, required_block, temp_db.clone()) + .find_sufficient_root(pp_header, level, current_dag_level, required_block, temp_db.clone()) .unwrap_or_else(|_| panic!("find_sufficient_root failed for level {level}")); ghostdag_stores[level_usize] = Some(store); selected_tip_by_level[level_usize] = Some(selected_tip); From ca8bb429691a3575e3b59379de1c086391879554 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 25 Jun 2024 23:27:22 -0600 Subject: [PATCH 26/50] Remove "primary" in ghostdag store/manager references --- consensus/src/consensus/mod.rs | 10 +++---- consensus/src/consensus/services.rs | 20 ++++++------- 
consensus/src/consensus/storage.rs | 6 ++-- consensus/src/consensus/test_consensus.rs | 6 ++-- .../body_validation_in_context.rs | 2 +- .../src/pipeline/body_processor/processor.rs | 6 ++-- .../pipeline/header_processor/processor.rs | 18 ++++++------ .../pipeline/pruning_processor/processor.rs | 6 ++-- .../pipeline/virtual_processor/processor.rs | 18 ++++++------ .../virtual_processor/utxo_validation.rs | 2 +- consensus/src/processes/pruning_proof/mod.rs | 28 +++++++++---------- simpa/src/main.rs | 12 ++++---- 12 files changed, 67 insertions(+), 67 deletions(-) diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 80babbef0..7e1690b2a 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -231,7 +231,7 @@ impl Consensus { block_processors_pool, db.clone(), storage.statuses_store.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.block_transactions_store.clone(), storage.body_tips_store.clone(), @@ -484,7 +484,7 @@ impl ConsensusApi for Consensus { fn get_virtual_merge_depth_blue_work_threshold(&self) -> BlueWorkType { // PRUNE SAFETY: merge depth root is never close to being pruned (in terms of block depth) - self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_primary_store.get_blue_work(root).unwrap()) + self.get_virtual_merge_depth_root().map_or(BlueWorkType::ZERO, |root| self.ghostdag_store.get_blue_work(root).unwrap()) } fn get_sink(&self) -> Hash { @@ -812,7 +812,7 @@ impl ConsensusApi for Consensus { Some(BlockStatus::StatusInvalid) => return Err(ConsensusError::InvalidBlock(hash)), _ => {} }; - let ghostdag = self.ghostdag_primary_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; + let ghostdag = self.ghostdag_store.get_data(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))?; Ok((&*ghostdag).into()) } @@ -864,7 +864,7 @@ impl ConsensusApi for Consensus { 
Ok(self .services .window_manager - .block_window(&self.ghostdag_primary_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(hash).unwrap(), WindowType::SampledDifficultyWindow) .unwrap() .deref() .iter() @@ -903,7 +903,7 @@ impl ConsensusApi for Consensus { match start_hash { Some(hash) => { self.validate_block_exists(hash)?; - let ghostdag_data = self.ghostdag_primary_store.get_data(hash).unwrap(); + let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); // The selected parent header is used within to check for sampling activation, so we verify its existence first if !self.headers_store.has(ghostdag_data.selected_parent).unwrap() { return Err(ConsensusError::DifficultyError(DifficultyError::InsufficientWindowData(0))); diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 41478580c..74544c11b 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -53,7 +53,7 @@ pub struct ConsensusServices { pub reachability_service: MTReachabilityService, pub window_manager: DbWindowManager, pub dag_traversal_manager: DbDagTraversalManager, - pub ghostdag_primary_manager: DbGhostdagManager, + pub ghostdag_manager: DbGhostdagManager, pub coinbase_manager: CoinbaseManager, pub pruning_point_manager: DbPruningPointManager, pub pruning_proof_manager: Arc, @@ -81,13 +81,13 @@ impl ConsensusServices { let reachability_service = MTReachabilityService::new(storage.reachability_store.clone()); let dag_traversal_manager = DagTraversalManager::new( params.genesis.hash, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), relations_service.clone(), reachability_service.clone(), ); let window_manager = DualWindowManager::new( ¶ms.genesis, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.daa_excluded_store.clone(), 
storage.block_window_cache_for_difficulty.clone(), @@ -109,12 +109,12 @@ impl ConsensusServices { params.genesis.hash, storage.depth_store.clone(), reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), ); - let ghostdag_primary_manager = GhostdagManager::new( + let ghostdag_manager = GhostdagManager::new( params.genesis.hash, params.ghostdag_k, - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), relations_services[0].clone(), storage.headers_store.clone(), reachability_service.clone(), @@ -154,7 +154,7 @@ impl ConsensusServices { params.finality_depth, params.genesis.hash, reachability_service.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.headers_store.clone(), storage.past_pruning_points_store.clone(), storage.headers_selected_tip_store.clone(), @@ -173,7 +173,7 @@ impl ConsensusServices { &storage, parents_manager.clone(), reachability_service.clone(), - ghostdag_primary_manager.clone(), + ghostdag_manager.clone(), dag_traversal_manager.clone(), window_manager.clone(), params.max_block_level, @@ -188,7 +188,7 @@ impl ConsensusServices { params.mergeset_size_limit as usize, reachability_service.clone(), dag_traversal_manager.clone(), - storage.ghostdag_primary_store.clone(), + storage.ghostdag_store.clone(), storage.selected_chain_store.clone(), storage.headers_selected_tip_store.clone(), storage.pruning_point_store.clone(), @@ -202,7 +202,7 @@ impl ConsensusServices { reachability_service, window_manager, dag_traversal_manager, - ghostdag_primary_manager, + ghostdag_manager, coinbase_manager, pruning_point_manager, pruning_proof_manager, diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index 4b9646ec2..e170ace04 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -50,7 +50,7 @@ pub struct ConsensusStorage { pub selected_chain_store: Arc>, // Append-only stores - pub 
ghostdag_primary_store: Arc, + pub ghostdag_store: Arc, pub headers_store: Arc, pub block_transactions_store: Arc, pub past_pruning_points_store: Arc, @@ -192,7 +192,7 @@ impl ConsensusStorage { children_builder.build(), ))); - let ghostdag_primary_store = Arc::new(DbGhostdagStore::new( + let ghostdag_store = Arc::new(DbGhostdagStore::new( db.clone(), 0, ghostdag_builder.downscale(0).build(), @@ -237,7 +237,7 @@ impl ConsensusStorage { relations_stores, reachability_relations_store, reachability_store, - ghostdag_primary_store, + ghostdag_store, pruning_point_store, headers_selected_tip_store, body_tips_store, diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index c626e00ff..a937388ba 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -118,7 +118,7 @@ impl TestConsensus { pub fn build_header_with_parents(&self, hash: Hash, parents: Vec) -> Header { let mut header = header_from_precomputed_hash(hash, parents); - let ghostdag_data = self.consensus.services.ghostdag_primary_manager.ghostdag(header.direct_parents()); + let ghostdag_data = self.consensus.services.ghostdag_manager.ghostdag(header.direct_parents()); header.pruning_point = self .consensus .services @@ -201,7 +201,7 @@ impl TestConsensus { } pub fn ghostdag_store(&self) -> &Arc { - &self.consensus.ghostdag_primary_store + &self.consensus.ghostdag_store } pub fn reachability_store(&self) -> &Arc> { @@ -233,7 +233,7 @@ impl TestConsensus { } pub fn ghostdag_manager(&self) -> &DbGhostdagManager { - &self.consensus.services.ghostdag_primary_manager + &self.consensus.services.ghostdag_manager } } diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index b437f1f13..2425556d0 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ 
b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -25,7 +25,7 @@ impl BlockBodyProcessor { } fn check_block_transactions_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { - let (pmt, _) = self.window_manager.calc_past_median_time(&self.ghostdag_primary_store.get_data(block.hash()).unwrap())?; + let (pmt, _) = self.window_manager.calc_past_median_time(&self.ghostdag_store.get_data(block.hash()).unwrap())?; for tx in block.transactions.iter() { if let Err(e) = self.transaction_validator.utxo_free_tx_validation(tx, block.header.daa_score, pmt) { return Err(RuleError::TxInContextFailed(tx.id(), e)); diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 8b6d35e19..1ea674263 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -55,7 +55,7 @@ pub struct BlockBodyProcessor { // Stores pub(super) statuses_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) headers_store: Arc, pub(super) block_transactions_store: Arc, pub(super) body_tips_store: Arc>, @@ -92,7 +92,7 @@ impl BlockBodyProcessor { db: Arc, statuses_store: Arc>, - ghostdag_primary_store: Arc, + ghostdag_store: Arc, headers_store: Arc, block_transactions_store: Arc, body_tips_store: Arc>, @@ -116,7 +116,7 @@ impl BlockBodyProcessor { db, statuses_store, reachability_service, - ghostdag_primary_store, + ghostdag_store, headers_store, block_transactions_store, body_tips_store, diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 2214d0881..22a5c566c 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -127,7 +127,7 @@ pub struct HeaderProcessor { pub(super) relations_stores: Arc>>, pub(super) reachability_store: Arc>, pub(super) 
reachability_relations_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) statuses_store: Arc>, pub(super) pruning_point_store: Arc>, pub(super) block_window_cache_for_difficulty: Arc, @@ -138,7 +138,7 @@ pub struct HeaderProcessor { pub(super) depth_store: Arc, // Managers and services - pub(super) ghostdag_primary_manager: DbGhostdagManager, + pub(super) ghostdag_manager: DbGhostdagManager, pub(super) dag_traversal_manager: DbDagTraversalManager, pub(super) window_manager: DbWindowManager, pub(super) depth_manager: DbBlockDepthManager, @@ -178,7 +178,7 @@ impl HeaderProcessor { relations_stores: storage.relations_stores.clone(), reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), statuses_store: storage.statuses_store.clone(), pruning_point_store: storage.pruning_point_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), @@ -188,7 +188,7 @@ impl HeaderProcessor { block_window_cache_for_difficulty: storage.block_window_cache_for_difficulty.clone(), block_window_cache_for_past_median_time: storage.block_window_cache_for_past_median_time.clone(), - ghostdag_primary_manager: services.ghostdag_primary_manager.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), window_manager: services.window_manager.clone(), reachability_service: services.reachability_service.clone(), @@ -349,10 +349,10 @@ impl HeaderProcessor { /// Runs the GHOSTDAG algorithm for all block levels and writes the data into the context (if hasn't run already) fn ghostdag(&self, ctx: &mut HeaderProcessingContext) { let ghostdag_data = self - .ghostdag_primary_store + .ghostdag_store .get_data(ctx.hash) .unwrap_option() - .unwrap_or_else(|| 
Arc::new(self.ghostdag_primary_manager.ghostdag(&ctx.known_parents[0]))); + .unwrap_or_else(|| Arc::new(self.ghostdag_manager.ghostdag(&ctx.known_parents[0]))); self.counters.mergeset_counts.fetch_add(ghostdag_data.mergeset_size() as u64, Ordering::Relaxed); ctx.ghostdag_data = Some(ghostdag_data); } @@ -369,7 +369,7 @@ impl HeaderProcessor { // // This data might have been already written when applying the pruning proof. - self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_primary_data).unwrap(); if let Some(window) = ctx.block_window_for_difficulty { self.block_window_cache_for_difficulty.insert(ctx.hash, window); @@ -447,7 +447,7 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); - self.ghostdag_primary_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); + self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); ctx.known_parents.into_iter().enumerate().for_each(|(level, parents_by_level)| { @@ -487,7 +487,7 @@ impl HeaderProcessor { PruningPointInfo::from_genesis(self.genesis.hash), (0..=self.max_block_level).map(|_| BlockHashes::new(vec![ORIGIN])).collect(), ); - ctx.ghostdag_data = Some(Arc::new(self.ghostdag_primary_manager.genesis_ghostdag_data())); + ctx.ghostdag_data = Some(Arc::new(self.ghostdag_manager.genesis_ghostdag_data())); ctx.mergeset_non_daa = Some(Default::default()); ctx.merge_depth_root = Some(ORIGIN); ctx.finality_point = Some(ORIGIN); diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index b7f46f3b0..f73f8c12e 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -280,7 +280,7 @@ impl PruningProcessor { let mut counter = 0; let mut 
batch = WriteBatch::default(); for kept in keep_relations.iter().copied() { - let Some(ghostdag) = self.ghostdag_primary_store.get_data(kept).unwrap_option() else { + let Some(ghostdag) = self.ghostdag_store.get_data(kept).unwrap_option() else { continue; }; if ghostdag.unordered_mergeset().any(|h| !keep_relations.contains(&h)) { @@ -292,7 +292,7 @@ impl PruningProcessor { mutable_ghostdag.selected_parent = ORIGIN; } counter += 1; - self.ghostdag_primary_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); + self.ghostdag_store.update_batch(&mut batch, kept, &Arc::new(mutable_ghostdag.into())).unwrap(); } } self.db.write(batch).unwrap(); @@ -413,7 +413,7 @@ impl PruningProcessor { staging_level_relations.commit(&mut batch).unwrap(); }); - self.ghostdag_primary_store.delete_batch(&mut batch, current).unwrap_option(); + self.ghostdag_store.delete_batch(&mut batch, current).unwrap_option(); // Remove additional header related data self.daa_excluded_store.delete_batch(&mut batch, current).unwrap(); diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index ded062251..db8efed3a 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -115,7 +115,7 @@ pub struct VirtualStateProcessor { // Stores pub(super) statuses_store: Arc>, - pub(super) ghostdag_primary_store: Arc, + pub(super) ghostdag_store: Arc, pub(super) headers_store: Arc, pub(super) daa_excluded_store: Arc, pub(super) block_transactions_store: Arc, @@ -190,7 +190,7 @@ impl VirtualStateProcessor { db, statuses_store: storage.statuses_store.clone(), headers_store: storage.headers_store.clone(), - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), daa_excluded_store: storage.daa_excluded_store.clone(), block_transactions_store: storage.block_transactions_store.clone(), 
pruning_point_store: storage.pruning_point_store.clone(), @@ -205,7 +205,7 @@ impl VirtualStateProcessor { pruning_utxoset_stores: storage.pruning_utxoset_stores.clone(), lkg_virtual_state: storage.lkg_virtual_state.clone(), - ghostdag_manager: services.ghostdag_primary_manager.clone(), + ghostdag_manager: services.ghostdag_manager.clone(), reachability_service: services.reachability_service.clone(), relations_service: services.relations_service.clone(), dag_traversal_manager: services.dag_traversal_manager.clone(), @@ -302,7 +302,7 @@ impl VirtualStateProcessor { .expect("all possible rule errors are unexpected here"); // Update the pruning processor about the virtual state change - let sink_ghostdag_data = self.ghostdag_primary_store.get_compact_data(new_sink).unwrap(); + let sink_ghostdag_data = self.ghostdag_store.get_compact_data(new_sink).unwrap(); // Empty the channel before sending the new message. If pruning processor is busy, this step makes sure // the internal channel does not grow with no need (since we only care about the most recent message) let _consume = self.pruning_receiver.try_iter().count(); @@ -401,7 +401,7 @@ impl VirtualStateProcessor { } let header = self.headers_store.get_header(current).unwrap(); - let mergeset_data = self.ghostdag_primary_store.get_data(current).unwrap(); + let mergeset_data = self.ghostdag_store.get_data(current).unwrap(); let pov_daa_score = header.daa_score; let selected_parent_multiset_hash = self.utxo_multisets_store.get(selected_parent).unwrap(); @@ -562,7 +562,7 @@ impl VirtualStateProcessor { let mut heap = tips .into_iter() - .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_primary_store.get_blue_work(block).unwrap() }) + .map(|block| SortableBlock { hash: block, blue_work: self.ghostdag_store.get_blue_work(block).unwrap() }) .collect::>(); // The initial diff point is the previous sink @@ -584,7 +584,7 @@ impl VirtualStateProcessor { // 2. will be removed eventually by the bounded merge check. 
// Hence as an optimization we prefer removing such blocks in advance to allow valid tips to be considered. let filtering_root = self.depth_store.merge_depth_root(candidate).unwrap(); - let filtering_blue_work = self.ghostdag_primary_store.get_blue_work(filtering_root).unwrap_or_default(); + let filtering_blue_work = self.ghostdag_store.get_blue_work(filtering_root).unwrap_or_default(); return ( candidate, heap.into_sorted_iter().take_while(|s| s.blue_work >= filtering_blue_work).map(|s| s.hash).collect(), @@ -602,7 +602,7 @@ impl VirtualStateProcessor { if self.reachability_service.is_dag_ancestor_of(finality_point, parent) && !self.reachability_service.is_dag_ancestor_of_any(parent, &mut heap.iter().map(|sb| sb.hash)) { - heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_primary_store.get_blue_work(parent).unwrap() }); + heap.push(SortableBlock { hash: parent, blue_work: self.ghostdag_store.get_blue_work(parent).unwrap() }); } } drop(prune_guard); @@ -1117,7 +1117,7 @@ impl VirtualStateProcessor { // in depth of 2*finality_depth, and can give false negatives for smaller finality violations. 
let current_pp = self.pruning_point_store.read().pruning_point().unwrap(); let vf = self.virtual_finality_point(&self.lkg_virtual_state.load().ghostdag_data, current_pp); - let vff = self.depth_manager.calc_finality_point(&self.ghostdag_primary_store.get_data(vf).unwrap(), current_pp); + let vff = self.depth_manager.calc_finality_point(&self.ghostdag_store.get_data(vf).unwrap(), current_pp); let last_known_pp = pp_list.iter().rev().find(|pp| match self.statuses_store.read().get(pp.hash).unwrap_option() { Some(status) => status.is_valid(), diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 112976294..0e3ca7533 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -82,7 +82,7 @@ impl VirtualStateProcessor { for (i, (merged_block, txs)) in once((ctx.selected_parent(), selected_parent_transactions)) .chain( ctx.ghostdag_data - .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_primary_store.deref()) + .consensus_ordered_mergeset_without_selected_parent(self.ghostdag_store.deref()) .map(|b| (b, self.block_transactions_store.get(b).unwrap())), ) .enumerate() diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index ccc8f81ff..82ebc7433 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -117,7 +117,7 @@ pub struct PruningProofManager { reachability_store: Arc>, reachability_relations_store: Arc>, reachability_service: MTReachabilityService, - ghostdag_primary_store: Arc, + ghostdag_store: Arc, relations_stores: Arc>>, pruning_point_store: Arc>, past_pruning_points_store: Arc, @@ -127,7 +127,7 @@ pub struct PruningProofManager { depth_store: Arc, selected_chain_store: Arc>, - ghostdag_primary_manager: DbGhostdagManager, + ghostdag_manager: DbGhostdagManager, 
traversal_manager: DbDagTraversalManager, window_manager: DbWindowManager, parents_manager: DbParentsManager, @@ -167,7 +167,7 @@ impl PruningProofManager { reachability_store: storage.reachability_store.clone(), reachability_relations_store: storage.reachability_relations_store.clone(), reachability_service, - ghostdag_primary_store: storage.ghostdag_primary_store.clone(), + ghostdag_store: storage.ghostdag_store.clone(), relations_stores: storage.relations_stores.clone(), pruning_point_store: storage.pruning_point_store.clone(), past_pruning_points_store: storage.past_pruning_points_store.clone(), @@ -189,7 +189,7 @@ impl PruningProofManager { pruning_proof_m, anticone_finalization_depth, ghostdag_k, - ghostdag_primary_manager: ghostdag_manager, + ghostdag_manager, is_consensus_exiting, } @@ -271,7 +271,7 @@ impl PruningProofManager { let gd = if let Some(gd) = trusted_gd_map.get(&header.hash) { gd.clone() } else { - let calculated_gd = self.ghostdag_primary_manager.ghostdag(&parents); + let calculated_gd = self.ghostdag_manager.ghostdag(&parents); // Override the ghostdag data with the real blue score and blue work GhostdagData { blue_score: header.blue_score, @@ -282,7 +282,7 @@ impl PruningProofManager { blues_anticone_sizes: calculated_gd.blues_anticone_sizes.clone(), } }; - self.ghostdag_primary_store.insert(header.hash, Arc::new(gd)).unwrap(); + self.ghostdag_store.insert(header.hash, Arc::new(gd)).unwrap(); } level_ancestors.insert(header.hash); @@ -292,7 +292,7 @@ impl PruningProofManager { let virtual_parents = vec![pruning_point]; let virtual_state = Arc::new(VirtualState { parents: virtual_parents.clone(), - ghostdag_data: self.ghostdag_primary_manager.ghostdag(&virtual_parents), + ghostdag_data: self.ghostdag_manager.ghostdag(&virtual_parents), ..VirtualState::default() }); self.virtual_stores.write().state.set(virtual_state).unwrap(); @@ -880,7 +880,7 @@ impl PruningProofManager { let root = root_header.hash; if level == 0 { - return 
Ok((self.ghostdag_primary_store.clone(), selected_tip, root)); + return Ok((self.ghostdag_store.clone(), selected_tip, root)); } let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); @@ -1181,7 +1181,7 @@ impl PruningProofManager { let mut current = hash; for _ in 0..=self.ghostdag_k { hashes.push(current); - let Some(parent) = self.ghostdag_primary_store.get_selected_parent(current).unwrap_option() else { + let Some(parent) = self.ghostdag_store.get_selected_parent(current).unwrap_option() else { break; }; if parent == self.genesis_hash || parent == blockhash::ORIGIN { @@ -1201,7 +1201,7 @@ impl PruningProofManager { .traversal_manager .anticone(pruning_point, virtual_parents, None) .expect("no error is expected when max_traversal_allowed is None"); - let mut anticone = self.ghostdag_primary_manager.sort_blocks(anticone); + let mut anticone = self.ghostdag_manager.sort_blocks(anticone); anticone.insert(0, pruning_point); let mut daa_window_blocks = BlockHashMap::new(); @@ -1212,14 +1212,14 @@ impl PruningProofManager { for anticone_block in anticone.iter().copied() { let window = self .window_manager - .block_window(&self.ghostdag_primary_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) + .block_window(&self.ghostdag_store.get_data(anticone_block).unwrap(), WindowType::FullDifficultyWindow) .unwrap(); for hash in window.deref().iter().map(|block| block.0.hash) { if let Entry::Vacant(e) = daa_window_blocks.entry(hash) { e.insert(TrustedHeader { header: self.headers_store.get_header(hash).unwrap(), - ghostdag: (&*self.ghostdag_primary_store.get_data(hash).unwrap()).into(), + ghostdag: (&*self.ghostdag_store.get_data(hash).unwrap()).into(), }); } } @@ -1227,7 +1227,7 @@ impl PruningProofManager { let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block); for hash in ghostdag_chain { if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) { - let ghostdag = 
self.ghostdag_primary_store.get_data(hash).unwrap(); + let ghostdag = self.ghostdag_store.get_data(hash).unwrap(); e.insert((&*ghostdag).into()); // We fill `ghostdag_blocks` only for kaspad-go legacy reasons, but the real set we @@ -1259,7 +1259,7 @@ impl PruningProofManager { if header.blue_work < min_blue_work { continue; } - let ghostdag = (&*self.ghostdag_primary_store.get_data(current).unwrap()).into(); + let ghostdag = (&*self.ghostdag_store.get_data(current).unwrap()).into(); e.insert(TrustedHeader { header, ghostdag }); } let parents = self.relations_stores.read()[0].get_parents(current).unwrap(); diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 1baecc3e7..368b52344 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -414,12 +414,12 @@ fn topologically_ordered_hashes(src_consensus: &Consensus, genesis_hash: Hash) - } fn print_stats(src_consensus: &Consensus, hashes: &[Hash], delay: f64, bps: f64, k: KType) -> usize { - let blues_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_blues.len()).sum::() as f64 - / hashes.len() as f64; - let reds_mean = - hashes.iter().map(|&h| src_consensus.ghostdag_primary_store.get_data(h).unwrap().mergeset_reds.len()).sum::() as f64 - / hashes.len() as f64; + let blues_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_blues.len()).sum::() + as f64 + / hashes.len() as f64; + let reds_mean = hashes.iter().map(|&h| src_consensus.ghostdag_store.get_data(h).unwrap().mergeset_reds.len()).sum::() + as f64 + / hashes.len() as f64; let parents_mean = hashes.iter().map(|&h| src_consensus.headers_store.get_header(h).unwrap().direct_parents().len()).sum::() as f64 / hashes.len() as f64; From 61183faba05ff020bcad48951bfc484dac93e1c1 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 26 Jun 2024 22:55:59 -0600 Subject: [PATCH 27/50] Add small safety margin to proof at level 0 This 
prevents the case where new root is an anticone of old root --- consensus/src/processes/pruning_proof/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 82ebc7433..d6c109b54 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -807,7 +807,7 @@ impl PruningProofManager { let cache_policy = CachePolicy::Count(2 * self.pruning_proof_m as usize); // TODO: We can probably reduce cache size let required_level_depth = 2 * self.pruning_proof_m; let mut required_level_0_depth = if level == 0 { - required_level_depth + required_level_depth + 100 // smaller safety margin } else { self.estimated_blue_depth_at_level_0( level, @@ -1035,6 +1035,8 @@ impl PruningProofManager { } else { block_at_depth_2m }; + + // new root is expected to be always an ancestor of old root because new root takes a safety margin assert!(self.reachability_service.is_dag_ancestor_of(root, old_root)); let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); From 34bc88f399f6fae547ca79500b657c18a18e3325 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 28 Jun 2024 23:23:40 -0600 Subject: [PATCH 28/50] Revert to only do proof rebuilding on sanity check --- consensus/src/pipeline/pruning_processor/processor.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index f73f8c12e..bbc1ea9a9 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -456,8 +456,7 @@ impl PruningProcessor { ); if self.config.enable_sanity_checks { - self.assert_proof_rebuilding(proof.clone(), new_pruning_point); - self.pruning_proof_manager.validate_pruning_point_proof(&proof).unwrap(); 
+ self.assert_proof_rebuilding(proof, new_pruning_point); self.assert_data_rebuilding(data, new_pruning_point); } From da1cfe34b608d32b1663d865e1c81331afc1bc65 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Fri, 28 Jun 2024 23:24:38 -0600 Subject: [PATCH 29/50] Proper "better" proof check --- consensus/src/processes/pruning_proof/mod.rs | 260 +++++++++++-------- 1 file changed, 158 insertions(+), 102 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index d6c109b54..6dac50563 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -49,7 +49,7 @@ use crate::{ }, stores::{ depth::DbDepthStore, - ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, + ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader}, headers::{DbHeadersStore, HeaderStore, HeaderStoreReader, HeaderWithBlockLevel}, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore}, @@ -597,32 +597,32 @@ impl PruningProofManager { Ok(()) } - // find_proof_and_consensus_common_chain_ancestor_ghostdag_data returns an option of a tuple - // that contains the ghostdag data of the proof and current consensus common ancestor. If no - // such ancestor exists, it returns None. - fn find_proof_and_consensus_common_ancestor_ghostdag_data( + /// Returns the common ancestor of the proof and the current consensus if there is one. + /// + /// ghostdag_stores currently contain only entries for blocks in the proof. 
+ /// While iterating through the selected parent chain of the current consensus, if we find any + /// that is already in ghostdag_stores that must mean it's a common ancestor of the proof + /// and current consensus + fn find_proof_and_consensus_common_ancestor( &self, - ghostdag_stores: &[Arc], - proof_selected_tip: Hash, + ghostdag_store: &Arc, + current_consensus_selected_tip_header: Arc
, level: BlockLevel, - proof_selected_tip_gd: CompactGhostdagData, - ) -> Option<(CompactGhostdagData, CompactGhostdagData)> { - let mut proof_current = proof_selected_tip; - let mut proof_current_gd = proof_selected_tip_gd; - loop { - match ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap_option() { - Some(current_gd) => { - break Some((proof_current_gd, current_gd)); - } - None => { - proof_current = proof_current_gd.selected_parent; - if proof_current.is_origin() { - break None; - } - proof_current_gd = ghostdag_stores[level as usize].get_compact_data(proof_current).unwrap(); - } - }; + relations_service: &MTRelationsService, + ) -> Option { + let mut chain_block = current_consensus_selected_tip_header.clone(); + + for _ in 0..(2 * self.pruning_proof_m as usize) { + if chain_block.direct_parents().is_empty() || chain_block.hash.is_origin() { + break; + } + if ghostdag_store.has(chain_block.hash).unwrap() { + return Some(chain_block.hash); + } + chain_block = self.find_selected_parent_header_at_level(&chain_block, level, relations_service).unwrap(); } + + None } pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { @@ -640,29 +640,54 @@ impl PruningProofManager { let pruning_read = self.pruning_point_store.read(); let relations_read = self.relations_stores.read(); let current_pp = pruning_read.get().unwrap().pruning_point; - let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); + let current_pp_header = self.headers_store.get_header_with_block_level(current_pp).unwrap(); for (level_idx, selected_tip) in selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; let proof_selected_tip_gd = ghostdag_stores[level_idx].get_compact_data(selected_tip).unwrap(); + + // Next check is to see if this proof is "better" than what's in the current consensus + // 
Step 1 - look at only levels that have a full proof (least 2m blocks in the proof) if proof_selected_tip_gd.blue_score < 2 * self.pruning_proof_m { continue; } - if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( - &ghostdag_stores, - selected_tip, + // Step 2 - if we can find a common ancestor between the proof and current consensus + // we can determine if the proof is better. The proof is better if the score difference between the + // old current consensus's tips and the common ancestor is less than the score difference between the + // proof's tip and the common ancestor + let relations_service = MTRelationsService::new(self.relations_stores.clone(), level); + let current_consensus_selected_tip_header = if current_pp_header.block_level >= level { + current_pp_header.header.clone() + } else { + self.find_selected_parent_header_at_level(¤t_pp_header.header, level, &relations_service).unwrap() + }; + if let Some(common_ancestor) = self.find_proof_and_consensus_common_ancestor( + &ghostdag_stores[level_idx], + current_consensus_selected_tip_header.clone(), level, - proof_selected_tip_gd, + &relations_service, ) { + // Fill the GD store with data from current consensus, + // starting from the common ancestor until the current level selected tip + let _ = self.fill_proof_ghostdag_data( + proof[level_idx].first().unwrap().hash, + common_ancestor, + current_consensus_selected_tip_header.hash, + &ghostdag_stores[level_idx], + &relations_service, + level != 0, + None, + false, + ); + let common_ancestor_blue_work = ghostdag_stores[level_idx].get_blue_work(common_ancestor).unwrap(); let selected_tip_blue_work_diff = - SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); - for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { + SignedInteger::from(proof_selected_tip_gd.blue_work) - 
SignedInteger::from(common_ancestor_blue_work); + for parent in self.parents_manager.parents_at_level(¤t_pp_header.header, level).iter().copied() { let parent_blue_work = ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); - let parent_blue_work_diff = - SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); + let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_blue_work); if parent_blue_work_diff >= selected_tip_blue_work_diff { return Err(PruningImportError::PruningProofInsufficientBlueWork); } @@ -748,7 +773,7 @@ impl PruningProofManager { &self, header: &Header, level: BlockLevel, - relations_service: MTRelationsService, + relations_service: &MTRelationsService, ) -> PruningProofManagerInternalResult> { // Logic of apply_proof only inserts parent entries for a header from the proof // into the relations store for a level if there was GD data in the old stores for that @@ -798,7 +823,7 @@ impl PruningProofManager { let selected_tip_header = if pp_header.block_level >= level { pp_header.header.clone() } else { - self.find_selected_parent_header_at_level(&pp_header.header, level, relations_service.clone())? + self.find_selected_parent_header_at_level(&pp_header.header, level, &relations_service)? 
}; let selected_tip = selected_tip_header.hash; @@ -850,7 +875,7 @@ impl PruningProofManager { { break current_header; } - current_header = match self.find_selected_parent_header_at_level(¤t_header, level, relations_service.clone()) { + current_header = match self.find_selected_parent_header_at_level(¤t_header, level, &relations_service) { Ok(header) => header, Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { if !intersected_with_required_block_chain { @@ -863,18 +888,15 @@ impl PruningProofManager { }; if !finished_headers_for_required_block_chain && !intersected_with_required_block_chain { - current_required_chain_block = match self.find_selected_parent_header_at_level( - ¤t_required_chain_block, - level, - relations_service.clone(), - ) { - Ok(header) => header, - Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { - finished_headers_for_required_block_chain = true; - current_required_chain_block - } - Err(e) => return Err(e), - }; + current_required_chain_block = + match self.find_selected_parent_header_at_level(¤t_required_chain_block, level, &relations_service) { + Ok(header) => header, + Err(PruningProofManagerInternalError::NotEnoughHeadersToBuildProof(_)) => { + finished_headers_for_required_block_chain = true; + current_required_chain_block + } + Err(e) => return Err(e), + }; } }; let root = root_header.hash; @@ -884,63 +906,16 @@ impl PruningProofManager { } let ghostdag_store = Arc::new(DbGhostdagStore::new_temp(temp_db.clone(), level, cache_policy, cache_policy, tries)); - let gd_manager = GhostdagManager::new( + let has_required_block = self.fill_proof_ghostdag_data( + root, root, - self.ghostdag_k, - ghostdag_store.clone(), - relations_service.clone(), - self.headers_store.clone(), - self.reachability_service.clone(), + pp, + &ghostdag_store, + &relations_service, + level != 0, + Some(required_block), true, ); - ghostdag_store.insert(root, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); - 
ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); - let mut topological_heap: BinaryHeap<_> = Default::default(); - let mut visited = BlockHashSet::new(); - for child in relations_service.get_children(root).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? - })); - } - - let mut has_required_block = root == required_block; - loop { - let Some(current) = topological_heap.pop() else { - break; - }; - let current_hash = current.0.hash; - if !visited.insert(current_hash) { - continue; - } - - if !self.reachability_service.is_dag_ancestor_of(current_hash, pp) { - // We don't care about blocks in the antipast of the pruning point - continue; - } - - if !has_required_block && current_hash == required_block { - has_required_block = true; - } - - let relevant_parents: Box<[Hash]> = relations_service - .get_parents(current_hash) - .unwrap() - .iter() - .copied() - .filter(|parent| self.reachability_service.is_dag_ancestor_of(root, *parent)) - .collect(); - let current_gd = gd_manager.ghostdag(&relevant_parents); - ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap(); - for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? 
- })); - } - } // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block @@ -1090,6 +1065,87 @@ impl PruningProofManager { .collect_vec() } + /// BFS forward iterates from starting_hash until selected tip, ignoring blocks in the antipast of selected_tip. + /// For each block along the way, insert that hash into the ghostdag_store + /// If we have a required_block to find, this will return true if that block was found along the way + fn fill_proof_ghostdag_data( + &self, + genesis_hash: Hash, + starting_hash: Hash, + selected_tip: Hash, + ghostdag_store: &Arc, + relations_service: &MTRelationsService, + use_score_as_work: bool, + required_block: Option, + initialize_store: bool, + ) -> bool { + let gd_manager = GhostdagManager::new( + genesis_hash, + self.ghostdag_k, + ghostdag_store.clone(), + relations_service.clone(), + self.headers_store.clone(), + self.reachability_service.clone(), + use_score_as_work, + ); + + if initialize_store { + ghostdag_store.insert(genesis_hash, Arc::new(gd_manager.genesis_ghostdag_data())).unwrap(); + ghostdag_store.insert(ORIGIN, gd_manager.origin_ghostdag_data()).unwrap(); + } + + let mut topological_heap: BinaryHeap<_> = Default::default(); + let mut visited = BlockHashSet::new(); + for child in relations_service.get_children(starting_hash).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? 
+ })); + } + + let mut has_required_block = required_block.is_some_and(|required_block| starting_hash == required_block); + loop { + let Some(current) = topological_heap.pop() else { + break; + }; + let current_hash = current.0.hash; + if !visited.insert(current_hash) { + continue; + } + + if !self.reachability_service.is_dag_ancestor_of(current_hash, selected_tip) { + // We don't care about blocks in the antipast of the selected tip + continue; + } + + if !has_required_block && required_block.is_some_and(|required_block| current_hash == required_block) { + has_required_block = true; + } + + let relevant_parents: Box<[Hash]> = relations_service + .get_parents(current_hash) + .unwrap() + .iter() + .copied() + .filter(|parent| self.reachability_service.is_dag_ancestor_of(starting_hash, *parent)) + .collect(); + let current_gd = gd_manager.ghostdag(&relevant_parents); + + ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists(); + + for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { + topological_heap.push(Reverse(SortableBlock { + hash: child, + // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology + blue_work: self.headers_store.get_header(child).unwrap().blue_work, // TODO: Maybe add to compact data? + })); + } + } + + has_required_block + } + /// Copy of `block_at_depth` which returns the full chain up to depth. Temporarily used for assertion purposes. 
fn chain_up_to_depth( &self, From a23d1dd88b79dc0770b4ffa96f392024a8647dbd Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu, 4 Jul 2024 22:22:30 -0600 Subject: [PATCH 30/50] Update comment on find_selected_parent_header_at_level --- consensus/src/processes/pruning_proof/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 6dac50563..e0ca2a1e8 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -775,9 +775,8 @@ impl PruningProofManager { level: BlockLevel, relations_service: &MTRelationsService, ) -> PruningProofManagerInternalResult> { - // Logic of apply_proof only inserts parent entries for a header from the proof - // into the relations store for a level if there was GD data in the old stores for that - // header. To mimic that logic here, we need to filter out parents that are NOT in the relations_service + // Parents manager parents_at_level may return parents that aren't in relations_service, so it's important + // to filter to include only parents that are in relations_service. 
let parents = self .parents_manager .parents_at_level(header, level) From 974d2004dc427d7a56ce43e3a736d24e61c1bb69 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Thu, 4 Jul 2024 22:45:21 -0600 Subject: [PATCH 31/50] Re-apply missed comment --- consensus/src/pipeline/header_processor/processor.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 22a5c566c..b64fe4ea2 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -447,6 +447,7 @@ impl HeaderProcessor { // Create a DB batch writer let mut batch = WriteBatch::default(); + // This data might have been already written when applying the pruning proof. self.ghostdag_store.insert_batch(&mut batch, ctx.hash, ghostdag_data).unwrap_or_exists(); let mut relations_write = self.relations_stores.write(); From 6ea832819d78732b9da3d4a0785f8e3ce7accfbc Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Wed, 3 Jul 2024 23:48:59 -0600 Subject: [PATCH 32/50] Implement db upgrade logic from 3 to 4 --- Cargo.lock | 2 + consensus/src/consensus/factory.rs | 19 ++++- kaspad/Cargo.toml | 2 + kaspad/src/daemon.rs | 109 +++++++++++++++++++++++++++-- 4 files changed, 125 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 22cd64f4f..67272dd78 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3755,6 +3755,7 @@ dependencies = [ "dhat", "dirs", "futures-util", + "itertools 0.11.0", "kaspa-addresses", "kaspa-addressmanager", "kaspa-alloc", @@ -3782,6 +3783,7 @@ dependencies = [ "num_cpus", "rand 0.8.5", "rayon", + "rocksdb", "serde", "serde_with", "tempfile", diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index f3ee51d9c..f8af5fb5a 100644 --- a/consensus/src/consensus/factory.rs +++ 
b/consensus/src/consensus/factory.rs @@ -59,7 +59,7 @@ pub struct MultiConsensusMetadata { version: u32, } -const LATEST_DB_VERSION: u32 = 3; +const LATEST_DB_VERSION: u32 = 4; impl Default for MultiConsensusMetadata { fn default() -> Self { Self { @@ -219,6 +219,23 @@ impl MultiConsensusManagementStore { } } + /// Returns the current version of this database + pub fn version(&self) -> StoreResult { + match self.metadata.read() { + Ok(data) => Ok(data.version), + Err(err) => Err(err), + } + } + + /// Set the database version to a different one + pub fn set_version(&mut self, version: u32) -> StoreResult<()> { + self.metadata.update(DirectDbWriter::new(&self.db), |mut data| { + data.version = version; + data + })?; + Ok(()) + } + pub fn should_upgrade(&self) -> StoreResult { match self.metadata.read() { Ok(data) => Ok(data.version != LATEST_DB_VERSION), diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml index 9f3290a51..0decbc9cc 100644 --- a/kaspad/Cargo.toml +++ b/kaspad/Cargo.toml @@ -46,10 +46,12 @@ dhat = { workspace = true, optional = true } serde.workspace = true dirs.workspace = true futures-util.workspace = true +itertools.workspace = true log.workspace = true num_cpus.workspace = true rand.workspace = true rayon.workspace = true +rocksdb.workspace = true tempfile.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 0950ad8fa..08dc1d87a 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -8,7 +8,10 @@ use kaspa_consensus_core::{ use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; use kaspa_core::{core::Core, info, trace}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; -use kaspa_database::prelude::CachePolicy; +use kaspa_database::{ + prelude::{CachePolicy, DbWriter, DirectDbWriter}, + registry::DatabaseStorePrefixes, +}; use kaspa_grpc_server::service::GrpcService; 
use kaspa_notify::{address::tracker::Tracker, subscription::context::SubscriptionContext}; use kaspa_rpc_service::service::RpcCoreService; @@ -31,6 +34,7 @@ use kaspa_mining::{ }; use kaspa_p2p_flows::{flow_context::FlowContext, service::P2pService}; +use itertools::Itertools; use kaspa_perf_monitor::{builder::Builder as PerfMonitorBuilder, counters::CountersSnapshot}; use kaspa_utxoindex::{api::UtxoIndexProxy, UtxoIndex}; use kaspa_wrpc_server::service::{Options as WrpcServerOptions, WebSocketCounters as WrpcServerCounters, WrpcEncoding, WrpcService}; @@ -308,13 +312,106 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm && (meta_db.get_pinned(b"multi-consensus-metadata-key").is_ok_and(|r| r.is_some()) || MultiConsensusManagementStore::new(meta_db.clone()).should_upgrade().unwrap()) { - let msg = - "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; - get_user_approval_or_exit(msg, args.yes); + let mut mcms = MultiConsensusManagementStore::new(meta_db.clone()); + let version = mcms.version().unwrap(); + + // TODO: Update this entire section to a more robust implementation that allows applying multiple upgrade strategies. 
+ // If I'm at version 3 and latest version is 7, I need to be able to upgrade to that version following the intermediate + // steps without having to delete the DB + if version == 3 { + let active_consensus_dir_name = mcms.active_consensus_dir_name().unwrap(); + + match active_consensus_dir_name { + Some(current_consensus_db) => { + // Apply soft upgrade logic: delete GD data from higher levels + // and then update DB version to 4 + let consensus_db = kaspa_database::prelude::ConnBuilder::default() + .with_db_path(consensus_db_dir.clone().join(current_consensus_db)) + .with_files_limit(1) + .build() + .unwrap(); + info!("Scanning for deprecated records to cleanup"); + + let mut gd_record_count: u32 = 0; + let mut compact_record_count: u32 = 0; + + let start_level: u8 = 1; + let start_level_bytes = start_level.to_le_bytes(); + let ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); + let ghostdag_prefix = ghostdag_prefix_vec.as_slice(); + + // This section is used to count the records to be deleted. It's not used for the actual delete. 
+ for result in consensus_db.iterator(rocksdb::IteratorMode::From(ghostdag_prefix, rocksdb::Direction::Forward)) { + let (key, _) = result.unwrap(); + if !key.starts_with(&[DatabaseStorePrefixes::Ghostdag.into()]) { + break; + } + + gd_record_count += 1; + } + + let compact_prefix_vec = DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); + let compact_prefix = compact_prefix_vec.as_slice(); + + for result in consensus_db.iterator(rocksdb::IteratorMode::From(compact_prefix, rocksdb::Direction::Forward)) { + let (key, _) = result.unwrap(); + if !key.starts_with(&[DatabaseStorePrefixes::GhostdagCompact.into()]) { + break; + } + + compact_record_count += 1; + } + + trace!("Number of Ghostdag records to cleanup: {}", gd_record_count); + trace!("Number of GhostdagCompact records to cleanup: {}", compact_record_count); + info!("Number of deprecated records to cleanup: {}", gd_record_count + compact_record_count); + + let msg = + "Node database currently at version 3. Upgrade process to version 4 needs to be applied. Continue? 
(y/n)"; + get_user_approval_or_exit(msg, args.yes); + + // Actual delete only happens after user consents to the upgrade: + let mut writer = DirectDbWriter::new(&consensus_db); + + let end_level: u8 = config.max_block_level + 1; + let end_level_bytes = end_level.to_le_bytes(); - info!("Deleting databases from previous Kaspad version"); + let start_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(start_level_bytes).collect_vec(); + let end_ghostdag_prefix_vec = DatabaseStorePrefixes::Ghostdag.into_iter().chain(end_level_bytes).collect_vec(); - is_db_reset_needed = true; + let start_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(start_level_bytes).collect_vec(); + let end_compact_prefix_vec = + DatabaseStorePrefixes::GhostdagCompact.into_iter().chain(end_level_bytes).collect_vec(); + + // Apply delete of range from level 1 to max (+1) for Ghostdag and GhostdagCompact: + writer.delete_range(start_ghostdag_prefix_vec.clone(), end_ghostdag_prefix_vec.clone()).unwrap(); + writer.delete_range(start_compact_prefix_vec.clone(), end_compact_prefix_vec.clone()).unwrap(); + + // Compact the deleted range to apply the delete immediately + consensus_db.compact_range(Some(start_ghostdag_prefix_vec.as_slice()), Some(end_ghostdag_prefix_vec.as_slice())); + consensus_db.compact_range(Some(start_compact_prefix_vec.as_slice()), Some(end_compact_prefix_vec.as_slice())); + + // Also update the version to one higher: + mcms.set_version(version + 1).unwrap(); + } + None => { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete? (y/n)"; + get_user_approval_or_exit(msg, args.yes); + + is_db_reset_needed = true; + } + } + } else { + let msg = + "Node database is from a different Kaspad *DB* version and needs to be fully deleted, do you confirm the delete?
(y/n)"; + get_user_approval_or_exit(msg, args.yes); + + info!("Deleting databases from previous Kaspad version"); + + is_db_reset_needed = true; + } } // Will be true if any of the other condition above except args.reset_db From f8baf69015d2b4156e6403cfe162281f23d95d3b Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 9 Jul 2024 22:23:23 -0600 Subject: [PATCH 33/50] Explain further the workaround for GD ordering.rs --- consensus/src/processes/ghostdag/ordering.rs | 28 +++++++++++++++----- consensus/src/processes/ghostdag/protocol.rs | 7 +++++ 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/consensus/src/processes/ghostdag/ordering.rs b/consensus/src/processes/ghostdag/ordering.rs index 21306e5b8..cb73c3398 100644 --- a/consensus/src/processes/ghostdag/ordering.rs +++ b/consensus/src/processes/ghostdag/ordering.rs @@ -1,7 +1,9 @@ use std::cmp::Ordering; use kaspa_consensus_core::BlueWorkType; +use kaspa_core::warn; use kaspa_hashes::Hash; +use kaspa_math::Uint192; use serde::{Deserialize, Serialize}; use crate::model::{ @@ -46,13 +48,27 @@ impl = blocks.into_iter().collect(); sorted_blocks.sort_by_cached_key(|block| SortableBlock { hash: *block, - // Since we're only calculating GD at all levels on-demand, we may get blocks from the relations - // store in the mergeset that are not on our level + // TODO: Reconsider this approach + // It's possible for mergeset.rs::unordered_mergeset_without_selected_parent (which calls this) to reference parents + // that are in a lower level when calling relations.get_parents. 
This will panic at self.ghostdag_store.get_blue_work(*block) + // // Options for fixes: - // - do this - // - guarantee that we're only getting parents that are in this store - // - make relations store only return parents at the same or higher level - blue_work: self.ghostdag_store.get_blue_work(*block).unwrap_or_default(), + // 1) do this where we simply unwrap and default to 0 (currently implemented) + // - consequence is that it affects all GD calculations + // - I argue this is fine for the short term because GD entries not being in the GD store + // can only happen IFF the parent is on a lower level. For level 0 (primary GD), this is not a problem + // and for higher GD it's also not a problem since we only want to use blocks in the same + // level or higher. + // - There is also an extra check being done in ghostdag call side to verify that the hashes in the mergeset + // belong to this + // 2) in mergeset.rs::unordered_mergeset_without_selected_parent, guarantee that we're only getting + // parents that are in this store + // 3) make relations store only return parents at the same or higher level + // - we know that relations.get_parents can return parents in one level lower + blue_work: self.ghostdag_store.get_blue_work(*block).unwrap_or_else(|_| { warn!("Tried getting blue work of hash not in GD store: {}", block); Uint192::from_u64(0) }), }); sorted_blocks } diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index ac9ae41d7..bfc66ebe6 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -106,6 +106,13 @@ impl Date: Tue, 9 Jul 2024 22:23:52 -0600 Subject: [PATCH 34/50] Minor update to Display of TempGD keys --- database/src/key.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/database/src/key.rs b/database/src/key.rs index e8aeff091..83fa8ebb2 100644 --- a/database/src/key.rs +++ b/database/src/key.rs @@ -73,6 +73,8 @@ impl Display
for DbKey { match prefix { Ghostdag | GhostdagCompact + | TempGhostdag + | TempGhostdagCompact | RelationsParents | RelationsChildren | Reachability From bc56e65d5dd93d17c00e12e9f2c05e0a924e24b5 Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Tue, 9 Jul 2024 22:28:14 -0600 Subject: [PATCH 35/50] Various fixes - Keep using old root to minimize proof size. Old root is calculated using the temporary gd stores - fix the off-by-one in block_at_depth and chain_up_to_depth - revert the temp fix to sync with the off-by-one --- consensus/src/processes/pruning_proof/mod.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index e0ca2a1e8..34ae371db 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -868,8 +868,7 @@ impl PruningProofManager { } if current_header.direct_parents().is_empty() // Stop at genesis - // Need to ensure this does the same 2M+1 depth that block_at_depth does - || (pp_header.header.blue_score > current_header.blue_score + required_level_0_depth + || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth && intersected_with_required_block_chain) { break current_header; @@ -916,9 +915,8 @@ impl PruningProofManager { true, ); - // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block - && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() > required_level_depth) + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) { break Ok((ghostdag_store, selected_tip, root)); } @@ -1016,7 +1014,8 @@ impl PruningProofManager { let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); let mut visited = BlockHashSet::new(); - 
queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); + // Still use "old_root" to make sure we use the minimum amount of records for the proof + queue.push(Reverse(SortableBlock::new(old_root, self.headers_store.get_header(old_root).unwrap().blue_work))); while let Some(current) = queue.pop() { let current = current.0.hash; if !visited.insert(current) { @@ -1158,7 +1157,7 @@ impl PruningProofManager { let mut current_gd = high_gd; let mut current = high; let mut res = vec![current]; - while current_gd.blue_score + depth >= high_gd.blue_score { + while current_gd.blue_score + depth > high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } @@ -1186,7 +1185,7 @@ impl PruningProofManager { .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {depth}, {err}")))?; let mut current_gd = high_gd; let mut current = high; - while current_gd.blue_score + depth >= high_gd.blue_score { + while current_gd.blue_score + depth > high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } From b917e211973407bbcbed6c889bebaa2fc78245d6 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 09:12:57 +0000 Subject: [PATCH 36/50] Squashed commit of the following: commit 5c739491ae224720904deb1e2dbc3ff369ae35ae Author: Michael Sutton Date: Thu Jul 11 11:02:44 2024 +0000 simplify trace commit 20c44e20c25fa7c76abe0709185ccf0849649322 Merge: 21c17b7c d7a5ba2a Author: Michael Sutton Date: Mon Jun 17 23:05:03 2024 +0000 Merge branch 'dev' into modify-template-mass-activation commit 21c17b7c2ec6e1064a2450daf8d87f9b537b424c Author: Michael Sutton Date: Thu May 30 10:43:45 2024 +0000 bump version to 0.14.2 commit 8b21ac80d006ed01cf330011b5f7df91d61b466b Author: Michael Sutton Date: Thu May 16 14:09:37 2024 +0000 fixes a tn11 bug where tx mass hashing was not activated when modifying block template --- Cargo.lock | 114 +++++++++++++------------- Cargo.toml | 114 
+++++++++++++------------- kaspad/src/daemon.rs | 1 + mining/src/block_template/builder.rs | 7 +- mining/src/block_template/selector.rs | 6 +- mining/src/manager.rs | 23 ++++-- mining/src/manager_tests.rs | 6 +- 7 files changed, 142 insertions(+), 129 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 67272dd78..76cbb0a89 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2423,7 +2423,7 @@ dependencies = [ [[package]] name = "kaspa-addresses" -version = "0.14.1" +version = "0.14.2" dependencies = [ "borsh", "criterion", @@ -2440,7 +2440,7 @@ dependencies = [ [[package]] name = "kaspa-addressmanager" -version = "0.14.1" +version = "0.14.2" dependencies = [ "borsh", "igd-next", @@ -2463,14 +2463,14 @@ dependencies = [ [[package]] name = "kaspa-alloc" -version = "0.14.1" +version = "0.14.2" dependencies = [ "mimalloc", ] [[package]] name = "kaspa-bip32" -version = "0.14.1" +version = "0.14.2" dependencies = [ "borsh", "bs58", @@ -2496,7 +2496,7 @@ dependencies = [ [[package]] name = "kaspa-cli" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-trait", "borsh", @@ -2541,7 +2541,7 @@ dependencies = [ [[package]] name = "kaspa-connectionmanager" -version = "0.14.1" +version = "0.14.2" dependencies = [ "duration-string", "futures-util", @@ -2558,7 +2558,7 @@ dependencies = [ [[package]] name = "kaspa-consensus" -version = "0.14.1" +version = "0.14.2" dependencies = [ "arc-swap", "async-channel 2.2.1", @@ -2601,7 +2601,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-client" -version = "0.14.1" +version = "0.14.2" dependencies = [ "ahash 0.8.11", "cfg-if 1.0.0", @@ -2629,7 +2629,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-core" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-trait", "bincode", @@ -2666,7 +2666,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-notify" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "cfg-if 1.0.0", @@ -2685,7 +2685,7 @@ dependencies = [ [[package]] 
name = "kaspa-consensus-wasm" -version = "0.14.1" +version = "0.14.2" dependencies = [ "cfg-if 1.0.0", "faster-hex 0.6.1", @@ -2709,7 +2709,7 @@ dependencies = [ [[package]] name = "kaspa-consensusmanager" -version = "0.14.1" +version = "0.14.2" dependencies = [ "duration-string", "futures", @@ -2727,7 +2727,7 @@ dependencies = [ [[package]] name = "kaspa-core" -version = "0.14.1" +version = "0.14.2" dependencies = [ "cfg-if 1.0.0", "ctrlc", @@ -2745,7 +2745,7 @@ dependencies = [ [[package]] name = "kaspa-daemon" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-trait", "borsh", @@ -2767,7 +2767,7 @@ dependencies = [ [[package]] name = "kaspa-database" -version = "0.14.1" +version = "0.14.2" dependencies = [ "bincode", "enum-primitive-derive", @@ -2789,7 +2789,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-client" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "async-stream", @@ -2820,7 +2820,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-core" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "async-stream", @@ -2849,7 +2849,7 @@ dependencies = [ [[package]] name = "kaspa-grpc-server" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "async-stream", @@ -2884,7 +2884,7 @@ dependencies = [ [[package]] name = "kaspa-hashes" -version = "0.14.1" +version = "0.14.2" dependencies = [ "blake2b_simd", "borsh", @@ -2905,7 +2905,7 @@ dependencies = [ [[package]] name = "kaspa-index-core" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "async-trait", @@ -2924,7 +2924,7 @@ dependencies = [ [[package]] name = "kaspa-index-processor" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "async-trait", @@ -2952,7 +2952,7 @@ dependencies = [ [[package]] name = "kaspa-math" -version = "0.14.1" +version = "0.14.2" dependencies = [ "borsh", "criterion", @@ -2973,14 +2973,14 @@ dependencies = [ [[package]] name 
= "kaspa-merkle" -version = "0.14.1" +version = "0.14.2" dependencies = [ "kaspa-hashes", ] [[package]] name = "kaspa-metrics-core" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-trait", "borsh", @@ -2996,7 +2996,7 @@ dependencies = [ [[package]] name = "kaspa-mining" -version = "0.14.1" +version = "0.14.2" dependencies = [ "criterion", "futures-util", @@ -3022,7 +3022,7 @@ dependencies = [ [[package]] name = "kaspa-mining-errors" -version = "0.14.1" +version = "0.14.2" dependencies = [ "kaspa-consensus-core", "thiserror", @@ -3030,7 +3030,7 @@ dependencies = [ [[package]] name = "kaspa-muhash" -version = "0.14.1" +version = "0.14.2" dependencies = [ "criterion", "kaspa-hashes", @@ -3043,7 +3043,7 @@ dependencies = [ [[package]] name = "kaspa-notify" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "async-trait", @@ -3078,7 +3078,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-flows" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-trait", "chrono", @@ -3109,7 +3109,7 @@ dependencies = [ [[package]] name = "kaspa-p2p-lib" -version = "0.14.1" +version = "0.14.2" dependencies = [ "borsh", "ctrlc", @@ -3140,7 +3140,7 @@ dependencies = [ [[package]] name = "kaspa-perf-monitor" -version = "0.14.1" +version = "0.14.2" dependencies = [ "kaspa-core", "log", @@ -3152,7 +3152,7 @@ dependencies = [ [[package]] name = "kaspa-pow" -version = "0.14.1" +version = "0.14.2" dependencies = [ "criterion", "js-sys", @@ -3168,7 +3168,7 @@ dependencies = [ [[package]] name = "kaspa-resolver" -version = "0.14.1" +version = "0.14.2" dependencies = [ "ahash 0.8.11", "axum 0.7.5", @@ -3198,7 +3198,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-core" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "async-trait", @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-macros" -version = "0.14.1" +version = "0.14.2" dependencies = [ "convert_case 0.6.0", "proc-macro-error", @@ 
-3250,7 +3250,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-service" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-trait", "kaspa-addresses", @@ -3279,7 +3279,7 @@ dependencies = [ [[package]] name = "kaspa-testing-integration" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "async-trait", @@ -3339,7 +3339,7 @@ dependencies = [ [[package]] name = "kaspa-txscript" -version = "0.14.1" +version = "0.14.2" dependencies = [ "blake2b_simd", "borsh", @@ -3365,7 +3365,7 @@ dependencies = [ [[package]] name = "kaspa-txscript-errors" -version = "0.14.1" +version = "0.14.2" dependencies = [ "secp256k1", "thiserror", @@ -3373,7 +3373,7 @@ dependencies = [ [[package]] name = "kaspa-utils" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "async-trait", @@ -3402,7 +3402,7 @@ dependencies = [ [[package]] name = "kaspa-utils-tower" -version = "0.14.1" +version = "0.14.2" dependencies = [ "cfg-if 1.0.0", "futures", @@ -3416,7 +3416,7 @@ dependencies = [ [[package]] name = "kaspa-utxoindex" -version = "0.14.1" +version = "0.14.2" dependencies = [ "futures", "kaspa-consensus", @@ -3437,7 +3437,7 @@ dependencies = [ [[package]] name = "kaspa-wallet" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-std", "async-trait", @@ -3449,7 +3449,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-cli-wasm" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-trait", "js-sys", @@ -3463,7 +3463,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-core" -version = "0.14.1" +version = "0.14.2" dependencies = [ "aes", "ahash 0.8.11", @@ -3543,7 +3543,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-keys" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-trait", "borsh", @@ -3576,7 +3576,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-macros" -version = "0.14.1" +version = "0.14.2" dependencies = [ "convert_case 0.5.0", "proc-macro-error", @@ -3589,7 +3589,7 
@@ dependencies = [ [[package]] name = "kaspa-wasm" -version = "0.14.1" +version = "0.14.2" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3615,7 +3615,7 @@ dependencies = [ [[package]] name = "kaspa-wasm-core" -version = "0.14.1" +version = "0.14.2" dependencies = [ "faster-hex 0.6.1", "js-sys", @@ -3624,7 +3624,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-client" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-std", "async-trait", @@ -3658,7 +3658,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-example-subscriber" -version = "0.14.1" +version = "0.14.2" dependencies = [ "ctrlc", "futures", @@ -3673,7 +3673,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-proxy" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-trait", "clap 4.5.4", @@ -3692,7 +3692,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-server" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-trait", "borsh", @@ -3719,7 +3719,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-wasm" -version = "0.14.1" +version = "0.14.2" dependencies = [ "ahash 0.8.11", "async-std", @@ -3748,7 +3748,7 @@ dependencies = [ [[package]] name = "kaspad" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "clap 4.5.4", @@ -5139,7 +5139,7 @@ dependencies = [ [[package]] name = "rothschild" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "clap 4.5.4", @@ -5584,7 +5584,7 @@ dependencies = [ [[package]] name = "simpa" -version = "0.14.1" +version = "0.14.2" dependencies = [ "async-channel 2.2.1", "clap 4.5.4", diff --git a/Cargo.toml b/Cargo.toml index 283981df8..2a548c844 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ members = [ [workspace.package] rust-version = "1.78.0" -version = "0.14.1" +version = "0.14.2" authors = ["Kaspa developers"] license = "ISC" repository = "https://github.com/kaspanet/rusty-kaspa" @@ -79,62 +79,62 @@ include = [ ] [workspace.dependencies] -# 
kaspa-testing-integration = { version = "0.14.1", path = "testing/integration" } -kaspa-addresses = { version = "0.14.1", path = "crypto/addresses" } -kaspa-addressmanager = { version = "0.14.1", path = "components/addressmanager" } -kaspa-bip32 = { version = "0.14.1", path = "wallet/bip32" } -kaspa-resolver = { version = "0.14.1", path = "rpr/wrpc/resolver" } -kaspa-cli = { version = "0.14.1", path = "cli" } -kaspa-connectionmanager = { version = "0.14.1", path = "components/connectionmanager" } -kaspa-consensus = { version = "0.14.1", path = "consensus" } -kaspa-consensus-core = { version = "0.14.1", path = "consensus/core" } -kaspa-consensus-client = { version = "0.14.1", path = "consensus/client" } -kaspa-consensus-notify = { version = "0.14.1", path = "consensus/notify" } -kaspa-consensus-wasm = { version = "0.14.1", path = "consensus/wasm" } -kaspa-consensusmanager = { version = "0.14.1", path = "components/consensusmanager" } -kaspa-core = { version = "0.14.1", path = "core" } -kaspa-daemon = { version = "0.14.1", path = "daemon" } -kaspa-database = { version = "0.14.1", path = "database" } -kaspa-grpc-client = { version = "0.14.1", path = "rpc/grpc/client" } -kaspa-grpc-core = { version = "0.14.1", path = "rpc/grpc/core" } -kaspa-grpc-server = { version = "0.14.1", path = "rpc/grpc/server" } -kaspa-hashes = { version = "0.14.1", path = "crypto/hashes" } -kaspa-index-core = { version = "0.14.1", path = "indexes/core" } -kaspa-index-processor = { version = "0.14.1", path = "indexes/processor" } -kaspa-math = { version = "0.14.1", path = "math" } -kaspa-merkle = { version = "0.14.1", path = "crypto/merkle" } -kaspa-metrics-core = { version = "0.14.1", path = "metrics/core" } -kaspa-mining = { version = "0.14.1", path = "mining" } -kaspa-mining-errors = { version = "0.14.1", path = "mining/errors" } -kaspa-muhash = { version = "0.14.1", path = "crypto/muhash" } -kaspa-notify = { version = "0.14.1", path = "notify" } -kaspa-p2p-flows = { version = "0.14.1", path 
= "protocol/flows" } -kaspa-p2p-lib = { version = "0.14.1", path = "protocol/p2p" } -kaspa-perf-monitor = { version = "0.14.1", path = "metrics/perf_monitor" } -kaspa-pow = { version = "0.14.1", path = "consensus/pow" } -kaspa-rpc-core = { version = "0.14.1", path = "rpc/core" } -kaspa-rpc-macros = { version = "0.14.1", path = "rpc/macros" } -kaspa-rpc-service = { version = "0.14.1", path = "rpc/service" } -kaspa-txscript = { version = "0.14.1", path = "crypto/txscript" } -kaspa-txscript-errors = { version = "0.14.1", path = "crypto/txscript/errors" } -kaspa-utils = { version = "0.14.1", path = "utils" } -kaspa-utils-tower = { version = "0.14.1", path = "utils/tower" } -kaspa-utxoindex = { version = "0.14.1", path = "indexes/utxoindex" } -kaspa-wallet = { version = "0.14.1", path = "wallet/native" } -kaspa-wallet-cli-wasm = { version = "0.14.1", path = "wallet/wasm" } -kaspa-wallet-keys = { version = "0.14.1", path = "wallet/keys" } -kaspa-wallet-core = { version = "0.14.1", path = "wallet/core" } -kaspa-wallet-macros = { version = "0.14.1", path = "wallet/macros" } -kaspa-wasm = { version = "0.14.1", path = "wasm" } -kaspa-wasm-core = { version = "0.14.1", path = "wasm/core" } -kaspa-wrpc-client = { version = "0.14.1", path = "rpc/wrpc/client" } -kaspa-wrpc-core = { version = "0.14.1", path = "rpc/wrpc/core" } -kaspa-wrpc-proxy = { version = "0.14.1", path = "rpc/wrpc/proxy" } -kaspa-wrpc-server = { version = "0.14.1", path = "rpc/wrpc/server" } -kaspa-wrpc-wasm = { version = "0.14.1", path = "rpc/wrpc/wasm" } -kaspa-wrpc-example-subscriber = { version = "0.14.1", path = "rpc/wrpc/examples/subscriber" } -kaspad = { version = "0.14.1", path = "kaspad" } -kaspa-alloc = { version = "0.14.1", path = "utils/alloc" } +# kaspa-testing-integration = { version = "0.14.2", path = "testing/integration" } +kaspa-addresses = { version = "0.14.2", path = "crypto/addresses" } +kaspa-addressmanager = { version = "0.14.2", path = "components/addressmanager" } +kaspa-bip32 = { 
version = "0.14.2", path = "wallet/bip32" } +kaspa-resolver = { version = "0.14.2", path = "rpr/wrpc/resolver" } +kaspa-cli = { version = "0.14.2", path = "cli" } +kaspa-connectionmanager = { version = "0.14.2", path = "components/connectionmanager" } +kaspa-consensus = { version = "0.14.2", path = "consensus" } +kaspa-consensus-core = { version = "0.14.2", path = "consensus/core" } +kaspa-consensus-client = { version = "0.14.2", path = "consensus/client" } +kaspa-consensus-notify = { version = "0.14.2", path = "consensus/notify" } +kaspa-consensus-wasm = { version = "0.14.2", path = "consensus/wasm" } +kaspa-consensusmanager = { version = "0.14.2", path = "components/consensusmanager" } +kaspa-core = { version = "0.14.2", path = "core" } +kaspa-daemon = { version = "0.14.2", path = "daemon" } +kaspa-database = { version = "0.14.2", path = "database" } +kaspa-grpc-client = { version = "0.14.2", path = "rpc/grpc/client" } +kaspa-grpc-core = { version = "0.14.2", path = "rpc/grpc/core" } +kaspa-grpc-server = { version = "0.14.2", path = "rpc/grpc/server" } +kaspa-hashes = { version = "0.14.2", path = "crypto/hashes" } +kaspa-index-core = { version = "0.14.2", path = "indexes/core" } +kaspa-index-processor = { version = "0.14.2", path = "indexes/processor" } +kaspa-math = { version = "0.14.2", path = "math" } +kaspa-merkle = { version = "0.14.2", path = "crypto/merkle" } +kaspa-metrics-core = { version = "0.14.2", path = "metrics/core" } +kaspa-mining = { version = "0.14.2", path = "mining" } +kaspa-mining-errors = { version = "0.14.2", path = "mining/errors" } +kaspa-muhash = { version = "0.14.2", path = "crypto/muhash" } +kaspa-notify = { version = "0.14.2", path = "notify" } +kaspa-p2p-flows = { version = "0.14.2", path = "protocol/flows" } +kaspa-p2p-lib = { version = "0.14.2", path = "protocol/p2p" } +kaspa-perf-monitor = { version = "0.14.2", path = "metrics/perf_monitor" } +kaspa-pow = { version = "0.14.2", path = "consensus/pow" } +kaspa-rpc-core = { version = 
"0.14.2", path = "rpc/core" } +kaspa-rpc-macros = { version = "0.14.2", path = "rpc/macros" } +kaspa-rpc-service = { version = "0.14.2", path = "rpc/service" } +kaspa-txscript = { version = "0.14.2", path = "crypto/txscript" } +kaspa-txscript-errors = { version = "0.14.2", path = "crypto/txscript/errors" } +kaspa-utils = { version = "0.14.2", path = "utils" } +kaspa-utils-tower = { version = "0.14.2", path = "utils/tower" } +kaspa-utxoindex = { version = "0.14.2", path = "indexes/utxoindex" } +kaspa-wallet = { version = "0.14.2", path = "wallet/native" } +kaspa-wallet-cli-wasm = { version = "0.14.2", path = "wallet/wasm" } +kaspa-wallet-keys = { version = "0.14.2", path = "wallet/keys" } +kaspa-wallet-core = { version = "0.14.2", path = "wallet/core" } +kaspa-wallet-macros = { version = "0.14.2", path = "wallet/macros" } +kaspa-wasm = { version = "0.14.2", path = "wasm" } +kaspa-wasm-core = { version = "0.14.2", path = "wasm/core" } +kaspa-wrpc-client = { version = "0.14.2", path = "rpc/wrpc/client" } +kaspa-wrpc-core = { version = "0.14.2", path = "rpc/wrpc/core" } +kaspa-wrpc-proxy = { version = "0.14.2", path = "rpc/wrpc/proxy" } +kaspa-wrpc-server = { version = "0.14.2", path = "rpc/wrpc/server" } +kaspa-wrpc-wasm = { version = "0.14.2", path = "rpc/wrpc/wasm" } +kaspa-wrpc-example-subscriber = { version = "0.14.2", path = "rpc/wrpc/examples/subscriber" } +kaspad = { version = "0.14.2", path = "kaspad" } +kaspa-alloc = { version = "0.14.2", path = "utils/alloc" } # external aes = "0.8.3" diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 08dc1d87a..2df03bab8 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -524,6 +524,7 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm config.ram_scale, config.block_template_cache_lifetime, mining_counters, + config.storage_mass_activation_daa_score, ))); let flow_context = Arc::new(FlowContext::new( diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs index de3428a74..9645ca171 100644 --- a/mining/src/block_template/builder.rs +++ b/mining/src/block_template/builder.rs @@ -4,7 +4,7 @@ use kaspa_consensus_core::{ api::ConsensusApi, block::{BlockTemplate, TemplateBuildMode}, coinbase::MinerData, - merkle::calc_hash_merkle_root, + merkle::calc_hash_merkle_root_with_options, tx::COINBASE_TRANSACTION_INDEX, }; use kaspa_core::{ @@ -103,6 +103,7 @@ impl BlockTemplateBuilder { consensus: &dyn ConsensusApi, new_miner_data: &MinerData, block_template_to_modify: &BlockTemplate, + storage_mass_activation_daa_score: u64, ) -> BuilderResult { let mut block_template = block_template_to_modify.clone(); @@ -115,7 +116,9 @@ impl BlockTemplateBuilder { coinbase_tx.outputs.last_mut().unwrap().script_public_key = new_miner_data.script_public_key.clone(); } // Update the hash merkle root according to the modified transactions - block_template.block.header.hash_merkle_root = calc_hash_merkle_root(block_template.block.transactions.iter()); + let storage_mass_activated = block_template.block.header.daa_score > storage_mass_activation_daa_score; + block_template.block.header.hash_merkle_root = + calc_hash_merkle_root_with_options(block_template.block.transactions.iter(), storage_mass_activated); let new_timestamp = unix_now(); if new_timestamp > block_template.block.header.timestamp { // Only if new time stamp is later than current, update the header. 
Otherwise, diff --git a/mining/src/block_template/selector.rs b/mining/src/block_template/selector.rs index b65126caf..a55ecb93d 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -182,11 +182,7 @@ impl TransactionsSelector { self.total_mass += selected_tx.calculated_mass; self.total_fees += selected_tx.calculated_fee; - trace!( - "Adding tx {0} (fee per megagram: {1})", - selected_tx.tx.id(), - selected_tx.calculated_fee * 1_000_000 / selected_tx.calculated_mass - ); + trace!("Adding tx {0} (fee per gram: {1})", selected_tx.tx.id(), selected_tx.calculated_fee / selected_tx.calculated_mass); // Mark for deletion selected_candidate.is_marked_for_deletion = true; diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 574390122..4350401e6 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -39,6 +39,8 @@ pub struct MiningManager { block_template_cache: BlockTemplateCache, mempool: RwLock, counters: Arc, + // Storage mass hardfork DAA score + pub(crate) storage_mass_activation_daa_score: u64, } impl MiningManager { @@ -50,7 +52,7 @@ impl MiningManager { counters: Arc, ) -> Self { let config = Config::build_default(target_time_per_block, relay_non_std_transactions, max_block_mass); - Self::with_config(config, cache_lifetime, counters) + Self::with_config(config, cache_lifetime, counters, u64::MAX) } pub fn new_with_extended_config( @@ -60,17 +62,23 @@ impl MiningManager { ram_scale: f64, cache_lifetime: Option, counters: Arc, + storage_mass_activation_daa_score: u64, ) -> Self { let config = Config::build_default(target_time_per_block, relay_non_std_transactions, max_block_mass).apply_ram_scale(ram_scale); - Self::with_config(config, cache_lifetime, counters) + Self::with_config(config, cache_lifetime, counters, storage_mass_activation_daa_score) } - pub(crate) fn with_config(config: Config, cache_lifetime: Option, counters: Arc) -> Self { + pub(crate) fn with_config( + config: Config, + 
cache_lifetime: Option, + counters: Arc, + storage_mass_activation_daa_score: u64, + ) -> Self { let config = Arc::new(config); let mempool = RwLock::new(Mempool::new(config.clone(), counters.clone())); let block_template_cache = BlockTemplateCache::new(cache_lifetime); - Self { config, block_template_cache, mempool, counters } + Self { config, block_template_cache, mempool, counters, storage_mass_activation_daa_score } } pub fn get_block_template(&self, consensus: &dyn ConsensusApi, miner_data: &MinerData) -> MiningManagerResult { @@ -86,7 +94,12 @@ impl MiningManager { } // Miner data is new -- make the minimum changes required // Note the call returns a modified clone of the cached block template - let block_template = BlockTemplateBuilder::modify_block_template(consensus, miner_data, &immutable_template)?; + let block_template = BlockTemplateBuilder::modify_block_template( + consensus, + miner_data, + &immutable_template, + self.storage_mass_activation_daa_score, + )?; // No point in updating cache since we have no reason to believe this coinbase will be used more // than the previous one, and we want to maintain the original template caching time diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index 530117094..f919a3654 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -585,7 +585,7 @@ mod tests { // Limit the orphan pool to 2 transactions config.maximum_orphan_transaction_count = 2; let counters = Arc::new(MiningCounters::default()); - let mining_manager = MiningManager::with_config(config.clone(), None, counters); + let mining_manager = MiningManager::with_config(config.clone(), None, counters, u64::MAX); // Create pairs of transaction parent-and-child pairs according to the test vector let (parent_txs, child_txs) = create_arrays_of_parent_and_children_transactions(&consensus, tests.len()); @@ -853,7 +853,7 @@ mod tests { let expected_template = result.unwrap(); // Modify to miner_data_1 - let result = 
BlockTemplateBuilder::modify_block_template(consensus, &miner_data_1, &expected_template); + let result = BlockTemplateBuilder::modify_block_template(consensus, &miner_data_1, &expected_template, u64::MAX); assert!(result.is_ok(), "modify block template failed for miner data 1"); let mut modified_template = result.unwrap(); // Make sure timestamps are equal before comparing the hash @@ -872,7 +872,7 @@ mod tests { assert_ne!(expected_block.hash(), modified_block.hash(), "built and modified blocks should have different hashes"); // And modify back to miner_data_2 - let result = BlockTemplateBuilder::modify_block_template(consensus, &miner_data_2, &modified_template); + let result = BlockTemplateBuilder::modify_block_template(consensus, &miner_data_2, &modified_template, u64::MAX); assert!(result.is_ok(), "modify block template failed for miner data 2"); let mut modified_template_2 = result.unwrap(); // Make sure timestamps are equal before comparing the hash From 99eed7c2debe40836089ed51c1518971de0a43bf Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 09:20:07 +0000 Subject: [PATCH 37/50] Squashed commit of the following: commit 6a56461ff1249e11e3124090253ca0bf128f7189 Author: Maxim <59533214+biryukovmaxim@users.noreply.github.com> Date: Tue Jun 18 18:09:29 2024 +0300 Implement PSKT(Partially Signed Kaspa Transaction) (#481) * initial support of pskt: supported roles: creator, constructor, updater, signer roles * add builder * handle combine errors * finalize * extractor * chore: typo * style: fmt * expose txid to global * chore: change version * feat: serde for optional bytes * feat: impl (de)serialization * style: fmt * add example, fixes * style: fmt * style: clippy * rollback unrelated changes * psbt -> pskt * refactor: avoid copy-paste by using recursion * docs: add description of roles commit a797e1ea324e67c509f70fd2b5f083abaf2cbd7c Author: George Bogodukhov Date: Mon Jun 17 15:50:16 2024 +1000 Add support for IP only for 
--rpclisten-borsh/json (#402) (#439) * Add support for IP only for --rpclisten-borsh/json * Fix cehck complaints --- Cargo.lock | 50 +++ Cargo.toml | 4 + consensus/core/src/hashing/sighash_type.rs | 3 +- consensus/core/src/tx.rs | 4 +- kaspad/src/args.rs | 2 +- rpc/wrpc/server/src/address.rs | 40 +- utils/src/lib.rs | 1 + utils/src/networking.rs | 12 +- utils/src/serde_bytes_optional.rs | 111 +++++ wallet/bip32/src/derivation_path.rs | 40 ++ wallet/pskt/Cargo.toml | 37 ++ wallet/pskt/examples/multisig.rs | 119 ++++++ wallet/pskt/src/error.rs | 15 + wallet/pskt/src/global.rs | 165 ++++++++ wallet/pskt/src/input.rs | 167 ++++++++ wallet/pskt/src/lib.rs | 458 +++++++++++++++++++++ wallet/pskt/src/output.rs | 82 ++++ wallet/pskt/src/role.rs | 27 ++ wallet/pskt/src/utils.rs | 29 ++ 19 files changed, 1359 insertions(+), 7 deletions(-) create mode 100644 utils/src/serde_bytes_optional.rs create mode 100644 wallet/pskt/Cargo.toml create mode 100644 wallet/pskt/examples/multisig.rs create mode 100644 wallet/pskt/src/error.rs create mode 100644 wallet/pskt/src/global.rs create mode 100644 wallet/pskt/src/input.rs create mode 100644 wallet/pskt/src/lib.rs create mode 100644 wallet/pskt/src/output.rs create mode 100644 wallet/pskt/src/role.rs create mode 100644 wallet/pskt/src/utils.rs diff --git a/Cargo.lock b/Cargo.lock index 76cbb0a89..b70409bb7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1383,6 +1383,37 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_builder" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0350b5cb0331628a5916d6c5c0b72e97393b8b6b03b47a9284f4e7f5a405ffd7" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d48cda787f839151732d396ac69e3473923d54312c070ee21e9effcaa8ca0b1d" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.60", +] + 
+[[package]] +name = "derive_builder_macro" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" +dependencies = [ + "derive_builder_core", + "syn 2.0.60", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -3587,6 +3618,25 @@ dependencies = [ "xxhash-rust", ] +[[package]] +name = "kaspa-wallet-pskt" +version = "0.14.2" +dependencies = [ + "derive_builder", + "kaspa-bip32", + "kaspa-consensus-client", + "kaspa-consensus-core", + "kaspa-txscript", + "kaspa-txscript-errors", + "kaspa-utils", + "secp256k1", + "serde", + "serde-value", + "serde_json", + "serde_repr", + "thiserror", +] + [[package]] name = "kaspa-wasm" version = "0.14.2" diff --git a/Cargo.toml b/Cargo.toml index 2a548c844..2409c7354 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ members = [ "wallet/wasm", "wallet/bip32", "wallet/keys", + "wallet/pskt", "consensus", "consensus/core", "consensus/client", @@ -123,6 +124,7 @@ kaspa-utxoindex = { version = "0.14.2", path = "indexes/utxoindex" } kaspa-wallet = { version = "0.14.2", path = "wallet/native" } kaspa-wallet-cli-wasm = { version = "0.14.2", path = "wallet/wasm" } kaspa-wallet-keys = { version = "0.14.2", path = "wallet/keys" } +kaspa-wallet-pskt = { version = "0.14.2", path = "wallet/pskt" } kaspa-wallet-core = { version = "0.14.2", path = "wallet/core" } kaspa-wallet-macros = { version = "0.14.2", path = "wallet/macros" } kaspa-wasm = { version = "0.14.2", path = "wasm" } @@ -162,6 +164,7 @@ ctrlc = "3.4.1" crypto_box = { version = "0.9.1", features = ["chacha20"] } dashmap = "5.5.3" derivative = "2.2.0" +derive_builder = "0.20.0" derive_more = "0.99.17" dhat = "0.3.2" dirs = "5.0.1" @@ -228,6 +231,7 @@ serde = { version = "1.0.190", features = ["derive", "rc"] } serde_bytes = "0.11.12" serde_json = "1.0.107" serde_repr = "0.1.18" +serde-value = "0.7.0" serde-wasm-bindgen = "0.6.1" sha1 = "0.10.6" sha2 = "0.10.8"
diff --git a/consensus/core/src/hashing/sighash_type.rs b/consensus/core/src/hashing/sighash_type.rs index 76d772f0d..a80091bba 100644 --- a/consensus/core/src/hashing/sighash_type.rs +++ b/consensus/core/src/hashing/sighash_type.rs @@ -1,3 +1,4 @@ +use serde::{Deserialize, Serialize}; use wasm_bindgen::prelude::*; pub const SIG_HASH_ALL: SigHashType = SigHashType(0b00000001); @@ -18,7 +19,7 @@ const ALLOWED_SIG_HASH_TYPES_VALUES: [u8; 6] = [ SIG_HASH_SINGLE.0 | SIG_HASH_ANY_ONE_CAN_PAY.0, ]; -#[derive(Copy, Clone)] +#[derive(Debug, Copy, Clone, Serialize, Deserialize)] #[wasm_bindgen] pub struct SigHashType(pub(crate) u8); diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index 137633701..c2d3ba2e0 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -29,7 +29,7 @@ pub type TransactionId = kaspa_hashes::Hash; /// score of the block that accepts the tx, its public key script, and how /// much it pays. /// @category Consensus -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] #[wasm_bindgen(inspectable, js_name = TransactionUtxoEntry)] pub struct UtxoEntry { @@ -53,7 +53,7 @@ impl MemSizeEstimator for UtxoEntry {} pub type TransactionIndexType = u32; /// Represents a Kaspa transaction outpoint -#[derive(Eq, Hash, PartialEq, Debug, Copy, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Eq, Default, Hash, PartialEq, Debug, Copy, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionOutpoint { #[serde(with = "serde_bytes_fixed_ref")] diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index e11672915..2774269d3 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -366,7 +366,7 @@ Setting to 0 prevents the preallocation and sets the maximum to {}, 
leading to 0 .long("ram-scale") .require_equals(true) .value_parser(clap::value_parser!(f64)) - .help("Apply a scale factor to memory allocation bounds. Nodes with limited RAM (~4-8GB) should set this to ~0.3-0.5 respectively. Nodes with + .help("Apply a scale factor to memory allocation bounds. Nodes with limited RAM (~4-8GB) should set this to ~0.3-0.5 respectively. Nodes with a large RAM (~64GB) can set this value to ~3.0-4.0 and gain superior performance especially for syncing peers faster"), ) ; diff --git a/rpc/wrpc/server/src/address.rs b/rpc/wrpc/server/src/address.rs index 7dac4d75d..81ccabe5d 100644 --- a/rpc/wrpc/server/src/address.rs +++ b/rpc/wrpc/server/src/address.rs @@ -29,7 +29,17 @@ impl WrpcNetAddress { }; format!("0.0.0.0:{port}").parse().unwrap() } - WrpcNetAddress::Custom(address) => *address, + WrpcNetAddress::Custom(address) => { + if address.port_not_specified() { + let port = match encoding { + WrpcEncoding::Borsh => network_type.default_borsh_rpc_port(), + WrpcEncoding::SerdeJson => network_type.default_json_rpc_port(), + }; + address.with_port(port) + } else { + *address + } + } } } } @@ -63,3 +73,31 @@ impl TryFrom for WrpcNetAddress { WrpcNetAddress::from_str(&s) } } + +#[cfg(test)] +mod tests { + use super::*; + use kaspa_utils::networking::IpAddress; + + #[test] + fn test_wrpc_net_address_from_str() { + // Addresses + let port: u16 = 8080; + let addr = format!("1.2.3.4:{port}").parse::().unwrap(); + let addr_without_port = "1.2.3.4".parse::().unwrap(); + let ip_addr = "1.2.3.4".parse::().unwrap(); + // Test + for schema in WrpcEncoding::iter() { + for network in NetworkType::iter() { + let expected_port = match schema { + WrpcEncoding::Borsh => Some(network.default_borsh_rpc_port()), + WrpcEncoding::SerdeJson => Some(network.default_json_rpc_port()), + }; + // Custom address with port + assert_eq!(addr.to_address(&network, schema), ContextualNetAddress::new(ip_addr, Some(port))); + // Custom address without port + 
assert_eq!(addr_without_port.to_address(&network, schema), ContextualNetAddress::new(ip_addr, expected_port)) + } + } + } +} diff --git a/utils/src/lib.rs b/utils/src/lib.rs index 956e3a2b9..bd3143719 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -67,6 +67,7 @@ pub mod as_slice; /// assert_eq!(test_struct, from_json); /// ``` pub mod serde_bytes; +pub mod serde_bytes_optional; /// # Examples /// diff --git a/utils/src/networking.rs b/utils/src/networking.rs index bb38b4d04..ebd72b259 100644 --- a/utils/src/networking.rs +++ b/utils/src/networking.rs @@ -34,7 +34,7 @@ const TS_IP_ADDRESS: &'static str = r#" /// A bucket based on an ip's prefix bytes. /// for ipv4 it consists of 6 leading zero bytes, and the first two octets, /// for ipv6 it consists of the first 8 octets, -/// encoded into a big endian u64. +/// encoded into a big endian u64. #[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)] pub struct PrefixBucket(u64); @@ -271,7 +271,7 @@ pub struct ContextualNetAddress { } impl ContextualNetAddress { - fn new(ip: IpAddress, port: Option) -> Self { + pub fn new(ip: IpAddress, port: Option) -> Self { Self { ip, port } } @@ -286,6 +286,14 @@ impl ContextualNetAddress { pub fn loopback() -> Self { Self { ip: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).into(), port: None } } + + pub fn port_not_specified(&self) -> bool { + self.port.is_none() + } + + pub fn with_port(&self, port: u16) -> Self { + Self { ip: self.ip, port: Some(port) } + } } impl From for ContextualNetAddress { diff --git a/utils/src/serde_bytes_optional.rs b/utils/src/serde_bytes_optional.rs new file mode 100644 index 000000000..308737667 --- /dev/null +++ b/utils/src/serde_bytes_optional.rs @@ -0,0 +1,111 @@ +pub use de::Deserialize; +pub use ser::Serialize; + +pub fn serialize(bytes: &T, serializer: S) -> Result +where + T: ?Sized + Serialize, + S: serde::Serializer, +{ + Serialize::serialize(bytes, serializer) +} + +pub fn deserialize<'de, T, D>(deserializer: D) -> Result +where + T: 
Deserialize<'de>, + D: serde::Deserializer<'de>, +{ + Deserialize::deserialize(deserializer) +} + +mod de { + use std::fmt::Display; + + pub trait Deserialize<'de>: Sized { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>; + } + + impl<'de, T: crate::serde_bytes::Deserialize<'de>> Deserialize<'de> for Option + where + >::Error: Display, + { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + struct OptionalVisitor { + out: std::marker::PhantomData, + } + + impl<'de, T> serde::de::Visitor<'de> for OptionalVisitor + where + T: crate::serde_bytes::Deserialize<'de>, + { + type Value = Option; + + fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.write_str("optional string, str or slice, vec of bytes") + } + + fn visit_unit(self) -> Result { + Ok(None) + } + + fn visit_none(self) -> Result { + Ok(None) + } + + fn visit_some(self, deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + T::deserialize(deserializer).map(Some) + } + } + + let visitor = OptionalVisitor { out: std::marker::PhantomData }; + deserializer.deserialize_option(visitor) + } + } +} + +mod ser { + use serde::Serializer; + + pub trait Serialize { + #[allow(missing_docs)] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer; + } + + impl Serialize for Option + where + T: crate::serde_bytes::Serialize + std::convert::AsRef<[u8]>, + { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + struct AsBytes(T); + + impl serde::Serialize for AsBytes + where + T: crate::serde_bytes::Serialize, + { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + crate::serde_bytes::Serialize::serialize(&self.0, serializer) + } + } + + match self { + Some(b) => serializer.serialize_some(&AsBytes(b)), + None => serializer.serialize_none(), + } + } + } +} diff --git a/wallet/bip32/src/derivation_path.rs b/wallet/bip32/src/derivation_path.rs index 
414bf2bf9..6ef47703c 100644 --- a/wallet/bip32/src/derivation_path.rs +++ b/wallet/bip32/src/derivation_path.rs @@ -6,6 +6,7 @@ use core::{ fmt::{self, Display}, str::FromStr, }; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; /// Prefix for all derivation paths. const PREFIX: &str = "m"; @@ -16,6 +17,45 @@ pub struct DerivationPath { path: Vec, } +impl<'de> Deserialize<'de> for DerivationPath { + fn deserialize(deserializer: D) -> std::result::Result + where + D: Deserializer<'de>, + { + struct DerivationPathVisitor; + impl<'de> de::Visitor<'de> for DerivationPathVisitor { + type Value = DerivationPath; + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("a string containing list of permissions separated by a '+'") + } + + fn visit_str(self, value: &str) -> std::result::Result + where + E: de::Error, + { + DerivationPath::from_str(value).map_err(|err| de::Error::custom(err.to_string())) + } + fn visit_borrowed_str(self, v: &'de str) -> std::result::Result + where + E: de::Error, + { + DerivationPath::from_str(v).map_err(|err| de::Error::custom(err.to_string())) + } + } + + deserializer.deserialize_str(DerivationPathVisitor) + } +} + +impl Serialize for DerivationPath { + fn serialize(&self, serializer: S) -> std::result::Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + impl DerivationPath { /// Iterate over the [`ChildNumber`] values in this derivation path. 
pub fn iter(&self) -> impl Iterator + '_ { diff --git a/wallet/pskt/Cargo.toml b/wallet/pskt/Cargo.toml new file mode 100644 index 000000000..f2d82cf07 --- /dev/null +++ b/wallet/pskt/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "kaspa-wallet-pskt" +keywords = ["kaspa", "wallet", "pskt", "psbt", "bip-370"] +description = "Partially Signed Kaspa Transaction" +categories = ["cryptography::cryptocurrencies"] +rust-version.workspace = true +version.workspace = true +authors.workspace = true +license.workspace = true +repository.workspace = true +edition.workspace = true +include.workspace = true + +[lib] +crate-type = ["cdylib", "lib"] + +[features] +wasm32-sdk = ["kaspa-consensus-client/wasm32-sdk"] +wasm32-types = ["kaspa-consensus-client/wasm32-types"] + +[dependencies] +kaspa-bip32.workspace = true +kaspa-consensus-client.workspace = true +kaspa-consensus-core.workspace = true +kaspa-txscript-errors.workspace = true +kaspa-txscript.workspace = true +kaspa-utils.workspace = true + +derive_builder.workspace = true +secp256k1.workspace = true +serde-value.workspace = true +serde.workspace = true +serde_repr.workspace = true +thiserror.workspace = true + +[dev-dependencies] +serde_json.workspace = true diff --git a/wallet/pskt/examples/multisig.rs b/wallet/pskt/examples/multisig.rs new file mode 100644 index 000000000..a34bef9b5 --- /dev/null +++ b/wallet/pskt/examples/multisig.rs @@ -0,0 +1,119 @@ +use kaspa_consensus_core::{ + hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + tx::{TransactionId, TransactionOutpoint, UtxoEntry}, +}; +use kaspa_txscript::{multisig_redeem_script, opcodes::codes::OpData65, pay_to_script_hash_script, script_builder::ScriptBuilder}; +use kaspa_wallet_pskt::{Combiner, Creator, Extractor, Finalizer, Inner, InputBuilder, SignInputOk, Signature, Signer, Updater, PSKT}; +use secp256k1::{rand::thread_rng, Keypair}; +use std::{iter, str::FromStr}; + +fn main() { + let kps = [Keypair::new(secp256k1::SECP256K1, &mut 
thread_rng()), Keypair::new(secp256k1::SECP256K1, &mut thread_rng())]; + let redeem_script = multisig_redeem_script(kps.iter().map(|pk| pk.x_only_public_key().0.serialize()), 2).unwrap(); + // Create the PSKT. + let created = PSKT::::default().inputs_modifiable().outputs_modifiable(); + let ser = serde_json::to_string_pretty(&created).expect("Failed to serialize after creation"); + println!("Serialized after creation: {}", ser); + + // The first constructor entity receives the PSKT and adds an input. + let pskt: PSKT = serde_json::from_str(&ser).expect("Failed to deserialize"); + // let in_0 = dummy_out_point(); + let input_0 = InputBuilder::default() + .utxo_entry(UtxoEntry { + amount: 12793000000000, + script_public_key: pay_to_script_hash_script(&redeem_script), + block_daa_score: 36151168, + is_coinbase: false, + }) + .previous_outpoint(TransactionOutpoint { + transaction_id: TransactionId::from_str("63020db736215f8b1105a9281f7bcbb6473d965ecc45bb2fb5da59bd35e6ff84").unwrap(), + index: 0, + }) + .sig_op_count(2) + .redeem_script(redeem_script) + .build() + .unwrap(); + let pskt_in0 = pskt.constructor().input(input_0); + let ser_in_0 = serde_json::to_string_pretty(&pskt_in0).expect("Failed to serialize after adding first input"); + println!("Serialized after adding first input: {}", ser_in_0); + + let combiner_pskt: PSKT = serde_json::from_str(&ser).expect("Failed to deserialize"); + let combined_pskt = (combiner_pskt + pskt_in0).unwrap(); + let ser_combined = serde_json::to_string_pretty(&combined_pskt).expect("Failed to serialize after adding output"); + println!("Serialized after combining: {}", ser_combined); + + // The PSKT is now ready for handling with the updater role. 
+ let updater_pskt: PSKT = serde_json::from_str(&ser_combined).expect("Failed to deserialize"); + let updater_pskt = updater_pskt.set_sequence(u64::MAX, 0).expect("Failed to set sequence"); + let ser_updated = serde_json::to_string_pretty(&updater_pskt).expect("Failed to serialize after setting sequence"); + println!("Serialized after setting sequence: {}", ser_updated); + + let signer_pskt: PSKT = serde_json::from_str(&ser_updated).expect("Failed to deserialize"); + let mut reused_values = SigHashReusedValues::new(); + let mut sign = |signer_pskt: PSKT, kp: &Keypair| { + signer_pskt + .pass_signature_sync(|tx, sighash| -> Result, String> { + let tx = dbg!(tx); + tx.tx + .inputs + .iter() + .enumerate() + .map(|(idx, _input)| { + let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &mut reused_values); + let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); + Ok(SignInputOk { + signature: Signature::Schnorr(kp.sign_schnorr(msg)), + pub_key: kp.public_key(), + key_source: None, + }) + }) + .collect() + }) + .unwrap() + }; + let signed_0 = sign(signer_pskt.clone(), &kps[0]); + let signed_1 = sign(signer_pskt, &kps[1]); + let combiner_pskt: PSKT = serde_json::from_str(&ser_updated).expect("Failed to deserialize"); + let combined_signed = (combiner_pskt + signed_0).and_then(|combined| combined + signed_1).unwrap(); + let ser_combined_signed = serde_json::to_string_pretty(&combined_signed).expect("Failed to serialize after combining signed"); + println!("Combined Signed: {}", ser_combined_signed); + let pskt_finalizer: PSKT = serde_json::from_str(&ser_combined_signed).expect("Failed to deserialize"); + let pskt_finalizer = pskt_finalizer + .finalize_sync(|inner: &Inner| -> Result>, String> { + Ok(inner + .inputs + .iter() + .map(|input| -> Vec { + // todo actually required count can be retrieved from redeem_script, sigs can be taken from partial sigs according to required count + // considering xpubs sorted order 
+ + let signatures: Vec<_> = kps + .iter() + .flat_map(|kp| { + let sig = input.partial_sigs.get(&kp.public_key()).unwrap().into_bytes(); + iter::once(OpData65).chain(sig).chain([input.sighash_type.to_u8()]) + }) + .collect(); + signatures + .into_iter() + .chain( + ScriptBuilder::new() + .add_data(input.redeem_script.as_ref().unwrap().as_slice()) + .unwrap() + .drain() + .iter() + .cloned(), + ) + .collect() + }) + .collect()) + }) + .unwrap(); + let ser_finalized = serde_json::to_string_pretty(&pskt_finalizer).expect("Failed to serialize after finalizing"); + println!("Finalized: {}", ser_finalized); + + let extractor_pskt: PSKT = serde_json::from_str(&ser_finalized).expect("Failed to deserialize"); + let tx = extractor_pskt.extract_tx().unwrap()(10).0; + let ser_tx = serde_json::to_string_pretty(&tx).unwrap(); + println!("Tx: {}", ser_tx); +} diff --git a/wallet/pskt/src/error.rs b/wallet/pskt/src/error.rs new file mode 100644 index 000000000..504119086 --- /dev/null +++ b/wallet/pskt/src/error.rs @@ -0,0 +1,15 @@ +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error(transparent)] + ConstructorError(#[from] ConstructorError), + #[error("OutputNotModifiable")] + OutOfBounds, +} + +#[derive(thiserror::Error, Debug)] +pub enum ConstructorError { + #[error("InputNotModifiable")] + InputNotModifiable, + #[error("OutputNotModifiable")] + OutputNotModifiable, +} diff --git a/wallet/pskt/src/global.rs b/wallet/pskt/src/global.rs new file mode 100644 index 000000000..8e16b832b --- /dev/null +++ b/wallet/pskt/src/global.rs @@ -0,0 +1,165 @@ +use crate::{utils::combine_if_no_conflicts, KeySource, Version}; +use derive_builder::Builder; +use kaspa_consensus_core::tx::TransactionId; +use serde::{Deserialize, Serialize}; +use std::{ + collections::{btree_map, BTreeMap}, + ops::Add, +}; + +type Xpub = kaspa_bip32::ExtendedPublicKey; + +#[derive(Debug, Clone, Builder, Serialize, Deserialize)] +#[builder(default)] +pub struct Global { + /// The version number of this 
PSKT. + pub version: Version, + /// The version number of the transaction being built. + pub tx_version: u16, + #[builder(setter(strip_option))] + /// The transaction locktime to use if no inputs specify a required locktime. + pub fallback_lock_time: Option, + + pub inputs_modifiable: bool, + pub outputs_modifiable: bool, + + /// The number of inputs in this PSKT. + pub input_count: usize, + /// The number of outputs in this PSKT. + pub output_count: usize, + /// A map from xpub to the used key fingerprint and derivation path as defined by BIP 32. + pub xpubs: BTreeMap, + pub id: Option, + /// Proprietary key-value pairs for this output. + pub proprietaries: BTreeMap, + /// Unknown key-value pairs for this output. + pub unknowns: BTreeMap, +} + +impl Add for Global { + type Output = Result; + + fn add(mut self, rhs: Self) -> Self::Output { + if self.version != rhs.version { + return Err(CombineError::VersionMismatch { this: self.version, that: rhs.version }); + } + if self.tx_version != rhs.tx_version { + return Err(CombineError::TxVersionMismatch { this: self.tx_version, that: rhs.tx_version }); + } + self.fallback_lock_time = match (self.fallback_lock_time, rhs.fallback_lock_time) { + (Some(lhs), Some(rhs)) if lhs != rhs => return Err(CombineError::LockTimeMismatch { this: lhs, that: rhs }), + (Some(v), _) | (_, Some(v)) => Some(v), + _ => None, + }; + // todo discussable, maybe throw error + self.inputs_modifiable &= rhs.inputs_modifiable; + self.outputs_modifiable &= rhs.outputs_modifiable; + self.input_count = self.input_count.max(rhs.input_count); + self.output_count = self.output_count.max(rhs.output_count); + // BIP 174: The Combiner must remove any duplicate key-value pairs, in accordance with + // the specification. It can pick arbitrarily when conflicts occur. 
+ + // Merging xpubs + for (xpub, KeySource { key_fingerprint: fingerprint1, derivation_path: derivation1 }) in rhs.xpubs { + match self.xpubs.entry(xpub) { + btree_map::Entry::Vacant(entry) => { + entry.insert(KeySource::new(fingerprint1, derivation1)); + } + btree_map::Entry::Occupied(mut entry) => { + // Here in case of the conflict we select the version with algorithm: + // 1) if everything is equal we do nothing + // 2) report an error if + // - derivation paths are equal and fingerprints are not + // - derivation paths are of the same length, but not equal + // - derivation paths has different length, but the shorter one + // is not the strict suffix of the longer one + // 3) choose longest derivation otherwise + + let KeySource { key_fingerprint: fingerprint2, derivation_path: derivation2 } = entry.get().clone(); + + if (derivation1 == derivation2 && fingerprint1 == fingerprint2) + || (derivation1.len() < derivation2.len() + && derivation1.as_ref() == &derivation2.as_ref()[derivation2.len() - derivation1.len()..]) + { + continue; + } else if derivation2.as_ref() == &derivation1.as_ref()[derivation1.len() - derivation2.len()..] 
{ + entry.insert(KeySource::new(fingerprint1, derivation1)); + continue; + } + return Err(CombineError::InconsistentKeySources(entry.key().clone())); + } + } + } + self.id = match (self.id, rhs.id) { + (Some(lhs), Some(rhs)) if lhs != rhs => return Err(CombineError::TransactionIdMismatch { this: lhs, that: rhs }), + (Some(v), _) | (_, Some(v)) => Some(v), + _ => None, + }; + + self.proprietaries = + combine_if_no_conflicts(self.proprietaries, rhs.proprietaries).map_err(CombineError::NotCompatibleProprietary)?; + self.unknowns = combine_if_no_conflicts(self.unknowns, rhs.unknowns).map_err(CombineError::NotCompatibleUnknownField)?; + Ok(self) + } +} + +impl Default for Global { + fn default() -> Self { + Global { + version: Version::Zero, + tx_version: kaspa_consensus_core::constants::TX_VERSION, + fallback_lock_time: None, + inputs_modifiable: false, + outputs_modifiable: false, + input_count: 0, + output_count: 0, + xpubs: Default::default(), + id: None, + proprietaries: Default::default(), + unknowns: Default::default(), + } + } +} + +/// Error combining two global maps. +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +pub enum CombineError { + #[error("The version numbers are not the same")] + /// The version numbers are not the same. + VersionMismatch { + /// Attempted to combine a PSKT with `this` version. + this: Version, + /// Into a PSKT with `that` version. + that: Version, + }, + #[error("The transaction version numbers are not the same")] + TxVersionMismatch { + /// Attempted to combine a PSKT with `this` tx version. + this: u16, + /// Into a PSKT with `that` tx version. + that: u16, + }, + #[error("The transaction lock times are not the same")] + LockTimeMismatch { + /// Attempted to combine a PSKT with `this` lock times. + this: u64, + /// Into a PSKT with `that` lock times. + that: u64, + }, + #[error("The transaction ids are not the same")] + TransactionIdMismatch { + /// Attempted to combine a PSKT with `this` tx id. 
+ this: TransactionId, + /// Into a PSKT with `that` tx id. + that: TransactionId, + }, + + #[error("combining PSKT, key-source conflict for xpub {0}")] + /// Xpubs have inconsistent key sources. + InconsistentKeySources(Xpub), + + #[error("Two different unknown field values")] + NotCompatibleUnknownField(crate::utils::Error), + #[error("Two different proprietary values")] + NotCompatibleProprietary(crate::utils::Error), +} diff --git a/wallet/pskt/src/input.rs b/wallet/pskt/src/input.rs new file mode 100644 index 000000000..4c25600a1 --- /dev/null +++ b/wallet/pskt/src/input.rs @@ -0,0 +1,167 @@ +use crate::{ + utils::{combine_if_no_conflicts, Error as CombineMapErr}, + KeySource, PartialSigs, +}; +use derive_builder::Builder; +use kaspa_consensus_core::{ + hashing::sighash_type::{SigHashType, SIG_HASH_ALL}, + tx::{TransactionId, TransactionOutpoint, UtxoEntry}, +}; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, marker::PhantomData, ops::Add}; + +// todo add unknown field? combine them by deduplicating, if there are different values - return error? +#[derive(Builder, Serialize, Deserialize, Debug, Clone)] +#[builder(default)] +#[builder(setter(skip))] +pub struct Input { + #[builder(setter(strip_option))] + pub utxo_entry: Option, + #[builder(setter)] + pub previous_outpoint: TransactionOutpoint, + /// The sequence number of this input. + /// + /// If omitted, assumed to be the final sequence number + pub sequence: Option, + #[builder(setter)] + /// The minimum Unix timestamp that this input requires to be set as the transaction's lock time. + pub min_time: Option, + /// A map from public keys to their corresponding signature as would be + /// pushed to the stack from a scriptSig. + pub partial_sigs: PartialSigs, + #[builder(setter)] + /// The sighash type to be used for this input. Signatures for this input + /// must use the sighash type. 
+ pub sighash_type: SigHashType, + #[serde(with = "kaspa_utils::serde_bytes_optional")] + #[builder(setter(strip_option))] + /// The redeem script for this input. + pub redeem_script: Option>, + #[builder(setter(strip_option))] + pub sig_op_count: Option, + /// A map from public keys needed to sign this input to their corresponding + /// master key fingerprints and derivation paths. + pub bip32_derivations: BTreeMap>, + #[serde(with = "kaspa_utils::serde_bytes_optional")] + /// The finalized, fully-constructed scriptSig with signatures and any other + /// scripts necessary for this input to pass validation. + pub final_script_sig: Option>, + #[serde(skip_serializing, default)] + hidden: PhantomData<()>, // prevents manual filling of fields + #[builder(setter)] + /// Proprietary key-value pairs for this output. + pub proprietaries: BTreeMap, + #[serde(flatten)] + #[builder(setter)] + /// Unknown key-value pairs for this output. + pub unknowns: BTreeMap, +} + +impl Default for Input { + fn default() -> Self { + Self { + utxo_entry: Default::default(), + previous_outpoint: Default::default(), + sequence: Default::default(), + min_time: Default::default(), + partial_sigs: Default::default(), + sighash_type: SIG_HASH_ALL, + redeem_script: Default::default(), + sig_op_count: Default::default(), + bip32_derivations: Default::default(), + final_script_sig: Default::default(), + hidden: Default::default(), + proprietaries: Default::default(), + unknowns: Default::default(), + } + } +} + +impl Add for Input { + type Output = Result; + + fn add(mut self, rhs: Self) -> Self::Output { + if self.previous_outpoint.transaction_id != rhs.previous_outpoint.transaction_id { + return Err(CombineError::PreviousTxidMismatch { + this: self.previous_outpoint.transaction_id, + that: rhs.previous_outpoint.transaction_id, + }); + } + + if self.previous_outpoint.index != rhs.previous_outpoint.index { + return Err(CombineError::SpentOutputIndexMismatch { + this: self.previous_outpoint.index, + 
that: rhs.previous_outpoint.index, + }); + } + self.utxo_entry = match (self.utxo_entry.take(), rhs.utxo_entry) { + (None, None) => None, + (Some(utxo), None) | (None, Some(utxo)) => Some(utxo), + (Some(left), Some(right)) if left == right => Some(left), + (Some(left), Some(right)) => return Err(CombineError::NotCompatibleUtxos { this: left, that: right }), + }; + + // todo discuss merging. if sequence is equal - combine, otherwise use input which has bigger sequence number as is + self.sequence = self.sequence.max(rhs.sequence); + self.min_time = self.min_time.max(rhs.min_time); + self.partial_sigs.extend(rhs.partial_sigs); + // todo combine sighash? or always use sighash all since all signatures must be passed after completion of construction step + // self.sighash_type + + self.redeem_script = match (self.redeem_script.take(), rhs.redeem_script) { + (None, None) => None, + (Some(script), None) | (None, Some(script)) => Some(script), + (Some(script_left), Some(script_right)) if script_left == script_right => Some(script_left), + (Some(script_left), Some(script_right)) => { + return Err(CombineError::NotCompatibleRedeemScripts { this: script_left, that: script_right }) + } + }; + + // todo Does Combiner allowed to change final script sig?? 
+ self.final_script_sig = match (self.final_script_sig.take(), rhs.final_script_sig) { + (None, None) => None, + (Some(script), None) | (None, Some(script)) => Some(script), + (Some(script_left), Some(script_right)) if script_left == script_right => Some(script_left), + (Some(script_left), Some(script_right)) => { + return Err(CombineError::NotCompatibleRedeemScripts { this: script_left, that: script_right }) + } + }; + + self.bip32_derivations = combine_if_no_conflicts(self.bip32_derivations, rhs.bip32_derivations)?; + self.proprietaries = + combine_if_no_conflicts(self.proprietaries, rhs.proprietaries).map_err(CombineError::NotCompatibleProprietary)?; + self.unknowns = combine_if_no_conflicts(self.unknowns, rhs.unknowns).map_err(CombineError::NotCompatibleUnknownField)?; + + Ok(self) + } +} + +/// Error combining two input maps. +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +pub enum CombineError { + #[error("The previous txids are not the same")] + PreviousTxidMismatch { + /// Attempted to combine a PSKT with `this` previous txid. + this: TransactionId, + /// Into a PSKT with `that` previous txid. + that: TransactionId, + }, + #[error("The spent output indexes are not the same")] + SpentOutputIndexMismatch { + /// Attempted to combine a PSKT with `this` spent output index. + this: u32, + /// Into a PSKT with `that` spent output index. 
+ that: u32, + }, + #[error("Two different redeem scripts detected")] + NotCompatibleRedeemScripts { this: Vec, that: Vec }, + #[error("Two different utxos detected")] + NotCompatibleUtxos { this: UtxoEntry, that: UtxoEntry }, + + #[error("Two different derivations for the same key")] + NotCompatibleBip32Derivations(#[from] CombineMapErr>), + #[error("Two different unknown field values")] + NotCompatibleUnknownField(CombineMapErr), + #[error("Two different proprietary values")] + NotCompatibleProprietary(CombineMapErr), +} diff --git a/wallet/pskt/src/lib.rs b/wallet/pskt/src/lib.rs new file mode 100644 index 000000000..e26d5c9ea --- /dev/null +++ b/wallet/pskt/src/lib.rs @@ -0,0 +1,458 @@ +use kaspa_bip32::{secp256k1, DerivationPath, KeyFingerprint}; +use serde::{Deserialize, Serialize}; +use serde_repr::{Deserialize_repr, Serialize_repr}; +use std::{collections::BTreeMap, fmt::Display, fmt::Formatter, future::Future, marker::PhantomData, ops::Deref}; + +mod error; +mod global; +mod input; + +mod output; + +mod role; +mod utils; + +pub use error::Error; +pub use global::{Global, GlobalBuilder}; +pub use input::{Input, InputBuilder}; +use kaspa_consensus_core::tx::UtxoEntry; +use kaspa_consensus_core::{ + hashing::{sighash::SigHashReusedValues, sighash_type::SigHashType}, + subnets::SUBNETWORK_ID_NATIVE, + tx::{MutableTransaction, SignableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutput}, +}; +use kaspa_txscript::{caches::Cache, TxScriptEngine}; +pub use output::{Output, OutputBuilder}; +pub use role::{Combiner, Constructor, Creator, Extractor, Finalizer, Signer, Updater}; + +#[derive(Debug, Default, Serialize, Deserialize, Clone)] +pub struct Inner { + /// The global map. + pub global: Global, + /// The corresponding key-value map for each input in the unsigned transaction. + pub inputs: Vec, + /// The corresponding key-value map for each output in the unsigned transaction. 
+ pub outputs: Vec, +} + +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash, Serialize_repr, Deserialize_repr)] +#[repr(u8)] +pub enum Version { + #[default] + Zero = 0, +} + +impl Display for Version { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Version::Zero => write!(f, "{}", Version::Zero as u8), + } + } +} + +/// Full information on the used extended public key: fingerprint of the +/// master extended public key and a derivation path from it. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub struct KeySource { + #[serde(with = "kaspa_utils::serde_bytes_fixed")] + pub key_fingerprint: KeyFingerprint, + pub derivation_path: DerivationPath, +} + +impl KeySource { + pub fn new(key_fingerprint: KeyFingerprint, derivation_path: DerivationPath) -> Self { + Self { key_fingerprint, derivation_path } + } +} + +pub type PartialSigs = BTreeMap; + +#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Copy, Clone)] +pub enum Signature { + ECDSA(secp256k1::ecdsa::Signature), + Schnorr(secp256k1::schnorr::Signature), +} + +impl Signature { + pub fn into_bytes(self) -> [u8; 64] { + match self { + Signature::ECDSA(s) => s.serialize_compact(), + Signature::Schnorr(s) => s.serialize(), + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct PSKT { + #[serde(flatten)] + inner_pskt: Inner, + #[serde(skip_serializing, default)] + role: PhantomData, +} + +impl Clone for PSKT { + fn clone(&self) -> Self { + PSKT { inner_pskt: self.inner_pskt.clone(), role: Default::default() } + } +} + +impl Deref for PSKT { + type Target = Inner; + + fn deref(&self) -> &Self::Target { + &self.inner_pskt + } +} + +impl PSKT { + fn unsigned_tx(&self) -> SignableTransaction { + let tx = Transaction::new( + self.global.tx_version, + self.inputs + .iter() + .map(|Input { previous_outpoint, sequence, sig_op_count, .. 
}| TransactionInput { + previous_outpoint: *previous_outpoint, + signature_script: vec![], + sequence: sequence.unwrap_or(u64::MAX), + sig_op_count: sig_op_count.unwrap_or(0), + }) + .collect(), + self.outputs + .iter() + .map(|Output { amount, script_public_key, .. }: &Output| TransactionOutput { + value: *amount, + script_public_key: script_public_key.clone(), + }) + .collect(), + self.determine_lock_time(), + SUBNETWORK_ID_NATIVE, + 0, + vec![], + ); + let entries = self.inputs.iter().filter_map(|Input { utxo_entry, .. }| utxo_entry.clone()).collect(); + SignableTransaction::with_entries(tx, entries) + } + + fn calculate_id_internal(&self) -> TransactionId { + self.unsigned_tx().tx.id() + } + + fn determine_lock_time(&self) -> u64 { + self.inputs.iter().map(|input: &Input| input.min_time).max().unwrap_or(self.global.fallback_lock_time).unwrap_or(0) + } +} + +impl Default for PSKT { + fn default() -> Self { + PSKT { inner_pskt: Default::default(), role: Default::default() } + } +} + +impl PSKT { + /// Sets the fallback lock time. + pub fn fallback_lock_time(mut self, fallback: u64) -> Self { + self.inner_pskt.global.fallback_lock_time = Some(fallback); + self + } + + // todo generic const + /// Sets the inputs modifiable bit in the transaction modifiable flags. + pub fn inputs_modifiable(mut self) -> Self { + self.inner_pskt.global.inputs_modifiable = true; + self + } + // todo generic const + /// Sets the outputs modifiable bit in the transaction modifiable flags. + pub fn outputs_modifiable(mut self) -> Self { + self.inner_pskt.global.outputs_modifiable = true; + self + } + + pub fn constructor(self) -> PSKT { + PSKT { inner_pskt: self.inner_pskt, role: Default::default() } + } +} + +impl PSKT { + // todo generic const + /// Marks that the `PSKT` can not have any more inputs added to it. 
+ pub fn no_more_inputs(mut self) -> Self { + self.inner_pskt.global.inputs_modifiable = false; + self + } + // todo generic const + /// Marks that the `PSKT` can not have any more outputs added to it. + pub fn no_more_outputs(mut self) -> Self { + self.inner_pskt.global.outputs_modifiable = false; + self + } + + /// Adds an input to the PSKT. + pub fn input(mut self, input: Input) -> Self { + self.inner_pskt.inputs.push(input); + self.inner_pskt.global.input_count += 1; + self + } + + /// Adds an output to the PSKT. + pub fn output(mut self, output: Output) -> Self { + self.inner_pskt.outputs.push(output); + self.inner_pskt.global.output_count += 1; + self + } + + /// Returns a PSKT [`Updater`] once construction is completed. + pub fn updater(self) -> PSKT { + let pskt = self.no_more_inputs().no_more_outputs(); + PSKT { inner_pskt: pskt.inner_pskt, role: Default::default() } + } + + pub fn signer(self) -> PSKT { + self.updater().signer() + } + + pub fn combiner(self) -> PSKT { + PSKT { inner_pskt: self.inner_pskt, role: Default::default() } + } +} + +impl PSKT { + pub fn set_sequence(mut self, n: u64, input_index: usize) -> Result { + self.inner_pskt.inputs.get_mut(input_index).ok_or(Error::OutOfBounds)?.sequence = Some(n); + Ok(self) + } + + pub fn signer(self) -> PSKT { + PSKT { inner_pskt: self.inner_pskt, role: Default::default() } + } + + pub fn combiner(self) -> PSKT { + PSKT { inner_pskt: self.inner_pskt, role: Default::default() } + } +} + +impl PSKT { + // todo use iterator instead of vector + pub fn pass_signature_sync(mut self, sign_fn: SignFn) -> Result + where + E: Display, + SignFn: FnOnce(SignableTransaction, Vec) -> Result, E>, + { + let unsigned_tx = self.unsigned_tx(); + let sighashes = self.inputs.iter().map(|input| input.sighash_type).collect(); + self.inner_pskt.inputs.iter_mut().zip(sign_fn(unsigned_tx, sighashes)?).for_each( + |(input, SignInputOk { signature, pub_key, key_source })| { + input.bip32_derivations.insert(pub_key, key_source); + 
input.partial_sigs.insert(pub_key, signature); + }, + ); + + Ok(self) + } + // todo use iterator instead of vector + pub async fn pass_signature(mut self, sign_fn: SignFn) -> Result + where + E: Display, + Fut: Future, E>>, + SignFn: FnOnce(SignableTransaction, Vec) -> Fut, + { + let unsigned_tx = self.unsigned_tx(); + let sighashes = self.inputs.iter().map(|input| input.sighash_type).collect(); + self.inner_pskt.inputs.iter_mut().zip(sign_fn(unsigned_tx, sighashes).await?).for_each( + |(input, SignInputOk { signature, pub_key, key_source })| { + input.bip32_derivations.insert(pub_key, key_source); + input.partial_sigs.insert(pub_key, signature); + }, + ); + Ok(self) + } + + pub fn calculate_id(&self) -> TransactionId { + self.calculate_id_internal() + } + + pub fn finalizer(self) -> PSKT { + PSKT { inner_pskt: self.inner_pskt, role: Default::default() } + } + + pub fn combiner(self) -> PSKT { + PSKT { inner_pskt: self.inner_pskt, role: Default::default() } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SignInputOk { + pub signature: Signature, + pub pub_key: secp256k1::PublicKey, + pub key_source: Option, +} + +impl std::ops::Add> for PSKT { + type Output = Result; + + fn add(mut self, mut rhs: PSKT) -> Self::Output { + self.inner_pskt.global = (self.inner_pskt.global + rhs.inner_pskt.global)?; + macro_rules! 
combine { + ($left:expr, $right:expr, $err: ty) => { + if $left.len() > $right.len() { + $left.iter_mut().zip($right.iter_mut()).try_for_each(|(left, right)| -> Result<(), $err> { + *left = (std::mem::take(left) + std::mem::take(right))?; + Ok(()) + })?; + $left + } else { + $right.iter_mut().zip($left.iter_mut()).try_for_each(|(left, right)| -> Result<(), $err> { + *left = (std::mem::take(left) + std::mem::take(right))?; + Ok(()) + })?; + $right + } + }; + } + // todo add sort to build deterministic combination + self.inner_pskt.inputs = combine!(self.inner_pskt.inputs, rhs.inner_pskt.inputs, input::CombineError); + self.inner_pskt.outputs = combine!(self.inner_pskt.outputs, rhs.inner_pskt.outputs, output::CombineError); + Ok(self) + } +} + +impl PSKT { + pub fn signer(self) -> PSKT { + PSKT { inner_pskt: self.inner_pskt, role: Default::default() } + } + pub fn finalizer(self) -> PSKT { + PSKT { inner_pskt: self.inner_pskt, role: Default::default() } + } +} + +impl PSKT { + pub fn finalize_sync( + self, + final_sig_fn: impl FnOnce(&Inner) -> Result>, E>, + ) -> Result> { + let sigs = final_sig_fn(&self); + self.finalize_internal(sigs) + } + + pub async fn finalize(self, final_sig_fn: F) -> Result> + where + E: Display, + F: FnOnce(&Inner) -> Fut, + Fut: Future>, E>>, + { + let sigs = final_sig_fn(&self).await; + self.finalize_internal(sigs) + } + + pub fn id(&self) -> Option { + self.global.id + } + + pub fn extractor(self) -> Result, TxNotFinalized> { + if self.global.id.is_none() { + Err(TxNotFinalized {}) + } else { + Ok(PSKT { inner_pskt: self.inner_pskt, role: Default::default() }) + } + } + + fn finalize_internal(mut self, sigs: Result>, E>) -> Result> { + let sigs = sigs?; + if sigs.len() != self.inputs.len() { + return Err(FinalizeError::WrongFinalizedSigsCount { expected: self.inputs.len(), actual: sigs.len() }); + } + self.inner_pskt.inputs.iter_mut().enumerate().zip(sigs).try_for_each(|((idx, input), sig)| { + if sig.is_empty() { + return 
Err(FinalizeError::EmptySignature(idx)); + } + input.sequence = Some(input.sequence.unwrap_or(u64::MAX)); // todo discussable + input.final_script_sig = Some(sig); + Ok(()) + })?; + self.inner_pskt.global.id = Some(self.calculate_id_internal()); + Ok(self) + } +} + +impl PSKT { + pub fn extract_tx_unchecked(self) -> Result (Transaction, Vec>), TxNotFinalized> { + let tx = self.unsigned_tx(); + let entries = tx.entries; + let mut tx = tx.tx; + tx.inputs.iter_mut().zip(self.inner_pskt.inputs).try_for_each(|(dest, src)| { + dest.signature_script = src.final_script_sig.ok_or(TxNotFinalized {})?; + Ok(()) + })?; + Ok(move |mass| { + tx.set_mass(mass); + (tx, entries) + }) + } + + pub fn extract_tx(self) -> Result (Transaction, Vec>), ExtractError> { + let (tx, entries) = self.extract_tx_unchecked()?(0); + + let tx = MutableTransaction::with_entries(tx, entries.into_iter().flatten().collect()); + use kaspa_consensus_core::tx::VerifiableTransaction; + { + let tx = tx.as_verifiable(); + let cache = Cache::new(10_000); + let mut reused_values = SigHashReusedValues::new(); + + tx.populated_inputs().enumerate().try_for_each(|(idx, (input, entry))| { + TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &mut reused_values, &cache)?.execute()?; + >::Ok(()) + })?; + } + let entries = tx.entries; + let tx = tx.tx; + let closure = move |mass| { + tx.set_mass(mass); + (tx, entries) + }; + Ok(closure) + } +} + +/// Error combining pskt. 
+#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +pub enum CombineError { + #[error(transparent)] + Global(#[from] global::CombineError), + #[error(transparent)] + Inputs(#[from] input::CombineError), + #[error(transparent)] + Outputs(#[from] output::CombineError), +} + +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +pub enum FinalizeError { + #[error("Signatures count mismatch")] + WrongFinalizedSigsCount { expected: usize, actual: usize }, + #[error("Signatures at index: {0} is empty")] + EmptySignature(usize), + #[error(transparent)] + FinalaziCb(#[from] E), +} + +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +pub enum ExtractError { + #[error(transparent)] + TxScriptError(#[from] kaspa_txscript_errors::TxScriptError), + #[error(transparent)] + TxNotFinalized(#[from] TxNotFinalized), +} + +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +#[error("Transaction is not finalized")] +pub struct TxNotFinalized {} + +#[cfg(test)] +mod tests { + + // #[test] + // fn it_works() { + // let result = add(2, 2); + // assert_eq!(result, 4); + // } +} diff --git a/wallet/pskt/src/output.rs b/wallet/pskt/src/output.rs new file mode 100644 index 000000000..952b63d3f --- /dev/null +++ b/wallet/pskt/src/output.rs @@ -0,0 +1,82 @@ +use crate::utils::combine_if_no_conflicts; +use crate::KeySource; +use derive_builder::Builder; +use kaspa_consensus_core::tx::ScriptPublicKey; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, ops::Add}; + +#[derive(Builder, Default, Serialize, Deserialize, Clone, Debug)] +#[builder(default)] +pub struct Output { + /// The output's amount (serialized as sompi). + pub amount: u64, + /// The script for this output, also known as the scriptPubKey. + pub script_public_key: ScriptPublicKey, + #[builder(setter(strip_option))] + #[serde(with = "kaspa_utils::serde_bytes_optional")] + /// The redeem script for this output. 
+ pub redeem_script: Option>, + /// A map from public keys needed to spend this output to their + /// corresponding master key fingerprints and derivation paths. + pub bip32_derivations: BTreeMap>, + /// Proprietary key-value pairs for this output. + pub proprietaries: BTreeMap, + #[serde(flatten)] + /// Unknown key-value pairs for this output. + pub unknowns: BTreeMap, +} + +impl Add for Output { + type Output = Result; + + fn add(mut self, rhs: Self) -> Self::Output { + if self.amount != rhs.amount { + return Err(CombineError::AmountMismatch { this: self.amount, that: rhs.amount }); + } + if self.script_public_key != rhs.script_public_key { + return Err(CombineError::ScriptPubkeyMismatch { this: self.script_public_key, that: rhs.script_public_key }); + } + self.redeem_script = match (self.redeem_script.take(), rhs.redeem_script) { + (None, None) => None, + (Some(script), None) | (None, Some(script)) => Some(script), + (Some(script_left), Some(script_right)) if script_left == script_right => Some(script_left), + (Some(script_left), Some(script_right)) => { + return Err(CombineError::NotCompatibleRedeemScripts { this: script_left, that: script_right }) + } + }; + self.bip32_derivations = combine_if_no_conflicts(self.bip32_derivations, rhs.bip32_derivations)?; + self.proprietaries = + combine_if_no_conflicts(self.proprietaries, rhs.proprietaries).map_err(CombineError::NotCompatibleProprietary)?; + self.unknowns = combine_if_no_conflicts(self.unknowns, rhs.unknowns).map_err(CombineError::NotCompatibleUnknownField)?; + + Ok(self) + } +} + +/// Error combining two output maps. +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +pub enum CombineError { + #[error("The amounts are not the same")] + AmountMismatch { + /// Attempted to combine a PSKT with `this` previous txid. + this: u64, + /// Into a PSKT with `that` previous txid. 
+ that: u64, + }, + #[error("The script_pubkeys are not the same")] + ScriptPubkeyMismatch { + /// Attempted to combine a PSKT with `this` script_pubkey. + this: ScriptPublicKey, + /// Into a PSKT with `that` script_pubkey. + that: ScriptPublicKey, + }, + #[error("Two different redeem scripts detected")] + NotCompatibleRedeemScripts { this: Vec, that: Vec }, + + #[error("Two different derivations for the same key")] + NotCompatibleBip32Derivations(#[from] crate::utils::Error>), + #[error("Two different unknown field values")] + NotCompatibleUnknownField(crate::utils::Error), + #[error("Two different proprietary values")] + NotCompatibleProprietary(crate::utils::Error), +} diff --git a/wallet/pskt/src/role.rs b/wallet/pskt/src/role.rs new file mode 100644 index 000000000..84f55bb04 --- /dev/null +++ b/wallet/pskt/src/role.rs @@ -0,0 +1,27 @@ +/// Initializes the PSKT with 0 inputs and 0 outputs. +/// Reference: [BIP-370: Creator](https://github.com/bitcoin/bips/blob/master/bip-0370.mediawiki#creator) +pub enum Creator {} + +/// Adds inputs and outputs to the PSKT. +/// Reference: [BIP-370: Constructor](https://github.com/bitcoin/bips/blob/master/bip-0370.mediawiki#constructor) +pub enum Constructor {} + +/// Can set the sequence number. +/// Reference: [BIP-370: Updater](https://github.com/bitcoin/bips/blob/master/bip-0370.mediawiki#updater) +pub enum Updater {} + +/// Creates cryptographic signatures for the inputs using private keys. +/// Reference: [BIP-370: Signer](https://github.com/bitcoin/bips/blob/master/bip-0370.mediawiki#signer) +pub enum Signer {} + +/// Merges multiple PSKTs into one. +/// Reference: [BIP-174: Combiner](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki#combiner) +pub enum Combiner {} + +/// Completes the PSKT, ensuring all inputs have valid signatures, and finalizes the transaction. 
+/// Reference: [BIP-174: Input Finalizer](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki#input-finalizer) +pub enum Finalizer {} + +/// Extracts the final transaction from the PSKT once all parts are in place and the PSKT is fully signed. +/// Reference: [BIP-370: Transaction Extractor](https://github.com/bitcoin/bips/blob/master/bip-0370.mediawiki#transaction-extractor) +pub enum Extractor {} diff --git a/wallet/pskt/src/utils.rs b/wallet/pskt/src/utils.rs new file mode 100644 index 000000000..28b7959ed --- /dev/null +++ b/wallet/pskt/src/utils.rs @@ -0,0 +1,29 @@ +use std::collections::BTreeMap; + +// todo optimize without cloning +pub fn combine_if_no_conflicts(mut lhs: BTreeMap, rhs: BTreeMap) -> Result, Error> +where + V: Eq + Clone, + K: Ord + Clone, +{ + if lhs.len() > rhs.len() { + if let Some((field, rhs, lhs)) = + rhs.iter().map(|(k, v)| (k, v, lhs.get(k))).find(|(_, v, rhs_v)| rhs_v.is_some_and(|rv| rv != *v)) + { + Err(Error { field: field.clone(), lhs: lhs.unwrap().clone(), rhs: rhs.clone() }) + } else { + lhs.extend(rhs); + Ok(lhs) + } + } else { + combine_if_no_conflicts(rhs, lhs) + } +} + +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +#[error("Conflict")] +pub struct Error { + pub field: K, + pub lhs: V, + pub rhs: V, +} From d41073b7608ca2ac65a4f7bc6f831adaeb4f6b19 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 09:21:09 +0000 Subject: [PATCH 38/50] Squashed commit of the following: commit 5b59ea98d2d807e1ceff4f458b19f3f37d321be2 Author: max143672 Date: Tue Jun 25 19:28:11 2024 +0300 suppress warnings commit ed3f76bd9f1a928dd1cb72ba8b3355e0b90de8de Author: max143672 Date: Tue Jun 25 15:27:17 2024 +0300 style: fmt commit 0e4629dfcdaad279f79fff66202ba569b017dc23 Author: max143672 Date: Tue Jun 25 15:26:07 2024 +0300 add bench with custom threadpool commit 5dc827b85140985a2cbfdd31686b6ca24682c882 Author: max143672 Date: Tue Jun 25 15:20:28 2024 +0300 fix clippy commit 
1e6fd8ec493e361bf6f7d1ba61fbb7764794616b Author: max143672 Date: Tue Jun 25 11:51:54 2024 +0300 refactor check_scripts fn, fix tests commit 5b76a6445f576c918783c52d29a8456d41af6644 Author: max143672 Date: Mon Jun 24 22:52:23 2024 +0300 apply par iter to `check_scripts` commit 93c5b1adafe9e0ff7c28c2c48236985a9b643175 Author: max143672 Date: Mon Jun 24 01:16:51 2024 +0300 remove scc commit de650c32242dd2a6e44283e44c4fc94d3405e816 Author: max143672 Date: Mon Jun 24 01:07:00 2024 +0300 rollback rwlock and indexmap. commit 5f3fe0280de7a8145e7e51856acb2b61e003ccf9 Author: max143672 Date: Mon Jun 24 00:37:12 2024 +0300 dont apply cache commit 258b40fa7057a93886765c7dc398fc37844bf1dc Author: max143672 Date: Sun Jun 23 14:39:59 2024 +0300 use hashcache commit 3edd435b8109b7311c6f6ea89de964acd7e1ab85 Author: max143672 Date: Sun Jun 23 00:28:22 2024 +0300 use concurrent cache commit 0c2acdafbd154f7acbe946df20f9dcac7421b432 Author: max143672 Date: Sun Jun 23 00:08:52 2024 +0300 use upgreadable read commit 91364d132cedd69b95e0eed310bbcfd0ff88f127 Author: max143672 Date: Sun Jun 23 00:06:52 2024 +0300 fix benches commit d3d0716bac1881fa042853f64582a48a4c402b18 Author: max143672 Date: Sat Jun 22 11:54:43 2024 +0300 fix par versions commit b756df4305ccfdae95546ab279cde0f8fa9e38ff Author: max143672 Date: Fri Jun 21 22:39:28 2024 +0300 use cache per iteration per function commit cfcd7e13b12e080a65761004f6a7d3b2033804a5 Author: max143672 Date: Fri Jun 21 21:38:07 2024 +0300 benches are implemented commit 25f1087c6caaad446c5f91e32ac770e3bf9d2d9a Author: max143672 Date: Fri Jun 21 20:22:14 2024 +0300 sighash reused trait --- Cargo.lock | 2 + consensus/Cargo.toml | 5 + consensus/benches/check_scripts.rs | 126 +++++++++++ consensus/client/src/sign.rs | 6 +- consensus/client/src/signing.rs | 10 +- consensus/core/Cargo.toml | 1 + consensus/core/src/hashing/sighash.rs | 195 +++++++++++++----- consensus/core/src/sign.rs | 18 +- .../pipeline/virtual_processor/processor.rs | 9 +- 
.../transaction_validator_populated.rs | 84 ++++++-- consensus/wasm/src/utils.rs | 6 +- crypto/txscript/src/caches.rs | 2 +- crypto/txscript/src/lib.rs | 58 +++--- crypto/txscript/src/opcodes/macros.rs | 22 +- crypto/txscript/src/opcodes/mod.rs | 70 ++++--- crypto/txscript/src/standard/multisig.rs | 10 +- .../src/mempool/check_transaction_standard.rs | 9 +- wallet/pskt/examples/multisig.rs | 8 +- wallet/pskt/src/lib.rs | 7 +- wasm/build/docs/typedoc.json | 4 +- 20 files changed, 487 insertions(+), 165 deletions(-) create mode 100644 consensus/benches/check_scripts.rs diff --git a/Cargo.lock b/Cargo.lock index b70409bb7..0716fa34d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2601,6 +2601,7 @@ dependencies = [ "futures-util", "indexmap 2.2.6", "itertools 0.11.0", + "kaspa-addresses", "kaspa-consensus-core", "kaspa-consensus-notify", "kaspa-consensusmanager", @@ -2662,6 +2663,7 @@ dependencies = [ name = "kaspa-consensus-core" version = "0.14.2" dependencies = [ + "arc-swap", "async-trait", "bincode", "borsh", diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index f151e404b..7082ed7b1 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -53,11 +53,16 @@ serde_json.workspace = true flate2.workspace = true rand_distr.workspace = true kaspa-txscript-errors.workspace = true +kaspa-addresses.workspace = true [[bench]] name = "hash_benchmarks" harness = false +[[bench]] +name = "check_scripts" +harness = false + [features] html_reports = [] devnet-prealloc = ["kaspa-consensus-core/devnet-prealloc"] diff --git a/consensus/benches/check_scripts.rs b/consensus/benches/check_scripts.rs new file mode 100644 index 000000000..b6a8402d4 --- /dev/null +++ b/consensus/benches/check_scripts.rs @@ -0,0 +1,126 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion, SamplingMode}; +use kaspa_addresses::{Address, Prefix, Version}; +use kaspa_consensus::processes::transaction_validator::transaction_validator_populated::{ + check_scripts_par_iter, 
check_scripts_par_iter_thread, check_scripts_single_threaded, +}; +use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}; +use kaspa_consensus_core::hashing::sighash_type::SIG_HASH_ALL; +use kaspa_consensus_core::subnets::SubnetworkId; +use kaspa_consensus_core::tx::{MutableTransaction, Transaction, TransactionInput, TransactionOutpoint, UtxoEntry}; +use kaspa_txscript::caches::Cache; +use kaspa_txscript::pay_to_address_script; +use rand::{thread_rng, Rng}; +use secp256k1::Keypair; +use std::thread::available_parallelism; + +// You may need to add more detailed mocks depending on your actual code. +fn mock_tx(inputs_count: usize, non_uniq_signatures: usize) -> (Transaction, Vec) { + let reused_values = SigHashReusedValuesUnsync::new(); + let dummy_prev_out = TransactionOutpoint::new(kaspa_hashes::Hash::from_u64_word(1), 1); + let mut tx = Transaction::new( + 0, + vec![], + vec![], + 0, + SubnetworkId::from_bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), + 0, + vec![], + ); + let mut utxos = vec![]; + let mut kps = vec![]; + for _ in 0..inputs_count - non_uniq_signatures { + let kp = Keypair::new(secp256k1::SECP256K1, &mut thread_rng()); + tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0, sig_op_count: 1 }); + let address = Address::new(Prefix::Mainnet, Version::PubKey, &kp.x_only_public_key().0.serialize()); + utxos.push(UtxoEntry { + amount: thread_rng().gen::() as u64, + script_public_key: pay_to_address_script(&address), + block_daa_score: 333, + is_coinbase: false, + }); + kps.push(kp); + } + for _ in 0..non_uniq_signatures { + let kp = kps.last().unwrap(); + tx.inputs.push(TransactionInput { previous_outpoint: dummy_prev_out, signature_script: vec![], sequence: 0, sig_op_count: 1 }); + let address = Address::new(Prefix::Mainnet, Version::PubKey, &kp.x_only_public_key().0.serialize()); + utxos.push(UtxoEntry { + amount: 
thread_rng().gen::() as u64, + script_public_key: pay_to_address_script(&address), + block_daa_score: 444, + is_coinbase: false, + }); + } + for (i, kp) in kps.iter().enumerate().take(inputs_count - non_uniq_signatures) { + let mut_tx = MutableTransaction::with_entries(&tx, utxos.clone()); + let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref(); + // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) + tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect(); + } + let length = tx.inputs.len(); + for i in (inputs_count - non_uniq_signatures)..length { + let kp = kps.last().unwrap(); + let mut_tx = MutableTransaction::with_entries(&tx, utxos.clone()); + let sig_hash = calc_schnorr_signature_hash(&mut_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); + let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); + let sig: [u8; 64] = *kp.sign_schnorr(msg).as_ref(); + // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) + tx.inputs[i].signature_script = std::iter::once(65u8).chain(sig).chain([SIG_HASH_ALL.to_u8()]).collect(); + } + (tx, utxos) +} + +fn benchmark_check_scripts(c: &mut Criterion) { + for inputs_count in [100, 50, 25, 10, 5, 2] { + for non_uniq_signatures in [0, inputs_count / 2] { + let (tx, utxos) = mock_tx(inputs_count, non_uniq_signatures); + let mut group = c.benchmark_group(format!("inputs: {inputs_count}, non uniq: {non_uniq_signatures}")); + group.sampling_mode(SamplingMode::Flat); + + group.bench_function("single_thread", |b| { + let tx = MutableTransaction::with_entries(&tx, utxos.clone()); + let cache = Cache::new(inputs_count as u64); + b.iter(|| { + cache.map.write().clear(); + 
check_scripts_single_threaded(black_box(&cache), black_box(&tx.as_verifiable())).unwrap(); + }) + }); + + group.bench_function("rayon par iter", |b| { + let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); + let cache = Cache::new(inputs_count as u64); + b.iter(|| { + cache.map.write().clear(); + check_scripts_par_iter(black_box(&cache), black_box(&tx.as_verifiable())).unwrap(); + }) + }); + + for i in (2..=available_parallelism().unwrap().get()).step_by(2) { + if inputs_count >= i { + group.bench_function(&format!("rayon, custom threadpool, thread count {i}"), |b| { + let tx = MutableTransaction::with_entries(tx.clone(), utxos.clone()); + let cache = Cache::new(inputs_count as u64); + let pool = rayon::ThreadPoolBuilder::new().num_threads(i).build().unwrap(); + b.iter(|| { + // Create a custom thread pool with the specified number of threads + cache.map.write().clear(); + check_scripts_par_iter_thread(black_box(&cache), black_box(&tx.as_verifiable()), black_box(&pool)) + .unwrap(); + }) + }); + } + } + } + } +} + +criterion_group! { + name = benches; + // This can be any expression that returns a `Criterion` object. 
+ config = Criterion::default().with_output_color(true).measurement_time(std::time::Duration::new(20, 0)); + targets = benchmark_check_scripts +} + +criterion_main!(benches); diff --git a/consensus/client/src/sign.rs b/consensus/client/src/sign.rs index fdab66a60..a92abb90d 100644 --- a/consensus/client/src/sign.rs +++ b/consensus/client/src/sign.rs @@ -3,7 +3,7 @@ use core::iter::once; use itertools::Itertools; use kaspa_consensus_core::{ hashing::{ - sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::SIG_HASH_ALL, }, tx::PopulatedTransaction, @@ -40,7 +40,7 @@ pub fn sign_with_multiple_v3(tx: Transaction, privkeys: &[[u8; 32]]) -> crate::r map.insert(script_pub_key_script, schnorr_key); } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut additional_signatures_required = false; { let input_len = tx.inner().inputs.len(); @@ -55,7 +55,7 @@ pub fn sign_with_multiple_v3(tx: Transaction, privkeys: &[[u8; 32]]) -> crate::r }; let script = script_pub_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&populated_transaction, i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&populated_transaction, i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) diff --git a/consensus/client/src/signing.rs b/consensus/client/src/signing.rs index ef993d011..f7fe8cee6 100644 --- a/consensus/client/src/signing.rs +++ b/consensus/client/src/signing.rs @@ -75,7 +75,7 @@ impl SigHashCache { } } - pub fn sig_op_counts_hash(&mut self, tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { 
+ pub fn sig_op_counts_hash(&mut self, tx: &Transaction, hash_type: SigHashType, reused_values: &SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } @@ -185,16 +185,16 @@ pub fn calc_schnorr_signature_hash( let mut hasher = TransactionSigningHash::new(); hasher .write_u16(tx.version) - .update(previous_outputs_hash(&tx, hash_type, &mut reused_values)) - .update(sequences_hash(&tx, hash_type, &mut reused_values)) - .update(sig_op_counts_hash(&tx, hash_type, &mut reused_values)); + .update(previous_outputs_hash(&tx, hash_type, &reused_values)) + .update(sequences_hash(&tx, hash_type, &reused_values)) + .update(sig_op_counts_hash(&tx, hash_type, &reused_values)); hash_outpoint(&mut hasher, input.previous_outpoint); hash_script_public_key(&mut hasher, &utxo.script_public_key); hasher .write_u64(utxo.amount) .write_u64(input.sequence) .write_u8(input.sig_op_count) - .update(outputs_hash(&tx, hash_type, &mut reused_values, input_index)) + .update(outputs_hash(&tx, hash_type, &reused_values, input_index)) .write_u64(tx.lock_time) .update(&tx.subnetwork_id) .write_u64(tx.gas) diff --git a/consensus/core/Cargo.toml b/consensus/core/Cargo.toml index 4e4bd9ea0..eb22eb728 100644 --- a/consensus/core/Cargo.toml +++ b/consensus/core/Cargo.toml @@ -15,6 +15,7 @@ wasm32-sdk = [] default = [] [dependencies] +arc-swap.workspace = true async-trait.workspace = true borsh.workspace = true cfg-if.workspace = true diff --git a/consensus/core/src/hashing/sighash.rs b/consensus/core/src/hashing/sighash.rs index c1b6133e8..f237e773c 100644 --- a/consensus/core/src/hashing/sighash.rs +++ b/consensus/core/src/hashing/sighash.rs @@ -1,4 +1,7 @@ +use arc_swap::ArcSwapOption; use kaspa_hashes::{Hash, Hasher, HasherBase, TransactionSigningHash, TransactionSigningHashECDSA, ZERO_HASH}; +use std::cell::Cell; +use std::sync::Arc; use crate::{ subnets::SUBNETWORK_ID_NATIVE, @@ -11,72 +14,174 @@ use super::{sighash_type::SigHashType, HasherExtensions}; /// the 
same for all transaction inputs. /// Reuse of such values prevents the quadratic hashing problem. #[derive(Default)] -pub struct SigHashReusedValues { - previous_outputs_hash: Option, - sequences_hash: Option, - sig_op_counts_hash: Option, - outputs_hash: Option, +pub struct SigHashReusedValuesUnsync { + previous_outputs_hash: Cell>, + sequences_hash: Cell>, + sig_op_counts_hash: Cell>, + outputs_hash: Cell>, } -impl SigHashReusedValues { +impl SigHashReusedValuesUnsync { pub fn new() -> Self { - Self { previous_outputs_hash: None, sequences_hash: None, sig_op_counts_hash: None, outputs_hash: None } + Self::default() } } -pub fn previous_outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +#[derive(Default)] +pub struct SigHashReusedValuesSync { + previous_outputs_hash: ArcSwapOption, + sequences_hash: ArcSwapOption, + sig_op_counts_hash: ArcSwapOption, + outputs_hash: ArcSwapOption, +} + +impl SigHashReusedValuesSync { + pub fn new() -> Self { + Self::default() + } +} + +pub trait SigHashReusedValues { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash; + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash; + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash; + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash; +} + +impl SigHashReusedValues for Arc { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().previous_outputs_hash(set) + } + + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().sequences_hash(set) + } + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().sig_op_counts_hash(set) + } + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.as_ref().outputs_hash(set) + } +} + +impl SigHashReusedValues for SigHashReusedValuesUnsync { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.previous_outputs_hash.get().unwrap_or_else(|| { + let hash = 
set(); + self.previous_outputs_hash.set(Some(hash)); + hash + }) + } + + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.sequences_hash.get().unwrap_or_else(|| { + let hash = set(); + self.sequences_hash.set(Some(hash)); + hash + }) + } + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.sig_op_counts_hash.get().unwrap_or_else(|| { + let hash = set(); + self.sig_op_counts_hash.set(Some(hash)); + hash + }) + } + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + self.outputs_hash.get().unwrap_or_else(|| { + let hash = set(); + self.outputs_hash.set(Some(hash)); + hash + }) + } +} + +impl SigHashReusedValues for SigHashReusedValuesSync { + fn previous_outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.previous_outputs_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.previous_outputs_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn sequences_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.sequences_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.sequences_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn sig_op_counts_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.sig_op_counts_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.sig_op_counts_hash.rcu(|_| Arc::new(hash)); + hash + } + + fn outputs_hash(&self, set: impl Fn() -> Hash) -> Hash { + if let Some(value) = self.outputs_hash.load().as_ref() { + return **value; + } + let hash = set(); + self.outputs_hash.rcu(|_| Arc::new(hash)); + hash + } +} + +pub fn previous_outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } - - if let Some(previous_outputs_hash) = reused_values.previous_outputs_hash { - previous_outputs_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in 
tx.inputs.iter() { hasher.update(input.previous_outpoint.transaction_id.as_bytes()); hasher.write_u32(input.previous_outpoint.index); } - let previous_outputs_hash = hasher.finalize(); - reused_values.previous_outputs_hash = Some(previous_outputs_hash); - previous_outputs_hash - } + hasher.finalize() + }; + reused_values.previous_outputs_hash(hash) } -pub fn sequences_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +pub fn sequences_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_single() || hash_type.is_sighash_anyone_can_pay() || hash_type.is_sighash_none() { return ZERO_HASH; } - - if let Some(sequences_hash) = reused_values.sequences_hash { - sequences_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in tx.inputs.iter() { hasher.write_u64(input.sequence); } - let sequence_hash = hasher.finalize(); - reused_values.sequences_hash = Some(sequence_hash); - sequence_hash - } + hasher.finalize() + }; + reused_values.sequences_hash(hash) } -pub fn sig_op_counts_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues) -> Hash { +pub fn sig_op_counts_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues) -> Hash { if hash_type.is_sighash_anyone_can_pay() { return ZERO_HASH; } - if let Some(sig_op_counts_hash) = reused_values.sig_op_counts_hash { - sig_op_counts_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for input in tx.inputs.iter() { hasher.write_u8(input.sig_op_count); } - let sig_op_counts_hash = hasher.finalize(); - reused_values.sig_op_counts_hash = Some(sig_op_counts_hash); - sig_op_counts_hash - } + hasher.finalize() + }; + reused_values.sig_op_counts_hash(hash) } pub fn payload_hash(tx: &Transaction) -> Hash { @@ -92,7 +197,7 @@ pub fn payload_hash(tx: &Transaction) -> Hash { hasher.finalize() } 
-pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mut SigHashReusedValues, input_index: usize) -> Hash { +pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &impl SigHashReusedValues, input_index: usize) -> Hash { if hash_type.is_sighash_none() { return ZERO_HASH; } @@ -107,19 +212,15 @@ pub fn outputs_hash(tx: &Transaction, hash_type: SigHashType, reused_values: &mu hash_output(&mut hasher, &tx.outputs[input_index]); return hasher.finalize(); } - - // Otherwise, return hash of all outputs. Re-use hash if available. - if let Some(outputs_hash) = reused_values.outputs_hash { - outputs_hash - } else { + let hash = || { let mut hasher = TransactionSigningHash::new(); for output in tx.outputs.iter() { hash_output(&mut hasher, output); } - let outputs_hash = hasher.finalize(); - reused_values.outputs_hash = Some(outputs_hash); - outputs_hash - } + hasher.finalize() + }; + // Otherwise, return hash of all outputs. Re-use hash if available. 
+ reused_values.outputs_hash(hash) } pub fn hash_outpoint(hasher: &mut impl Hasher, outpoint: TransactionOutpoint) { @@ -141,7 +242,7 @@ pub fn calc_schnorr_signature_hash( verifiable_tx: &impl VerifiableTransaction, input_index: usize, hash_type: SigHashType, - reused_values: &mut SigHashReusedValues, + reused_values: &impl SigHashReusedValues, ) -> Hash { let input = verifiable_tx.populated_input(input_index); let tx = verifiable_tx.tx(); @@ -170,7 +271,7 @@ pub fn calc_ecdsa_signature_hash( tx: &impl VerifiableTransaction, input_index: usize, hash_type: SigHashType, - reused_values: &mut SigHashReusedValues, + reused_values: &impl SigHashReusedValues, ) -> Hash { let hash = calc_schnorr_signature_hash(tx, input_index, hash_type, reused_values); let mut hasher = TransactionSigningHashECDSA::new(); @@ -573,9 +674,9 @@ mod tests { } } let populated_tx = PopulatedTransaction::new(&tx, entries); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); assert_eq!( - calc_schnorr_signature_hash(&populated_tx, test.input_index, test.hash_type, &mut reused_values).to_string(), + calc_schnorr_signature_hash(&populated_tx, test.input_index, test.hash_type, &reused_values).to_string(), test.expected_hash, "test {} failed", test.name diff --git a/consensus/core/src/sign.rs b/consensus/core/src/sign.rs index dee0d3844..55513be9d 100644 --- a/consensus/core/src/sign.rs +++ b/consensus/core/src/sign.rs @@ -1,6 +1,6 @@ use crate::{ hashing::{ - sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::SIG_HASH_ALL, }, tx::SignableTransaction, @@ -84,9 +84,9 @@ pub fn sign(mut signable_tx: SignableTransaction, schnorr_key: secp256k1::Keypai signable_tx.tx.inputs[i].sig_op_count = 1; } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for i in 0..signable_tx.tx.inputs.len() { - let 
sig_hash = calc_schnorr_signature_hash(&signable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&signable_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -106,11 +106,11 @@ pub fn sign_with_multiple(mut mutable_tx: SignableTransaction, privkeys: Vec<[u8 mutable_tx.tx.inputs[i].sig_op_count = 1; } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for i in 0..mutable_tx.tx.inputs.len() { let script = mutable_tx.entries[i].as_ref().unwrap().script_public_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -132,12 +132,12 @@ pub fn sign_with_multiple_v2(mut mutable_tx: SignableTransaction, privkeys: &[[u map.insert(script_pub_key_script, schnorr_key); } - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut additional_signatures_required = false; for i in 0..mutable_tx.tx.inputs.len() { let script = mutable_tx.entries[i].as_ref().unwrap().script_public_key.script(); if let Some(schnorr_key) = map.get(script) { - let sig_hash = calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = 
calc_schnorr_signature_hash(&mutable_tx.as_verifiable(), i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) @@ -154,7 +154,7 @@ pub fn sign_with_multiple_v2(mut mutable_tx: SignableTransaction, privkeys: &[[u } pub fn verify(tx: &impl crate::tx::VerifiableTransaction) -> Result<(), Error> { - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for (i, (input, entry)) in tx.populated_inputs().enumerate() { if input.signature_script.is_empty() { return Err(Error::Message(format!("Signature is empty for input: {i}"))); @@ -162,7 +162,7 @@ pub fn verify(tx: &impl crate::tx::VerifiableTransaction) -> Result<(), Error> { let pk = &entry.script_public_key.script()[1..33]; let pk = secp256k1::XOnlyPublicKey::from_slice(pk)?; let sig = secp256k1::schnorr::Signature::from_slice(&input.signature_script[1..65])?; - let sig_hash = calc_schnorr_signature_hash(tx, i, SIG_HASH_ALL, &mut reused_values); + let sig_hash = calc_schnorr_signature_hash(tx, i, SIG_HASH_ALL, &reused_values); let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice())?; sig.verify(&msg, &pk)?; } diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index db8efed3a..3bfd3a465 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -770,7 +770,14 @@ impl VirtualStateProcessor { let virtual_utxo_view = &virtual_read.utxo_set; let virtual_daa_score = virtual_state.daa_score; let virtual_past_median_time = virtual_state.past_median_time; - self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time) + if 
mutable_tx.tx.inputs.len() > 1 { + // use pool to apply par_iter to inputs + self.thread_pool.install(|| { + self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time) + }) + } else { + self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time) + } } pub fn validate_mempool_transactions_in_parallel(&self, mutable_txs: &mut [MutableTransaction]) -> Vec> { diff --git a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs index 696b9a9d4..e53200fec 100644 --- a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs +++ b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs @@ -1,7 +1,12 @@ use crate::constants::{MAX_SOMPI, SEQUENCE_LOCK_TIME_DISABLED, SEQUENCE_LOCK_TIME_MASK}; -use kaspa_consensus_core::{hashing::sighash::SigHashReusedValues, tx::VerifiableTransaction}; +use kaspa_consensus_core::hashing::sighash::{SigHashReusedValues, SigHashReusedValuesSync}; +use kaspa_consensus_core::{hashing::sighash::SigHashReusedValuesUnsync, tx::VerifiableTransaction}; use kaspa_core::warn; -use kaspa_txscript::{get_sig_op_count, TxScriptEngine}; +use kaspa_txscript::caches::Cache; +use kaspa_txscript::{get_sig_op_count, SigCacheKey, TxScriptEngine}; +use rayon::iter::IntoParallelIterator; +use rayon::ThreadPool; +use std::sync::Arc; use super::{ errors::{TxResult, TxRuleError}, @@ -24,7 +29,7 @@ pub enum TxValidationFlags { impl TransactionValidator { pub fn validate_populated_transaction_and_get_fee( &self, - tx: &impl VerifiableTransaction, + tx: &(impl VerifiableTransaction + std::marker::Sync), pov_daa_score: u64, flags: TxValidationFlags, ) -> TxResult { @@ -42,7 +47,7 @@ impl TransactionValidator { Self::check_sequence_lock(tx, pov_daa_score)?; match flags { TxValidationFlags::Full | 
TxValidationFlags::SkipMassCheck => { - Self::check_sig_op_counts(tx)?; + Self::check_sig_op_counts::<_, SigHashReusedValuesUnsync>(tx)?; self.check_scripts(tx)?; } TxValidationFlags::SkipScriptChecks => {} @@ -134,9 +139,9 @@ impl TransactionValidator { Ok(()) } - fn check_sig_op_counts(tx: &T) -> TxResult<()> { + fn check_sig_op_counts(tx: &T) -> TxResult<()> { for (i, (input, entry)) in tx.populated_inputs().enumerate() { - let calculated = get_sig_op_count::(&input.signature_script, &entry.script_public_key); + let calculated = get_sig_op_count::(&input.signature_script, &entry.script_public_key); if calculated != input.sig_op_count as u64 { return Err(TxRuleError::WrongSigOpCount(i, input.sig_op_count as u64, calculated)); } @@ -144,16 +149,64 @@ impl TransactionValidator { Ok(()) } - pub fn check_scripts(&self, tx: &impl VerifiableTransaction) -> TxResult<()> { - let mut reused_values = SigHashReusedValues::new(); - for (i, (input, entry)) in tx.populated_inputs().enumerate() { - let mut engine = TxScriptEngine::from_transaction_input(tx, input, i, entry, &mut reused_values, &self.sig_cache) - .map_err(TxRuleError::SignatureInvalid)?; - engine.execute().map_err(TxRuleError::SignatureInvalid)?; - } + pub fn check_scripts(&self, tx: &(impl VerifiableTransaction + std::marker::Sync)) -> TxResult<()> { + check_scripts(&self.sig_cache, tx) + } +} - Ok(()) +pub fn check_scripts(sig_cache: &Cache, tx: &(impl VerifiableTransaction + Sync)) -> TxResult<()> { + if tx.inputs().len() > 1 { + check_scripts_par_iter(sig_cache, tx) + } else { + check_scripts_single_threaded(sig_cache, tx) + } +} + +pub fn check_scripts_single_threaded(sig_cache: &Cache, tx: &impl VerifiableTransaction) -> TxResult<()> { + let reused_values = SigHashReusedValuesUnsync::new(); + for (i, (input, entry)) in tx.populated_inputs().enumerate() { + let mut engine = TxScriptEngine::from_transaction_input(tx, input, i, entry, &reused_values, sig_cache) + .map_err(TxRuleError::SignatureInvalid)?; + 
engine.execute().map_err(TxRuleError::SignatureInvalid)?; } + Ok(()) +} + +pub fn check_scripts_par_iter( + sig_cache: &Cache, + tx: &(impl VerifiableTransaction + std::marker::Sync), +) -> TxResult<()> { + use rayon::iter::ParallelIterator; + let reused_values = std::sync::Arc::new(SigHashReusedValuesSync::new()); + (0..tx.inputs().len()) + .into_par_iter() + .try_for_each(|idx| { + let reused_values = reused_values.clone(); // Clone the Arc to share ownership + let (input, utxo) = tx.populated_input(idx); + let mut engine = TxScriptEngine::from_transaction_input(tx, input, idx, utxo, &reused_values, sig_cache)?; + engine.execute() + }) + .map_err(TxRuleError::SignatureInvalid) +} + +pub fn check_scripts_par_iter_thread( + sig_cache: &Cache, + tx: &(impl VerifiableTransaction + std::marker::Sync), + pool: &ThreadPool, +) -> TxResult<()> { + use rayon::iter::ParallelIterator; + pool.install(|| { + let reused_values = Arc::new(SigHashReusedValuesSync::new()); + (0..tx.inputs().len()) + .into_par_iter() + .try_for_each(|idx| { + let reused_values = reused_values.clone(); // Clone the Arc to share ownership + let (input, utxo) = tx.populated_input(idx); + let mut engine = TxScriptEngine::from_transaction_input(tx, input, idx, utxo, &reused_values, sig_cache)?; + engine.execute() + }) + .map_err(TxRuleError::SignatureInvalid) + }) } #[cfg(test)] @@ -161,6 +214,7 @@ mod tests { use super::super::errors::TxRuleError; use core::str::FromStr; use itertools::Itertools; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::sign::sign; use kaspa_consensus_core::subnets::SubnetworkId; use kaspa_consensus_core::tx::{MutableTransaction, PopulatedTransaction, ScriptVec, TransactionId, UtxoEntry}; @@ -679,6 +733,6 @@ mod tests { let signed_tx = sign(MutableTransaction::with_entries(unsigned_tx, entries), schnorr_key); let populated_tx = signed_tx.as_verifiable(); assert_eq!(tv.check_scripts(&populated_tx), Ok(())); - 
assert_eq!(TransactionValidator::check_sig_op_counts(&populated_tx), Ok(())); + assert_eq!(TransactionValidator::check_sig_op_counts::<_, SigHashReusedValuesUnsync>(&populated_tx), Ok(())); } } diff --git a/consensus/wasm/src/utils.rs b/consensus/wasm/src/utils.rs index 0139b573f..b70664e1e 100644 --- a/consensus/wasm/src/utils.rs +++ b/consensus/wasm/src/utils.rs @@ -1,5 +1,5 @@ use crate::result::Result; -use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}; +use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}; use kaspa_consensus_core::hashing::sighash_type::SIG_HASH_ALL; use kaspa_consensus_core::tx; @@ -9,9 +9,9 @@ pub fn script_hashes(mut mutable_tx: tx::SignableTransaction) -> Result { // We use IndexMap and not HashMap, because it makes it cheaper to remove a random element when the cache is full. - map: Arc>>, + pub map: Arc>>, size: usize, counters: Arc, } diff --git a/crypto/txscript/src/lib.rs b/crypto/txscript/src/lib.rs index 77cef45bc..bcd234c55 100644 --- a/crypto/txscript/src/lib.rs +++ b/crypto/txscript/src/lib.rs @@ -41,6 +41,8 @@ pub const MAX_PUB_KEYS_PER_MUTLTISIG: i32 = 20; // Note that this includes OP_RESERVED which counts as a push operation. 
pub const NO_COST_OPCODE: u8 = 0x60; +type DynOpcodeImplementation = Box>; + #[derive(Clone, Hash, PartialEq, Eq)] enum Signature { Secp256k1(secp256k1::schnorr::Signature), @@ -66,7 +68,7 @@ enum ScriptSource<'a, T: VerifiableTransaction> { StandAloneScripts(Vec<&'a [u8]>), } -pub struct TxScriptEngine<'a, T: VerifiableTransaction> { +pub struct TxScriptEngine<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> { dstack: Stack, astack: Stack, @@ -74,7 +76,7 @@ pub struct TxScriptEngine<'a, T: VerifiableTransaction> { // Outer caches for quicker calculation // TODO:: make it compatible with threading - reused_values: &'a mut SigHashReusedValues, + reused_values: &'a Reused, sig_cache: &'a Cache, cond_stack: Vec, // Following if stacks, and whether it is running @@ -82,30 +84,35 @@ pub struct TxScriptEngine<'a, T: VerifiableTransaction> { num_ops: i32, } -fn parse_script( +fn parse_script( script: &[u8], -) -> impl Iterator>, TxScriptError>> + '_ { +) -> impl Iterator, TxScriptError>> + '_ { script.iter().batching(|it| deserialize_next_opcode(it)) } -pub fn get_sig_op_count(signature_script: &[u8], prev_script_public_key: &ScriptPublicKey) -> u64 { +pub fn get_sig_op_count( + signature_script: &[u8], + prev_script_public_key: &ScriptPublicKey, +) -> u64 { let is_p2sh = ScriptClass::is_pay_to_script_hash(prev_script_public_key.script()); - let script_pub_key_ops = parse_script::(prev_script_public_key.script()).collect_vec(); + let script_pub_key_ops = parse_script::(prev_script_public_key.script()).collect_vec(); if !is_p2sh { return get_sig_op_count_by_opcodes(&script_pub_key_ops); } - let signature_script_ops = parse_script::(signature_script).collect_vec(); + let signature_script_ops = parse_script::(signature_script).collect_vec(); if signature_script_ops.is_empty() || signature_script_ops.iter().any(|op| op.is_err() || !op.as_ref().unwrap().is_push_opcode()) { return 0; } let p2sh_script = signature_script_ops.last().expect("checked if empty 
above").as_ref().expect("checked if err above").get_data(); - let p2sh_ops = parse_script::(p2sh_script).collect_vec(); + let p2sh_ops = parse_script::(p2sh_script).collect_vec(); get_sig_op_count_by_opcodes(&p2sh_ops) } -fn get_sig_op_count_by_opcodes(opcodes: &[Result>, TxScriptError>]) -> u64 { +fn get_sig_op_count_by_opcodes( + opcodes: &[Result, TxScriptError>], +) -> u64 { // TODO: Check for overflows let mut num_sigs: u64 = 0; for (i, op) in opcodes.iter().enumerate() { @@ -138,12 +145,12 @@ fn get_sig_op_count_by_opcodes(opcodes: &[Result(script: &[u8]) -> bool { - parse_script::(script).enumerate().any(|(index, op)| op.is_err() || (index == 0 && op.unwrap().value() == OpReturn)) +pub fn is_unspendable(script: &[u8]) -> bool { + parse_script::(script).enumerate().any(|(index, op)| op.is_err() || (index == 0 && op.unwrap().value() == OpReturn)) } -impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { - pub fn new(reused_values: &'a mut SigHashReusedValues, sig_cache: &'a Cache) -> Self { +impl<'a, T: VerifiableTransaction, Reused: SigHashReusedValues> TxScriptEngine<'a, T, Reused> { + pub fn new(reused_values: &'a Reused, sig_cache: &'a Cache) -> Self { Self { dstack: vec![], astack: vec![], @@ -160,7 +167,7 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { input: &'a TransactionInput, input_idx: usize, utxo_entry: &'a UtxoEntry, - reused_values: &'a mut SigHashReusedValues, + reused_values: &'a Reused, sig_cache: &'a Cache, ) -> Result { let script_public_key = utxo_entry.script_public_key.script(); @@ -181,7 +188,7 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { } } - pub fn from_script(script: &'a [u8], reused_values: &'a mut SigHashReusedValues, sig_cache: &'a Cache) -> Self { + pub fn from_script(script: &'a [u8], reused_values: &'a Reused, sig_cache: &'a Cache) -> Self { Self { dstack: Default::default(), astack: Default::default(), @@ -198,7 +205,7 @@ impl<'a, T: VerifiableTransaction> TxScriptEngine<'a, T> { return 
self.cond_stack.is_empty() || *self.cond_stack.last().expect("Checked not empty") == OpCond::True; } - fn execute_opcode(&mut self, opcode: Box>) -> Result<(), TxScriptError> { + fn execute_opcode(&mut self, opcode: DynOpcodeImplementation) -> Result<(), TxScriptError> { // Different from kaspad: Illegal and disabled opcode are checked on execute instead // Note that this includes OP_RESERVED which counts as a push operation. if !opcode.is_push_opcode() { @@ -508,6 +515,7 @@ mod tests { use crate::opcodes::codes::{OpBlake2b, OpCheckSig, OpData1, OpData2, OpData32, OpDup, OpEqual, OpPushData1, OpTrue}; use super::*; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionId, TransactionOutpoint, TransactionOutput, }; @@ -538,7 +546,7 @@ mod tests { fn run_test_script_cases(test_cases: Vec) { let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for test in test_cases { // Ensure encapsulation of variables (no leaking between tests) @@ -561,7 +569,7 @@ mod tests { let populated_tx = PopulatedTransaction::new(&tx, vec![utxo_entry.clone()]); - let mut vm = TxScriptEngine::from_transaction_input(&populated_tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) + let mut vm = TxScriptEngine::from_transaction_input(&populated_tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) .expect("Script creation failed"); assert_eq!(vm.execute(), test.expected_result); } @@ -779,7 +787,7 @@ mod tests { ]; for test in test_cases { - let check = TxScriptEngine::::check_pub_key_encoding(test.key); + let check = TxScriptEngine::::check_pub_key_encoding(test.key); if test.is_valid { assert_eq!( check, @@ -876,7 +884,10 @@ mod tests { for test in tests { assert_eq!( - get_sig_op_count::(test.signature_script, &test.prev_script_public_key), + get_sig_op_count::( + 
test.signature_script, + &test.prev_script_public_key + ), test.expected_sig_ops, "failed for '{}'", test.name @@ -905,7 +916,7 @@ mod tests { for test in tests { assert_eq!( - is_unspendable::(test.script_public_key), + is_unspendable::(test.script_public_key), test.expected, "failed for '{}'", test.name @@ -925,6 +936,7 @@ mod bitcoind_tests { use super::*; use crate::script_builder::ScriptBuilderError; use kaspa_consensus_core::constants::MAX_TX_IN_SEQUENCE_NUM; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionId, TransactionOutpoint, TransactionOutput, }; @@ -1015,13 +1027,13 @@ mod bitcoind_tests { // Run transaction let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let mut vm = TxScriptEngine::from_transaction_input( &populated_tx, &populated_tx.tx().inputs[0], 0, &populated_tx.entries[0], - &mut reused_values, + &reused_values, &sig_cache, ) .map_err(UnifiedError::TxScriptError)?; diff --git a/crypto/txscript/src/opcodes/macros.rs b/crypto/txscript/src/opcodes/macros.rs index b3db98829..c4d161d40 100644 --- a/crypto/txscript/src/opcodes/macros.rs +++ b/crypto/txscript/src/opcodes/macros.rs @@ -6,9 +6,9 @@ macro_rules! opcode_serde { [[self.value()].as_slice(), length.to_le_bytes().as_slice(), self.data.as_slice()].concat() } - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> { + ) -> Result>, TxScriptError> { match it.take(size_of::<$type>()).copied().collect::>().try_into() { Ok(bytes) => { let length = <$type>::from_le_bytes(bytes) as usize; @@ -32,9 +32,9 @@ macro_rules! 
opcode_serde { [[self.value()].as_slice(), self.data.clone().as_slice()].concat() } - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> { + ) -> Result>, TxScriptError> { // Static length includes the opcode itself let data: Vec = it.take($length - 1).copied().collect(); Self::new(data) @@ -44,7 +44,7 @@ macro_rules! opcode_serde { macro_rules! opcode_init { ($type:ty) => { - fn new(data: Vec) -> Result>, TxScriptError> { + fn new(data: Vec) -> Result>, TxScriptError> { if data.len() > <$type>::MAX as usize { return Err(TxScriptError::MalformedPush(<$type>::MAX as usize, data.len())); } @@ -52,7 +52,7 @@ macro_rules! opcode_init { } }; ($length: literal) => { - fn new(data: Vec) -> Result>, TxScriptError> { + fn new(data: Vec) -> Result>, TxScriptError> { if data.len() != $length - 1 { return Err(TxScriptError::MalformedPush($length - 1, data.len())); } @@ -69,20 +69,20 @@ macro_rules! opcode_impl { opcode_serde!($length); } - impl OpCodeExecution for $name { - fn empty() -> Result>, TxScriptError> { + impl OpCodeExecution for $name { + fn empty() -> Result>, TxScriptError> { Self::new(vec![]) } opcode_init!($length); #[allow(unused_variables)] - fn execute(&$self, $vm: &mut TxScriptEngine) -> OpCodeResult { + fn execute(&$self, $vm: &mut TxScriptEngine) -> OpCodeResult { $code } } - impl OpCodeImplementation for $name {} + impl OpCodeImplementation for $name {} } } @@ -111,7 +111,7 @@ macro_rules! opcode_list { )? 
)* - pub fn deserialize_next_opcode<'i, I: Iterator, T: VerifiableTransaction>(it: &mut I) -> Option>, TxScriptError>> { + pub fn deserialize_next_opcode<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>(it: &mut I) -> Option>, TxScriptError>> { match it.next() { Some(opcode_num) => match opcode_num { $( diff --git a/crypto/txscript/src/opcodes/mod.rs b/crypto/txscript/src/opcodes/mod.rs index 5d6096b7a..7b66da27f 100644 --- a/crypto/txscript/src/opcodes/mod.rs +++ b/crypto/txscript/src/opcodes/mod.rs @@ -10,6 +10,7 @@ use crate::{ }; use blake2b_simd::Params; use core::cmp::{max, min}; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValues; use kaspa_consensus_core::hashing::sighash_type::SigHashType; use kaspa_consensus_core::tx::VerifiableTransaction; use sha2::{Digest, Sha256}; @@ -75,28 +76,31 @@ pub trait OpCodeMetadata: Debug { } } -pub trait OpCodeExecution { - fn empty() -> Result>, TxScriptError> +pub trait OpCodeExecution { + fn empty() -> Result>, TxScriptError> where Self: Sized; #[allow(clippy::new_ret_no_self)] - fn new(data: Vec) -> Result>, TxScriptError> + fn new(data: Vec) -> Result>, TxScriptError> where Self: Sized; - fn execute(&self, vm: &mut TxScriptEngine) -> OpCodeResult; + fn execute(&self, vm: &mut TxScriptEngine) -> OpCodeResult; } pub trait OpcodeSerialization { fn serialize(&self) -> Vec; - fn deserialize<'i, I: Iterator, T: VerifiableTransaction>( + fn deserialize<'i, I: Iterator, T: VerifiableTransaction, Reused: SigHashReusedValues>( it: &mut I, - ) -> Result>, TxScriptError> + ) -> Result>, TxScriptError> where Self: Sized; } -pub trait OpCodeImplementation: OpCodeExecution + OpCodeMetadata + OpcodeSerialization {} +pub trait OpCodeImplementation: + OpCodeExecution + OpCodeMetadata + OpcodeSerialization +{ +} impl OpCodeMetadata for OpCode { fn value(&self) -> u8 { @@ -195,13 +199,19 @@ impl OpCodeMetadata for OpCode { // Helpers for some opcodes with shared data #[inline] -fn push_data(data: Vec, 
vm: &mut TxScriptEngine) -> OpCodeResult { +fn push_data( + data: Vec, + vm: &mut TxScriptEngine, +) -> OpCodeResult { vm.dstack.push(data); Ok(()) } #[inline] -fn push_number(number: i64, vm: &mut TxScriptEngine) -> OpCodeResult { +fn push_number( + number: i64, + vm: &mut TxScriptEngine, +) -> OpCodeResult { vm.dstack.push_item(number); Ok(()) } @@ -960,7 +970,7 @@ opcode_list! { // converts an opcode from the list of Op0 to Op16 to its associated value #[allow(clippy::borrowed_box)] -pub fn to_small_int(opcode: &Box>) -> u8 { +pub fn to_small_int(opcode: &Box>) -> u8 { let value = opcode.value(); if value == codes::OpFalse { return 0; @@ -978,7 +988,7 @@ mod test { use crate::{opcodes, pay_to_address_script, TxScriptEngine, TxScriptError, LOCK_TIME_THRESHOLD}; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::constants::{SOMPI_PER_KASPA, TX_VERSION}; - use kaspa_consensus_core::hashing::sighash::SigHashReusedValues; + use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; use kaspa_consensus_core::tx::{ PopulatedTransaction, ScriptPublicKey, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry, @@ -987,21 +997,21 @@ mod test { struct TestCase<'a> { init: Stack, - code: Box>>, + code: Box, SigHashReusedValuesUnsync>>, dstack: Stack, } struct ErrorTestCase<'a> { init: Stack, - code: Box>>, + code: Box, SigHashReusedValuesUnsync>>, error: TxScriptError, } fn run_success_test_cases(tests: Vec) { let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for TestCase { init, code, dstack } in tests { - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let mut vm = TxScriptEngine::new(&reused_values, &cache); vm.dstack = init; code.execute(&mut vm).unwrap_or_else(|_| panic!("Opcode {} should not fail", code.value())); assert_eq!(*vm.dstack, dstack, 
"OpCode {} Pushed wrong value", code.value()); @@ -1010,9 +1020,9 @@ mod test { fn run_error_test_cases(tests: Vec) { let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); for ErrorTestCase { init, code, error } in tests { - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let mut vm = TxScriptEngine::new(&reused_values, &cache); vm.dstack.clone_from(&init); assert_eq!( code.execute(&mut vm) @@ -1027,7 +1037,7 @@ mod test { #[test] fn test_opcode_disabled() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpCat::empty().expect("Should accept empty"), opcodes::OpSubStr::empty().expect("Should accept empty"), opcodes::OpLeft::empty().expect("Should accept empty"), @@ -1046,8 +1056,8 @@ mod test { ]; let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -1059,7 +1069,7 @@ mod test { #[test] fn test_opcode_reserved() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpReserved::empty().expect("Should accept empty"), opcodes::OpVer::empty().expect("Should accept empty"), opcodes::OpVerIf::empty().expect("Should accept empty"), @@ -1069,8 +1079,8 @@ mod test { ]; let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -1082,7 +1092,7 @@ mod test { #[test] fn test_opcode_invalid() { - let tests: Vec>> = vec![ + let tests: Vec>> = vec![ opcodes::OpUnknown166::empty().expect("Should accept empty"), 
opcodes::OpUnknown167::empty().expect("Should accept empty"), opcodes::OpUnknown178::empty().expect("Should accept empty"), @@ -1160,8 +1170,8 @@ mod test { ]; let cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); - let mut vm = TxScriptEngine::new(&mut reused_values, &cache); + let reused_values = SigHashReusedValuesUnsync::new(); + let mut vm = TxScriptEngine::new(&reused_values, &cache); for pop in tests { match pop.execute(&mut vm) { @@ -2741,7 +2751,7 @@ mod test { let (base_tx, input, utxo_entry) = make_mock_transaction(1); let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let code = opcodes::OpCheckLockTimeVerify::empty().expect("Should accept empty"); @@ -2753,7 +2763,7 @@ mod test { ] { let mut tx = base_tx.clone(); tx.0.lock_time = tx_lock_time; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) .expect("Shouldn't fail"); vm.dstack = vec![lock_time.clone()]; match code.execute(&mut vm) { @@ -2783,7 +2793,7 @@ mod test { let (tx, base_input, utxo_entry) = make_mock_transaction(1); let sig_cache = Cache::new(10_000); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let code = opcodes::OpCheckSequenceVerify::empty().expect("Should accept empty"); @@ -2796,7 +2806,7 @@ mod test { ] { let mut input = base_input.clone(); input.sequence = tx_sequence; - let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &mut reused_values, &sig_cache) + let mut vm = TxScriptEngine::from_transaction_input(&tx, &input, 0, &utxo_entry, &reused_values, &sig_cache) .expect("Shouldn't fail"); vm.dstack = vec![sequence.clone()]; match code.execute(&mut vm) { diff --git 
a/crypto/txscript/src/standard/multisig.rs b/crypto/txscript/src/standard/multisig.rs index 79c74c7b3..cbd9dbe6d 100644 --- a/crypto/txscript/src/standard/multisig.rs +++ b/crypto/txscript/src/standard/multisig.rs @@ -74,7 +74,7 @@ mod tests { use core::str::FromStr; use kaspa_consensus_core::{ hashing::{ - sighash::{calc_ecdsa_signature_hash, calc_schnorr_signature_hash, SigHashReusedValues}, + sighash::{calc_ecdsa_signature_hash, calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, sighash_type::SIG_HASH_ALL, }, subnets::SubnetworkId, @@ -154,11 +154,11 @@ mod tests { }]; let mut tx = MutableTransaction::with_entries(tx, entries); - let mut reused_values = SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); let sig_hash = if !is_ecdsa { - calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &mut reused_values) + calc_schnorr_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values) } else { - calc_ecdsa_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &mut reused_values) + calc_ecdsa_signature_hash(&tx.as_verifiable(), 0, SIG_HASH_ALL, &reused_values) }; let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice()).unwrap(); let signatures: Vec<_> = inputs @@ -184,7 +184,7 @@ mod tests { let (input, entry) = tx.populated_inputs().next().unwrap(); let cache = Cache::new(10_000); - let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &mut reused_values, &cache).unwrap(); + let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &reused_values, &cache).unwrap(); assert_eq!(engine.execute().is_ok(), is_ok); } #[test] diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index e759a9e50..060677a1e 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -2,6 +2,7 @@ use crate::mempool::{ errors::{NonStandardError, 
NonStandardResult}, Mempool, }; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::{ constants::{MAX_SCRIPT_PUBLIC_KEY_VERSION, MAX_SOMPI}, mass, @@ -114,7 +115,7 @@ impl Mempool { /// It is exposed by [MiningManager] for use by transaction generators and wallets. pub(crate) fn is_transaction_output_dust(&self, transaction_output: &TransactionOutput) -> bool { // Unspendable outputs are considered dust. - if is_unspendable::(transaction_output.script_public_key.script()) { + if is_unspendable::(transaction_output.script_public_key.script()) { return true; } @@ -175,7 +176,6 @@ impl Mempool { if contextual_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { return Err(NonStandardError::RejectContextualMass(transaction_id, contextual_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); } - for (i, input) in transaction.tx.inputs.iter().enumerate() { // It is safe to elide existence and index checks here since // they have already been checked prior to calling this @@ -188,7 +188,10 @@ impl Mempool { ScriptClass::PubKey => {} ScriptClass::PubKeyECDSA => {} ScriptClass::ScriptHash => { - get_sig_op_count::(&input.signature_script, &entry.script_public_key); + get_sig_op_count::( + &input.signature_script, + &entry.script_public_key, + ); let num_sig_ops = 1; if num_sig_ops > MAX_STANDARD_P2SH_SIG_OPS { return Err(NonStandardError::RejectSignatureCount(transaction_id, i, num_sig_ops, MAX_STANDARD_P2SH_SIG_OPS)); diff --git a/wallet/pskt/examples/multisig.rs b/wallet/pskt/examples/multisig.rs index a34bef9b5..96659d294 100644 --- a/wallet/pskt/examples/multisig.rs +++ b/wallet/pskt/examples/multisig.rs @@ -1,5 +1,5 @@ use kaspa_consensus_core::{ - hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, tx::{TransactionId, TransactionOutpoint, UtxoEntry}, }; use kaspa_txscript::{multisig_redeem_script, opcodes::codes::OpData65, pay_to_script_hash_script, 
script_builder::ScriptBuilder}; @@ -49,8 +49,8 @@ fn main() { println!("Serialized after setting sequence: {}", ser_updated); let signer_pskt: PSKT = serde_json::from_str(&ser_updated).expect("Failed to deserialize"); - let mut reused_values = SigHashReusedValues::new(); - let mut sign = |signer_pskt: PSKT, kp: &Keypair| { + let reused_values = SigHashReusedValuesUnsync::new(); + let sign = |signer_pskt: PSKT, kp: &Keypair| { signer_pskt .pass_signature_sync(|tx, sighash| -> Result, String> { let tx = dbg!(tx); @@ -59,7 +59,7 @@ fn main() { .iter() .enumerate() .map(|(idx, _input)| { - let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &mut reused_values); + let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &reused_values); let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); Ok(SignInputOk { signature: Signature::Schnorr(kp.sign_schnorr(msg)), diff --git a/wallet/pskt/src/lib.rs b/wallet/pskt/src/lib.rs index e26d5c9ea..060aded57 100644 --- a/wallet/pskt/src/lib.rs +++ b/wallet/pskt/src/lib.rs @@ -1,4 +1,5 @@ use kaspa_bip32::{secp256k1, DerivationPath, KeyFingerprint}; +use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use serde::{Deserialize, Serialize}; use serde_repr::{Deserialize_repr, Serialize_repr}; use std::{collections::BTreeMap, fmt::Display, fmt::Formatter, future::Future, marker::PhantomData, ops::Deref}; @@ -17,7 +18,7 @@ pub use global::{Global, GlobalBuilder}; pub use input::{Input, InputBuilder}; use kaspa_consensus_core::tx::UtxoEntry; use kaspa_consensus_core::{ - hashing::{sighash::SigHashReusedValues, sighash_type::SigHashType}, + hashing::sighash_type::SigHashType, subnets::SUBNETWORK_ID_NATIVE, tx::{MutableTransaction, SignableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutput}, }; @@ -397,10 +398,10 @@ impl PSKT { { let tx = tx.as_verifiable(); let cache = Cache::new(10_000); - let mut reused_values = 
SigHashReusedValues::new(); + let reused_values = SigHashReusedValuesUnsync::new(); tx.populated_inputs().enumerate().try_for_each(|(idx, (input, entry))| { - TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &mut reused_values, &cache)?.execute()?; + TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &reused_values, &cache)?.execute()?; >::Ok(()) })?; } diff --git a/wasm/build/docs/typedoc.json b/wasm/build/docs/typedoc.json index b89af0882..db308c812 100644 --- a/wasm/build/docs/typedoc.json +++ b/wasm/build/docs/typedoc.json @@ -1,7 +1,7 @@ { "$schema": "https://typedoc.org/schema.json", - "treatWarningsAsErrors": true, + "treatWarningsAsErrors": false, "cleanOutputDir": true, "disableSources": true, - "categoryOrder": ["*", "Other"], + "categoryOrder": ["*", "Other"] } From 9e9b1da4e088f4678d067b8f67242a93e22e4e83 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 13:05:25 +0000 Subject: [PATCH 39/50] increase caches (reachability sets is the most important) --- consensus/src/consensus/storage.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index e170ace04..55a9f7f44 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -83,17 +83,17 @@ impl ConsensusStorage { // Lower and upper bounds let pruning_depth = params.pruning_depth as usize; let pruning_size_for_caches = (params.pruning_depth + params.finality_depth) as usize; // Upper bound for any block/header related data - let level_lower_bound = 2 * params.pruning_proof_m as usize; // Number of items lower bound for level-related caches + let level_lower_bound = 4 * params.pruning_proof_m as usize; // Number of items lower bound for level-related caches // Budgets in bytes. 
All byte budgets overall sum up to ~1GB of memory (which obviously takes more low level alloc space) let daa_excluded_budget = scaled(30_000_000); let statuses_budget = scaled(30_000_000); - let reachability_data_budget = scaled(20_000_000); - let reachability_sets_budget = scaled(20_000_000); // x 2 for tree children and future covering set + let reachability_data_budget = scaled(200_000_000); + let reachability_sets_budget = scaled(200_000_000); // x 2 for tree children and future covering set let ghostdag_compact_budget = scaled(15_000_000); let headers_compact_budget = scaled(5_000_000); - let parents_budget = scaled(40_000_000); // x 3 for reachability and levels - let children_budget = scaled(5_000_000); // x 3 for reachability and levels + let parents_budget = scaled(80_000_000); // x 3 for reachability and levels + let children_budget = scaled(20_000_000); // x 3 for reachability and levels let ghostdag_budget = scaled(80_000_000); // x 2 for levels let headers_budget = scaled(80_000_000); let transactions_budget = scaled(40_000_000); From e65789dc27dd7f2f79df3786efeac3a0710243a6 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 13:06:32 +0000 Subject: [PATCH 40/50] experiment with reachability alg consts --- consensus/core/src/config/constants.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/core/src/config/constants.rs b/consensus/core/src/config/constants.rs index c4635083b..bab5f8a32 100644 --- a/consensus/core/src/config/constants.rs +++ b/consensus/core/src/config/constants.rs @@ -113,11 +113,11 @@ pub mod perf { use crate::config::params::Params; /// The default target depth for reachability reindexes. - pub const DEFAULT_REINDEX_DEPTH: u64 = 100; + pub const DEFAULT_REINDEX_DEPTH: u64 = 1000; /// The default slack interval used by the reachability /// algorithm to encounter for blocks out of the selected chain. 
- pub const DEFAULT_REINDEX_SLACK: u64 = 1 << 12; + pub const DEFAULT_REINDEX_SLACK: u64 = 1 << 14; const BASELINE_HEADER_DATA_CACHE_SIZE: usize = 10_000; const BASELINE_BLOCK_DATA_CACHE_SIZE: usize = 200; From cd5dad15ffc37a856b1d936b53fc2ff062712ba6 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 13:07:45 +0000 Subject: [PATCH 41/50] optimize low levels (turned out to be negligible) --- consensus/src/processes/parents_builder.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/consensus/src/processes/parents_builder.rs b/consensus/src/processes/parents_builder.rs index 49b3822d9..6e9fff943 100644 --- a/consensus/src/processes/parents_builder.rs +++ b/consensus/src/processes/parents_builder.rs @@ -59,6 +59,10 @@ impl let mut parents = Vec::with_capacity(self.max_block_level as usize); for block_level in 0..self.max_block_level { + if direct_parent_headers.iter().all(|h| block_level <= h.block_level) { + parents.push(direct_parents.iter().copied().collect_vec()); + continue; + } // Direct parents are guaranteed to be in one another's anticones so add them all to // all the block levels they occupy. 
let mut level_candidates_to_reference_blocks = direct_parent_headers From 4452295491a6798f4723ce1dee8928d08577d791 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 13:08:05 +0000 Subject: [PATCH 42/50] count utxo entries --- consensus/src/pipeline/pruning_processor/processor.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index bbc1ea9a9..546b5c37b 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -228,12 +228,14 @@ impl PruningProcessor { info!("Verifying the new pruning point UTXO commitment (sanity test)"); let commitment = self.headers_store.get_header(pruning_point).unwrap().utxo_commitment; let mut multiset = MuHash::new(); + let mut count = 0; let pruning_utxoset_read = self.pruning_utxoset_stores.read(); for (outpoint, entry) in pruning_utxoset_read.utxo_set.iterator().map(|r| r.unwrap()) { multiset.add_utxo(&outpoint, &entry); + count += 1; } assert_eq!(multiset.finalize(), commitment, "Updated pruning point utxo set does not match the header utxo commitment"); - info!("Pruning point UTXO commitment was verified correctly (sanity test)"); + info!("Pruning point UTXO commitment was verified correctly over {} UTXO entries (sanity test)", count); } fn prune(&self, new_pruning_point: Hash) { From 4e9781114b82071d836ec2b59bbf282bfd425a10 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 13:14:04 +0000 Subject: [PATCH 43/50] reduce log verbosity --- consensus/src/processes/ghostdag/ordering.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/src/processes/ghostdag/ordering.rs b/consensus/src/processes/ghostdag/ordering.rs index cb73c3398..83749c2e3 100644 --- a/consensus/src/processes/ghostdag/ordering.rs +++ b/consensus/src/processes/ghostdag/ordering.rs @@ -1,7 +1,7 @@ use 
std::cmp::Ordering; use kaspa_consensus_core::BlueWorkType; -use kaspa_core::warn; +use kaspa_core::debug; use kaspa_hashes::Hash; use kaspa_math::Uint192; use serde::{Deserialize, Serialize}; @@ -66,7 +66,7 @@ impl Date: Sun, 14 Jul 2024 14:49:49 +0000 Subject: [PATCH 44/50] fix an edge case important for tests --- consensus/src/processes/parents_builder.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/consensus/src/processes/parents_builder.rs b/consensus/src/processes/parents_builder.rs index 6e9fff943..2e6d5ae42 100644 --- a/consensus/src/processes/parents_builder.rs +++ b/consensus/src/processes/parents_builder.rs @@ -56,10 +56,17 @@ impl let origin_children_headers = origin_children.iter().copied().map(|parent| self.headers_store.get_header(parent).unwrap()).collect_vec(); - let mut parents = Vec::with_capacity(self.max_block_level as usize); + // First, handle the genesis parent case. This avoids the need to check this possibility within the loop below. 
+ if direct_parents == [self.genesis_hash] { + return vec![vec![self.genesis_hash]]; + } + + // Full capacity of max levels is unexpected + let mut parents = Vec::with_capacity(self.max_block_level as usize / 2); for block_level in 0..self.max_block_level { if direct_parent_headers.iter().all(|h| block_level <= h.block_level) { + // Optimize the common case where lower levels are identical to level 0 parents.push(direct_parents.iter().copied().collect_vec()); continue; } From 240c3ade694a1b2ae3e52f47eb268ad24598d237 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 16:49:08 +0000 Subject: [PATCH 45/50] relax proof revalidation requirement --- .../pipeline/pruning_processor/processor.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 546b5c37b..d0d4dbbc1 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -478,17 +478,18 @@ impl PruningProcessor { .collect() } - fn assert_proof_rebuilding(&self, ref_proof: Arc, new_pruning_point: Hash) { + fn assert_proof_rebuilding(&self, _ref_proof: Arc, new_pruning_point: Hash) { info!("Rebuilding the pruning proof after pruning data (sanity test)"); - let proof_hashes = ref_proof.iter().flatten().map(|h| h.hash).collect::>(); + // let proof_hashes = ref_proof.iter().flatten().map(|h| h.hash).collect::>(); let built_proof = self.pruning_proof_manager.build_pruning_point_proof(new_pruning_point); - let built_proof_hashes = built_proof.iter().flatten().map(|h| h.hash).collect::>(); - assert_eq!(proof_hashes.len(), built_proof_hashes.len(), "Rebuilt proof does not match the expected reference"); - for (i, (a, b)) in proof_hashes.into_iter().zip(built_proof_hashes).enumerate() { - if a != b { - panic!("Proof built following pruning does not match the previous proof: built[{}]={}, 
prev[{}]={}", i, b, i, a); - } - } + // let built_proof_hashes = built_proof.iter().flatten().map(|h| h.hash).collect::>(); + self.pruning_proof_manager.validate_pruning_point_proof(&built_proof).expect("Rebuilt proof does not validate correctly"); + // assert_eq!(proof_hashes.len(), built_proof_hashes.len(), "Rebuilt proof does not match the expected reference"); + // for (i, (a, b)) in proof_hashes.into_iter().zip(built_proof_hashes).enumerate() { + // if a != b { + // panic!("Proof built following pruning does not match the previous proof: built[{}]={}, prev[{}]={}", i, b, i, a); + // } + // } info!("Proof was rebuilt successfully following pruning"); } From 3999050b44621f7dbf9f7353919ed15d537ff63f Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 18:21:43 +0000 Subject: [PATCH 46/50] patch proof validation (disallowed for mainnet) --- consensus/src/processes/pruning_proof/mod.rs | 58 ++++++++++---------- kaspad/src/daemon.rs | 3 +- 2 files changed, 32 insertions(+), 29 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 34ae371db..ce4c9ba12 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -12,7 +12,7 @@ use std::{ }; use itertools::Itertools; -use kaspa_math::int::SignedInteger; +// use kaspa_math::int::SignedInteger; use parking_lot::{Mutex, RwLock}; use rocksdb::WriteBatch; @@ -638,7 +638,7 @@ impl PruningProofManager { let ghostdag_stores = stores_and_processes.ghostdag_stores; let pruning_read = self.pruning_point_store.read(); - let relations_read = self.relations_stores.read(); + // let relations_read = self.relations_stores.read(); let current_pp = pruning_read.get().unwrap().pruning_point; let current_pp_header = self.headers_store.get_header_with_block_level(current_pp).unwrap(); @@ -682,16 +682,16 @@ impl PruningProofManager { None, false, ); - let common_ancestor_blue_work = 
ghostdag_stores[level_idx].get_blue_work(common_ancestor).unwrap(); - let selected_tip_blue_work_diff = - SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(common_ancestor_blue_work); - for parent in self.parents_manager.parents_at_level(¤t_pp_header.header, level).iter().copied() { - let parent_blue_work = ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); - let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_blue_work); - if parent_blue_work_diff >= selected_tip_blue_work_diff { - return Err(PruningImportError::PruningProofInsufficientBlueWork); - } - } + // let common_ancestor_blue_work = ghostdag_stores[level_idx].get_blue_work(common_ancestor).unwrap(); + // let selected_tip_blue_work_diff = + // SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(common_ancestor_blue_work); + // for parent in self.parents_manager.parents_at_level(¤t_pp_header.header, level).iter().copied() { + // let parent_blue_work = ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); + // let parent_blue_work_diff = SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_blue_work); + // if parent_blue_work_diff >= selected_tip_blue_work_diff { + // return Err(PruningImportError::PruningProofInsufficientBlueWork); + // } + // } return Ok(()); } @@ -716,25 +716,27 @@ impl PruningProofManager { continue; } - match relations_read[level_idx].get_parents(current_pp).unwrap_option() { - Some(parents) => { - if parents - .iter() - .copied() - .any(|parent| ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) - { - return Ok(()); - } - } - None => { - // If the current pruning point doesn't have a parent at this level, we consider the proof state to be better. 
- return Ok(()); - } - } + return Ok(()); + + // match relations_read[level_idx].get_parents(current_pp).unwrap_option() { + // Some(parents) => { + // if parents + // .iter() + // .copied() + // .any(|parent| ghostdag_stores[level_idx].get_blue_score(parent).unwrap() < 2 * self.pruning_proof_m) + // { + // return Ok(()); + // } + // } + // None => { + // // If the current pruning point doesn't have a parent at this level, we consider the proof state to be better. + // return Ok(()); + // } + // } } drop(pruning_read); - drop(relations_read); + // drop(relations_read); drop(stores_and_processes.db_lifetime); Err(PruningImportError::PruningProofNotEnoughHeaders) diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 2df03bab8..1e01cd914 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -3,7 +3,7 @@ use std::{fs, path::PathBuf, process::exit, sync::Arc, time::Duration}; use async_channel::unbounded; use kaspa_consensus_core::{ config::ConfigBuilder, - errors::config::{ConfigError, ConfigResult}, + errors::config::{ConfigError, ConfigResult}, network::{NetworkId, NetworkType}, }; use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; use kaspa_core::{core::Core, info, trace}; @@ -204,6 +204,7 @@ pub fn create_core(args: Args, fd_total_budget: i32) -> (Arc, Arc (Arc, Arc) { let network = args.network(); + assert_ne!(network.network_type(), NetworkType::Mainnet, "Experimental version; Mainnet is disallowed"); let mut fd_remaining = fd_total_budget; let utxo_files_limit = if args.utxoindex { let utxo_files_limit = fd_remaining * 10 / 100; From efbb083b139c6cbe1cc46999fe4986595212725d Mon Sep 17 00:00:00 2001 From: coderofstuff <114628839+coderofstuff@users.noreply.github.com> Date: Sun, 14 Jul 2024 13:01:33 -0600 Subject: [PATCH 47/50] Revert "Various fixes" This reverts commit bc56e65d5dd93d17c00e12e9f2c05e0a924e24b5. This experimental commit requires a bit more thinking to apply, and optimization can be deferred. 
--- consensus/src/processes/pruning_proof/mod.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index 34ae371db..e0ca2a1e8 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -868,7 +868,8 @@ impl PruningProofManager { } if current_header.direct_parents().is_empty() // Stop at genesis - || (pp_header.header.blue_score >= current_header.blue_score + required_level_0_depth + // Need to ensure this does the same 2M+1 depth that block_at_depth does + || (pp_header.header.blue_score > current_header.blue_score + required_level_0_depth && intersected_with_required_block_chain) { break current_header; @@ -915,8 +916,9 @@ impl PruningProofManager { true, ); + // Need to ensure this does the same 2M+1 depth that block_at_depth does if has_required_block - && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() >= required_level_depth) + && (root == self.genesis_hash || ghostdag_store.get_blue_score(selected_tip).unwrap() > required_level_depth) { break Ok((ghostdag_store, selected_tip, root)); } @@ -1014,8 +1016,7 @@ impl PruningProofManager { let mut headers = Vec::with_capacity(2 * self.pruning_proof_m as usize); let mut queue = BinaryHeap::>::new(); let mut visited = BlockHashSet::new(); - // Still use "old_root" to make sure we use the minimum amount of records for the proof - queue.push(Reverse(SortableBlock::new(old_root, self.headers_store.get_header(old_root).unwrap().blue_work))); + queue.push(Reverse(SortableBlock::new(root, self.headers_store.get_header(root).unwrap().blue_work))); while let Some(current) = queue.pop() { let current = current.0.hash; if !visited.insert(current) { @@ -1157,7 +1158,7 @@ impl PruningProofManager { let mut current_gd = high_gd; let mut current = high; let mut res = vec![current]; - while current_gd.blue_score + depth > 
high_gd.blue_score { + while current_gd.blue_score + depth >= high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } @@ -1185,7 +1186,7 @@ impl PruningProofManager { .map_err(|err| PruningProofManagerInternalError::BlockAtDepth(format!("high: {high}, depth: {depth}, {err}")))?; let mut current_gd = high_gd; let mut current = high; - while current_gd.blue_score + depth > high_gd.blue_score { + while current_gd.blue_score + depth >= high_gd.blue_score { if current_gd.selected_parent.is_origin() { break; } From fde1d7c16053cef841c94ee1e8a63890689cc77b Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 22:24:07 +0000 Subject: [PATCH 48/50] lint --- kaspad/src/daemon.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 1e01cd914..15be08f91 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -3,7 +3,8 @@ use std::{fs, path::PathBuf, process::exit, sync::Arc, time::Duration}; use async_channel::unbounded; use kaspa_consensus_core::{ config::ConfigBuilder, - errors::config::{ConfigError, ConfigResult}, network::{NetworkId, NetworkType}, + errors::config::{ConfigError, ConfigResult}, + network::NetworkType, }; use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; use kaspa_core::{core::Core, info, trace}; From b39377766036a9fff32f9034d57e6c7fa8bd588c Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Sun, 14 Jul 2024 22:37:50 +0000 Subject: [PATCH 49/50] increase pruning depth so that new reachability depth is covered --- simpa/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 368b52344..c586fd786 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -314,7 +314,7 @@ fn apply_args_to_consensus_params(args: &Args, params: &mut Params) { params.legacy_timestamp_deviation_tolerance = 16; params.new_timestamp_deviation_tolerance = 16; 
params.sampled_difficulty_window_size = params.sampled_difficulty_window_size.min(32); - params.finality_depth = 128; + params.finality_depth = 512; params.merge_depth = 128; params.mergeset_size_limit = 32; params.pruning_depth = params.anticone_finalization_depth(); From aad317d9986070ca5cc64c0ce9bc0ea81c748504 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Mon, 15 Jul 2024 09:31:19 +0000 Subject: [PATCH 50/50] chain depth of 100 is sufficient --- consensus/core/src/config/constants.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/core/src/config/constants.rs b/consensus/core/src/config/constants.rs index bab5f8a32..0c021c99d 100644 --- a/consensus/core/src/config/constants.rs +++ b/consensus/core/src/config/constants.rs @@ -113,7 +113,7 @@ pub mod perf { use crate::config::params::Params; /// The default target depth for reachability reindexes. - pub const DEFAULT_REINDEX_DEPTH: u64 = 1000; + pub const DEFAULT_REINDEX_DEPTH: u64 = 100; /// The default slack interval used by the reachability /// algorithm to encounter for blocks out of the selected chain.