Add a --create-object-mappings node CLI option #3125

Merged 2 commits on Oct 14, 2024
4 changes: 0 additions & 4 deletions crates/sc-consensus-subspace/Cargo.toml
@@ -62,7 +62,3 @@ tracing = "0.1.40"
#substrate-test-runtime = { version = "2.0.0", path = "../../substrate/substrate-test-runtime" }
#substrate-test-runtime-client = { version = "2.0.0", path = "../../substrate/substrate-test-runtime-client" }
#tokio = "1.27.0"

[features]
# Temporary feature, TODO: replace with a CLI option
full-archive = []
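As the TODO above notes, the `full-archive` feature was always meant to become a CLI option, and the rest of this diff turns that switch into a runtime value instead of a compile-time one. A minimal, stand-alone sketch of the difference (illustrative stand-in functions only, not the archiver's real code):

```rust
// Illustrative only: stand-in functions, not the real archiver API.

// Before this PR: the behaviour was fixed at build time, so emitting mappings
// for the full history meant recompiling the node with `--features full-archive`.
fn emit_mappings_before() -> bool {
    cfg!(feature = "full-archive")
}

// After this PR: the behaviour is chosen at runtime from the CLI flag
// `--create-object-mappings` (or implied by `--dev`).
fn emit_mappings_after(create_object_mappings: bool) -> bool {
    create_object_mappings
}

fn main() {
    println!("compile-time switch: {}", emit_mappings_before());
    println!("runtime switch:      {}", emit_mappings_after(true));
}
```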
104 changes: 66 additions & 38 deletions crates/sc-consensus-subspace/src/archiver.rs
@@ -360,6 +360,7 @@ fn find_last_archived_block<Block, Client, AS>(
client: &Client,
segment_headers_store: &SegmentHeadersStore<AS>,
best_block_to_archive: NumberFor<Block>,
create_object_mappings: bool,
) -> sp_blockchain::Result<Option<(SegmentHeader, SignedBlock<Block>, BlockObjectMapping)>>
where
Block: BlockT,
@@ -399,13 +400,17 @@ where
.block(last_archived_block_hash)?
.expect("Last archived block must always be retrievable; qed");

let block_object_mappings = client
.runtime_api()
.extract_block_object_mapping(
*last_archived_block.block.header().parent_hash(),
last_archived_block.block.clone(),
)
.unwrap_or_default();
let block_object_mappings = if create_object_mappings {
client
.runtime_api()
.extract_block_object_mapping(
*last_archived_block.block.header().parent_hash(),
last_archived_block.block.clone(),
)
.unwrap_or_default()
} else {
BlockObjectMapping::default()
};

return Ok(Some((
last_segment_header,
@@ -537,6 +542,7 @@ fn initialize_archiver<Block, Client, AS>(
segment_headers_store: &SegmentHeadersStore<AS>,
subspace_link: &SubspaceLink<Block>,
client: &Client,
create_object_mappings: bool,
) -> sp_blockchain::Result<InitializedArchiver<Block>>
where
Block: BlockT,
@@ -562,8 +568,12 @@
best_block_to_archive = best_block_number;
}

let maybe_last_archived_block =
find_last_archived_block(client, segment_headers_store, best_block_to_archive.into())?;
let maybe_last_archived_block = find_last_archived_block(
client,
segment_headers_store,
best_block_to_archive.into(),
create_object_mappings,
)?;

let have_last_segment_header = maybe_last_archived_block.is_some();
let mut best_archived_block = None;
@@ -654,12 +664,16 @@ where
.block(block_hash)?
.expect("All blocks since last archived must be present; qed");

let block_object_mappings = runtime_api
.extract_block_object_mapping(
*block.block.header().parent_hash(),
block.block.clone(),
)
.unwrap_or_default();
let block_object_mappings = if create_object_mappings {
runtime_api
.extract_block_object_mapping(
*block.block.header().parent_hash(),
block.block.clone(),
)
.unwrap_or_default()
} else {
BlockObjectMapping::default()
};

Ok((block, block_object_mappings))
},
@@ -686,14 +700,10 @@ where
);

let block_outcome = archiver.add_block(encoded_block, block_object_mappings, false);
// RPC clients only want these mappings in full mapping mode
// TODO: turn this into a command-line argument named `--full-mapping`
if cfg!(feature = "full-archive") {
send_object_mapping_notification(
&subspace_link.object_mapping_notification_sender,
block_outcome.object_mapping,
);
}
send_object_mapping_notification(
&subspace_link.object_mapping_notification_sender,
block_outcome.object_mapping,
);
let new_segment_headers: Vec<SegmentHeader> = block_outcome
.archived_segments
.iter()
@@ -775,8 +785,8 @@ fn finalize_block<Block, Backend, Client>(
/// processing, which is necessary for ensuring that when the next block is imported, inherents will
/// contain segment header of newly archived block (must happen exactly in the next block).
///
/// When a block with object mappings is produced, notification ([`SubspaceLink::object_mapping_notification_stream`])
/// will be sent.
/// If `create_object_mappings` is set, when a block with object mappings is archived, notification
/// ([`SubspaceLink::object_mapping_notification_stream`]) will be sent.
///
/// Once segment header is archived, notification ([`SubspaceLink::archived_segment_notification_stream`])
/// will be sent and archiver will be paused until all receivers have provided an acknowledgement
@@ -791,6 +801,7 @@ pub fn create_subspace_archiver<Block, Backend, Client, AS, SO>(
client: Arc<Client>,
sync_oracle: SubspaceSyncOracle<SO>,
telemetry: Option<TelemetryHandle>,
create_object_mappings: bool,
) -> sp_blockchain::Result<impl Future<Output = sp_blockchain::Result<()>> + Send + 'static>
where
Block: BlockT,
@@ -813,6 +824,7 @@
&segment_headers_store,
&subspace_link,
client.as_ref(),
create_object_mappings,
)?)
} else {
None
@@ -826,7 +838,12 @@
Ok(async move {
let archiver = match maybe_archiver {
Some(archiver) => archiver,
None => initialize_archiver(&segment_headers_store, &subspace_link, client.as_ref())?,
None => initialize_archiver(
&segment_headers_store,
&subspace_link,
client.as_ref(),
create_object_mappings,
)?,
};
let confirmation_depth_k = subspace_link.chain_constants.confirmation_depth_k().into();

@@ -864,9 +881,9 @@ where
"Checking if block needs to be skipped"
);

// TODO: turn this into a command-line argument named `--full-mapping`
let skip_last_archived_blocks = last_archived_block_number > block_number_to_archive
&& !cfg!(feature = "full-archive");
// Skip archived blocks, unless we're producing object mappings for the full history
let skip_last_archived_blocks =
last_archived_block_number > block_number_to_archive && !create_object_mappings;
if best_archived_block_number >= block_number_to_archive || skip_last_archived_blocks {
// This block was already archived, skip
debug!(
@@ -887,7 +904,12 @@
InitializedArchiver {
archiver,
best_archived_block: (best_archived_block_hash, best_archived_block_number),
} = initialize_archiver(&segment_headers_store, &subspace_link, client.as_ref())?;
} = initialize_archiver(
&segment_headers_store,
&subspace_link,
client.as_ref(),
create_object_mappings,
)?;

if best_archived_block_number + One::one() == block_number_to_archive {
// As expected, can continue now
@@ -928,6 +950,7 @@ where
subspace_link.archived_segment_notification_sender.clone(),
best_archived_block_hash,
block_number_to_archive,
create_object_mappings,
)
.await?;
}
@@ -948,6 +971,7 @@ async fn archive_block<Block, Backend, Client, AS, SO>(
archived_segment_notification_sender: SubspaceNotificationSender<ArchivedSegmentNotification>,
best_archived_block_hash: Block::Hash,
block_number_to_archive: NumberFor<Block>,
create_object_mappings: bool,
) -> sp_blockchain::Result<(Block::Hash, NumberFor<Block>)>
where
Block: BlockT,
@@ -992,14 +1016,18 @@ where
)));
}

let block_object_mappings = client
.runtime_api()
.extract_block_object_mapping(parent_block_hash, block.block.clone())
.map_err(|error| {
sp_blockchain::Error::Application(
format!("Failed to retrieve block object mappings: {error}").into(),
)
})?;
let block_object_mappings = if create_object_mappings {
client
.runtime_api()
.extract_block_object_mapping(parent_block_hash, block.block.clone())
.map_err(|error| {
sp_blockchain::Error::Application(
format!("Failed to retrieve block object mappings: {error}").into(),
)
})?
} else {
BlockObjectMapping::default()
};

let encoded_block = encode_block(block);
debug!(
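Two decisions recur throughout this file: mappings are only extracted from the runtime when `create_object_mappings` is set (otherwise an empty `BlockObjectMapping::default()` is used), and blocks below the last archived block are only re-visited when mappings are requested, so that mappings can be produced for the already-archived history. A self-contained sketch of both, using plain integers and a stand-in mapping type instead of the real runtime API:

```rust
// Stand-in for subspace_core_primitives::objects::BlockObjectMapping.
#[derive(Debug, Default)]
struct BlockObjectMapping(Vec<u32>);

// Stand-in for the (potentially expensive) runtime API call
// `extract_block_object_mapping`.
fn extract_block_object_mapping() -> BlockObjectMapping {
    BlockObjectMapping(vec![1, 2, 3])
}

// Gate used in find_last_archived_block / initialize_archiver / archive_block:
// skip the runtime call entirely when mappings were not requested.
fn block_object_mappings(create_object_mappings: bool) -> BlockObjectMapping {
    if create_object_mappings {
        extract_block_object_mapping()
    } else {
        BlockObjectMapping::default()
    }
}

// Skip decision from the archiver task: already-archived blocks are skipped,
// unless we are producing object mappings for the full history.
fn should_skip_block(
    best_archived_block_number: u64,
    last_archived_block_number: u64,
    block_number_to_archive: u64,
    create_object_mappings: bool,
) -> bool {
    let skip_last_archived_blocks =
        last_archived_block_number > block_number_to_archive && !create_object_mappings;
    best_archived_block_number >= block_number_to_archive || skip_last_archived_blocks
}

fn main() {
    assert!(block_object_mappings(false).0.is_empty());
    assert_eq!(block_object_mappings(true).0.len(), 3);

    // Block 90 was archived long ago (last archived block is 100):
    // it is skipped without mappings, but re-visited when mappings are on.
    assert!(should_skip_block(80, 100, 90, false));
    assert!(!should_skip_block(80, 100, 90, true));
    println!("ok");
}
```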
@@ -199,6 +199,7 @@ fn main() -> Result<(), Error> {
base: consensus_chain_config,
// Domain node needs slots notifications for bundle production.
force_new_slot_notifications: true,
create_object_mappings: true,
subspace_networking: SubspaceNetworking::Create { config: dsn_config },
dsn_piece_getter: None,
sync: Default::default(),
11 changes: 11 additions & 0 deletions crates/subspace-node/src/commands/run/consensus.rs
@@ -333,6 +333,7 @@ pub(super) struct ConsensusChainOptions {
/// * `--tmp` (unless `--base-path` specified explicitly)
/// * `--force-synced`
/// * `--force-authoring`
/// * `--create-object-mappings`
/// * `--allow-private-ips`
/// * `--rpc-cors all` (unless specified explicitly)
/// * `--dsn-disable-bootstrap-on-start`
@@ -389,6 +390,13 @@
#[arg(long)]
force_authoring: bool,

/// Create object mappings for new blocks, and blocks that have already been archived.
/// By default, mappings are not created for any blocks.
///
/// --dev mode enables this option automatically.
#[arg(long)]
create_object_mappings: bool,

/// External entropy, used initially when PoT chain starts to derive the first seed
#[arg(long)]
pot_external_entropy: Option<String>,
@@ -446,6 +454,7 @@ pub(super) fn create_consensus_chain_configuration(
pool_config,
mut force_synced,
mut force_authoring,
mut create_object_mappings,
pot_external_entropy,
dsn_options,
storage_monitor,
@@ -466,6 +475,7 @@
tmp = true;
force_synced = true;
force_authoring = true;
create_object_mappings = true;
network_options.allow_private_ips = true;
timekeeper_options.timekeeper = true;

@@ -677,6 +687,7 @@ pub(super) fn create_consensus_chain_configuration(
base: consensus_chain_config,
// Domain node needs slots notifications for bundle production.
force_new_slot_notifications: domains_enabled,
create_object_mappings,
subspace_networking: SubspaceNetworking::Create { config: dsn_config },
dsn_piece_getter: None,
sync,
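The flag added above is a plain clap boolean that `--dev` flips on inside `create_consensus_chain_configuration`. A hypothetical, trimmed-down sketch of that interaction (assumed struct and binary names; the real options struct has many more fields, and the actual invocation would look roughly like `subspace-node run --create-object-mappings`):

```rust
// Hypothetical, trimmed-down version of the options handling; only the two
// flags relevant here are included.
use clap::Parser;

#[derive(Parser, Debug)]
struct RunOptions {
    /// Create object mappings for new blocks, and blocks that have already
    /// been archived. Off by default.
    #[arg(long)]
    create_object_mappings: bool,

    /// Development mode: enables --create-object-mappings (among other flags)
    /// automatically.
    #[arg(long)]
    dev: bool,
}

fn main() {
    let mut options = RunOptions::parse();

    if options.dev {
        // Mirrors the dev-mode block in create_consensus_chain_configuration().
        options.create_object_mappings = true;
    }

    println!("create_object_mappings = {}", options.create_object_mappings);
}
```

Running the sketch with `--dev` prints `create_object_mappings = true` even though the flag was not passed explicitly, which matches the help text's "`--dev` mode enables this option automatically".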
2 changes: 2 additions & 0 deletions crates/subspace-service/src/config.rs
@@ -302,6 +302,8 @@ pub struct SubspaceConfiguration {
/// Whether slot notifications need to be present even if node is not responsible for block
/// authoring.
pub force_new_slot_notifications: bool,
/// Create object mappings for new blocks, and blocks that have already been archived.
pub create_object_mappings: bool,
/// Subspace networking (DSN).
pub subspace_networking: SubspaceNetworking,
/// DSN piece getter
1 change: 1 addition & 0 deletions crates/subspace-service/src/lib.rs
@@ -1043,6 +1043,7 @@ where
client.clone(),
sync_oracle.clone(),
telemetry.as_ref().map(|telemetry| telemetry.handle()),
config.create_object_mappings,
)
})
.map_err(ServiceError::Client)?;