Skip to content

Commit

Permalink
Resolve #360 (#456)
Browse files Browse the repository at this point in the history
* Remove NetworkId from processor-messages

Because intent binds to the sender/receiver, the NetworkId isn't needed for intent.

The processor knows what the network is.

The coordinator knows which to use because it's sending this message to the
processor for that network.

Also removes the unused zeroize.

* ProcessorMessage::Completed use Session instead of key

* Move SubstrateSignId to Session

* Finish replacing key with session
  • Loading branch information
kayabaNerve authored Nov 26, 2023
1 parent b79cf8a commit 571195b
Show file tree
Hide file tree
Showing 31 changed files with 304 additions and 455 deletions.
1 change: 0 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 0 additions & 2 deletions common/db/src/create_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,12 +51,10 @@ macro_rules! create_db {
($($arg),*).encode()
)
}
#[allow(dead_code)]
pub fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
let key = $field_name::key($($arg),*);
txn.put(&key, borsh::to_vec(data).unwrap());
}
#[allow(dead_code)]
pub fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
getter.get($field_name::key($($arg),*)).map(|data| {
borsh::from_slice(data.as_ref()).unwrap()
Expand Down
8 changes: 0 additions & 8 deletions coordinator/src/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,18 +34,12 @@ impl<D: Db> MainDb<D> {
getter.get(Self::handled_message_key(network, id)).is_some()
}

fn in_tributary_key(set: ValidatorSet) -> Vec<u8> {
Self::main_key(b"in_tributary", set.encode())
}
fn active_tributaries_key() -> Vec<u8> {
Self::main_key(b"active_tributaries", [])
}
fn retired_tributary_key(set: ValidatorSet) -> Vec<u8> {
Self::main_key(b"retired_tributary", set.encode())
}
pub fn in_tributary<G: Get>(getter: &G, set: ValidatorSet) -> bool {
getter.get(Self::in_tributary_key(set)).is_some()
}
pub fn active_tributaries<G: Get>(getter: &G) -> (Vec<u8>, Vec<TributarySpec>) {
let bytes = getter.get(Self::active_tributaries_key()).unwrap_or(vec![]);
let mut bytes_ref: &[u8] = bytes.as_ref();
Expand All @@ -58,8 +52,6 @@ impl<D: Db> MainDb<D> {
(bytes, tributaries)
}
pub fn add_participating_in_tributary(txn: &mut D::Transaction<'_>, spec: &TributarySpec) {
txn.put(Self::in_tributary_key(spec.set()), []);

let key = Self::active_tributaries_key();
let (mut existing_bytes, existing) = Self::active_tributaries(txn);
for tributary in &existing {
Expand Down
103 changes: 34 additions & 69 deletions coordinator/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ pub mod processors;
use processors::Processors;

mod substrate;
use substrate::{CosignTransactions, SubstrateDb};
use substrate::CosignTransactions;

mod cosign_evaluator;
use cosign_evaluator::CosignEvaluator;
Expand Down Expand Up @@ -116,7 +116,7 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
.send(
set.network,
processor_messages::key_gen::CoordinatorMessage::GenerateKey {
id: processor_messages::key_gen::KeyGenId { set, attempt: 0 },
id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 },
params: frost::ThresholdParams::new(spec.t(), spec.n(), our_i.start).unwrap(),
shares: u16::from(our_i.end) - u16::from(our_i.start),
},
Expand Down Expand Up @@ -195,66 +195,50 @@ async fn handle_processor_message<D: Db, P: P2p>(
// We'll only receive these if we fired GenerateKey, which we'll only do if we're
// in-set, making the Tributary relevant
ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
key_gen::ProcessorMessage::Commitments { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::InvalidCommitments { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::Shares { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::InvalidShare { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::Blame { id, .. } => Some(id.set.session),
key_gen::ProcessorMessage::Commitments { id, .. } => Some(id.session),
key_gen::ProcessorMessage::InvalidCommitments { id, .. } => Some(id.session),
key_gen::ProcessorMessage::Shares { id, .. } => Some(id.session),
key_gen::ProcessorMessage::InvalidShare { id, .. } => Some(id.session),
key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => Some(id.session),
key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session),
},
// TODO: Review replacing key with Session in messages?
ProcessorMessage::Sign(inner_msg) => match inner_msg {
// We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing
sign::ProcessorMessage::InvalidParticipant { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
sign::ProcessorMessage::Preprocess { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
sign::ProcessorMessage::Share { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
sign::ProcessorMessage::InvalidParticipant { id, .. } => Some(id.session),
sign::ProcessorMessage::Preprocess { id, .. } => Some(id.session),
sign::ProcessorMessage::Share { id, .. } => Some(id.session),
// While the Processor's Scanner will always emit Completed, that's routed through the
// Signer and only becomes a ProcessorMessage::Completed if the Signer is present and
// confirms it
sign::ProcessorMessage::Completed { key, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, key).unwrap())
}
sign::ProcessorMessage::Completed { session, .. } => Some(*session),
},
ProcessorMessage::Coordinator(inner_msg) => match inner_msg {
// This is a special case as it's relevant to *all* Tributaries for this network
// This is a special case as it's relevant to *all* Tributaries for this network we're
// signing in
// It doesn't return a Tributary to become `relevant_tributary` though
coordinator::ProcessorMessage::SubstrateBlockAck { network, block, plans } => {
assert_eq!(
*network, msg.network,
"processor claimed to be a different network than it was for SubstrateBlockAck",
);

coordinator::ProcessorMessage::SubstrateBlockAck { block, plans } => {
// Get the sessions for these keys
let keys = plans.iter().map(|plan| plan.key.clone()).collect::<HashSet<_>>();
let mut sessions = vec![];
for key in keys {
let session = SubstrateDb::<D>::session_for_key(&txn, &key).unwrap();
// Only keep them if we're in the Tributary AND they haven't been retired
let set = ValidatorSet { network: *network, session };
if MainDb::<D>::in_tributary(&txn, set) && (!MainDb::<D>::is_tributary_retired(&txn, set))
{
sessions.push((session, key));
}
}
let sessions = plans
.iter()
.map(|plan| plan.session)
.filter(|session| {
!MainDb::<D>::is_tributary_retired(&txn, ValidatorSet { network, session: *session })
})
.collect::<HashSet<_>>();

// Ensure we have the Tributaries
for (session, _) in &sessions {
for session in &sessions {
if !tributaries.contains_key(session) {
return false;
}
}

for (session, key) in sessions {
for session in sessions {
let tributary = &tributaries[&session];
let plans = plans
.iter()
.filter_map(|plan| Some(plan.id).filter(|_| plan.key == key))
.filter_map(|plan| Some(plan.id).filter(|_| plan.session == session))
.collect::<Vec<_>>();
PlanIds::set(&mut txn, &tributary.spec.genesis(), *block, &plans);

Expand Down Expand Up @@ -286,18 +270,10 @@ async fn handle_processor_message<D: Db, P: P2p>(
None
}
// We'll only fire these if we are the Substrate signer, making the Tributary relevant
coordinator::ProcessorMessage::InvalidParticipant { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
coordinator::ProcessorMessage::CosignPreprocess { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
coordinator::ProcessorMessage::BatchPreprocess { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
coordinator::ProcessorMessage::SubstrateShare { id, .. } => {
Some(SubstrateDb::<D>::session_for_key(&txn, &id.key).unwrap())
}
coordinator::ProcessorMessage::InvalidParticipant { id, .. } => Some(id.session),
coordinator::ProcessorMessage::CosignPreprocess { id, .. } => Some(id.session),
coordinator::ProcessorMessage::BatchPreprocess { id, .. } => Some(id.session),
coordinator::ProcessorMessage::SubstrateShare { id, .. } => Some(id.session),
coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature } => {
let cosigned_block = CosignedBlock {
network,
Expand Down Expand Up @@ -462,11 +438,6 @@ async fn handle_processor_message<D: Db, P: P2p>(
}]
}
key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
assert_eq!(
id.set.network, msg.network,
"processor claimed to be a different network than it was for in InvalidShare",
);

// Check if the MuSig signature had any errors as if so, we need to provide
// RemoveParticipant
// As for the safety of calling error_generating_key_pair, the processor is presumed
Expand All @@ -490,11 +461,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
txs
}
key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
assert_eq!(
id.set.network, msg.network,
"processor claimed to be a different network than it was for in GeneratedKeyPair",
);
// TODO2: Also check the other KeyGenId fields
// TODO2: Check the KeyGenId fields

// Tell the Tributary the key pair, get back the share for the MuSig signature
let share = crate::tributary::generated_key_pair::<D>(
Expand All @@ -514,11 +481,9 @@ async fn handle_processor_message<D: Db, P: P2p>(
}
}
}
key_gen::ProcessorMessage::Blame { id, participant } => {
assert_eq!(
id.set.network, msg.network,
"processor claimed to be a different network than it was for in Blame",
);
// This is a response to the ordered VerifyBlame, which is why this satisfies the provided
// transaction's need to be perfectly ordered
key_gen::ProcessorMessage::Blame { id: _, participant } => {
vec![Transaction::RemoveParticipant(participant)]
}
},
Expand Down Expand Up @@ -556,7 +521,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
signed: Transaction::empty_signed(),
})]
}
sign::ProcessorMessage::Completed { key: _, id, tx } => {
sign::ProcessorMessage::Completed { session: _, id, tx } => {
let r = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
#[allow(non_snake_case)]
let R = <Ristretto as Ciphersuite>::generator() * r.deref();
Expand Down
26 changes: 4 additions & 22 deletions coordinator/src/substrate/db.rs
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
use scale::{Encode, Decode};

pub use serai_db::*;
use scale::Encode;

use serai_client::{
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
validator_sets::primitives::{Session, ValidatorSet},
};

pub use serai_db::*;

create_db! {
SubstrateDb {
CosignTriggered: () -> (),
Expand Down Expand Up @@ -86,24 +86,6 @@ impl<D: Db> SubstrateDb<D> {
txn.put(Self::event_key(&id, index), []);
}

fn session_key(key: &[u8]) -> Vec<u8> {
Self::substrate_key(b"session", key)
}
pub fn session_for_key<G: Get>(getter: &G, key: &[u8]) -> Option<Session> {
getter.get(Self::session_key(key)).map(|bytes| Session::decode(&mut bytes.as_ref()).unwrap())
}
pub fn save_session_for_keys(txn: &mut D::Transaction<'_>, key_pair: &KeyPair, session: Session) {
let session = session.encode();
let key_0 = Self::session_key(&key_pair.0);
let existing = txn.get(&key_0);
// This may trigger if 100% of a DKG are malicious, and they create a key equivalent to a prior
// key. Since it requires 100% maliciousness, not just 67% maliciousness, this will only assert
// in a modified-to-be-malicious stack, making it safe
assert!(existing.is_none() || (existing.as_ref() == Some(&session)));
txn.put(key_0, session.clone());
txn.put(Self::session_key(&key_pair.1), session);
}

fn batch_instructions_key(network: NetworkId, id: u32) -> Vec<u8> {
Self::substrate_key(b"batch", (network, id).encode())
}
Expand Down
21 changes: 4 additions & 17 deletions coordinator/src/substrate/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ use tokio::{sync::mpsc, time::sleep};
use crate::{
Db,
processors::Processors,
tributary::{TributarySpec, SeraiBlockNumber, KeyPairDb},
tributary::{TributarySpec, SeraiBlockNumber},
};

mod db;
Expand Down Expand Up @@ -116,19 +116,13 @@ async fn handle_new_set<D: Db>(
Ok(())
}

async fn handle_key_gen<D: Db, Pro: Processors>(
db: &mut D,
async fn handle_key_gen<Pro: Processors>(
processors: &Pro,
serai: &Serai,
block: &Block,
set: ValidatorSet,
key_pair: KeyPair,
) -> Result<(), SeraiError> {
// This has to be saved *before* we send ConfirmKeyPair
let mut txn = db.txn();
SubstrateDb::<D>::save_session_for_keys(&mut txn, &key_pair, set.session);
txn.commit();

processors
.send(
set.network,
Expand All @@ -144,7 +138,7 @@ async fn handle_key_gen<D: Db, Pro: Processors>(
// block which has a time greater than or equal to the Serai time
.unwrap_or(BlockHash([0; 32])),
},
set,
session: set.session,
key_pair,
},
)
Expand Down Expand Up @@ -232,7 +226,6 @@ async fn handle_batch_and_burns<D: Db, Pro: Processors>(
serai_time: block.time().unwrap() / 1000,
network_latest_finalized_block,
},
network,
block: block.number(),
burns: burns.remove(&network).unwrap(),
batches: batches.remove(&network).unwrap(),
Expand Down Expand Up @@ -291,13 +284,7 @@ async fn handle_block<D: Db, Pro: Processors>(
if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
log::info!("found fresh key gen event {:?}", key_gen);
if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen {
// Immediately ensure this key pair is accessible to the tributary, before we fire any
// events off of it
let mut txn = db.0.txn();
KeyPairDb::set(&mut txn, set, &key_pair);
txn.commit();

handle_key_gen(&mut db.0, processors, serai, &block, set, key_pair).await?;
handle_key_gen(processors, serai, &block, set, key_pair).await?;
} else {
panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
}
Expand Down
8 changes: 4 additions & 4 deletions coordinator/src/tests/tributary/dkg.rs
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ async fn dkg_test() {
assert_eq!(
msgs.pop_front().unwrap(),
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { set: spec.set(), attempt: 0 },
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);
Expand All @@ -150,7 +150,7 @@ async fn dkg_test() {
assert_eq!(
msgs.pop_front().unwrap(),
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { set: spec.set(), attempt: 0 },
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);
Expand Down Expand Up @@ -214,7 +214,7 @@ async fn dkg_test() {
// Each scanner should emit a distinct shares message
let shares_for = |i: usize| {
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
id: KeyGenId { set: spec.set(), attempt: 0 },
id: KeyGenId { session: spec.set().session, attempt: 0 },
shares: vec![txs
.iter()
.enumerate()
Expand Down Expand Up @@ -267,7 +267,7 @@ async fn dkg_test() {
assert_eq!(
msgs.pop_front().unwrap(),
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
id: KeyGenId { set: spec.set(), attempt: 0 },
id: KeyGenId { session: spec.set().session, attempt: 0 },
commitments: expected_commitments
})
);
Expand Down
3 changes: 1 addition & 2 deletions coordinator/src/tributary/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use zeroize::Zeroizing;
use ciphersuite::{Ciphersuite, Ristretto, group::GroupEncoding};
use frost::Participant;

use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair};
use serai_client::validator_sets::primitives::KeyPair;

use processor_messages::coordinator::SubstrateSignableId;

Expand Down Expand Up @@ -50,7 +50,6 @@ create_db!(
PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
CurrentlyCompletingKeyPair: (genesis: [u8; 32]) -> KeyPair,
KeyPairDb: (set: ValidatorSet) -> KeyPair,
AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32,
DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,
Expand Down
Loading

0 comments on commit 571195b

Please sign in to comment.