pass timeouts and signer key ids in config for byzantine coordinators; return errors if nonce gathering times out, or if we no longer have a threshold of non-malicious signers
xoloki committed Oct 27, 2023
1 parent 66e346d commit 609c12e
Showing 3 changed files with 197 additions and 16 deletions.
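For orientation before the diffs: callers reach the new knobs through the `Config::with_timeouts` constructor added in this commit. A minimal sketch of wiring it up — the counts, timeouts, and key layout below are illustrative, and `Scalar::random(&mut OsRng)` is assumed available on the crate's re-exported `Scalar`:

use std::time::Duration;
use hashbrown::{HashMap, HashSet};
use rand_core::OsRng;
use wsts::{state_machine::coordinator::Config, Scalar};

fn make_config() -> Config {
    let mut rng = OsRng;
    let message_private_key = Scalar::random(&mut rng);

    // Map each signer_id to the key_ids it controls, mirroring the
    // layout that setup_with_timeouts builds in the tests below.
    let mut signer_key_ids: HashMap<u32, HashSet<u32>> = HashMap::new();
    signer_key_ids.insert(0, [0u32, 1].into_iter().collect());
    signer_key_ids.insert(1, [2u32, 3].into_iter().collect());

    Config::with_timeouts(
        2,                                // num_signers
        4,                                // num_keys
        3,                                // threshold
        message_private_key,
        Some(Duration::from_millis(128)), // nonce_timeout
        Some(Duration::from_millis(128)), // sign_timeout
        signer_key_ids,
    )
}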
136 changes: 124 additions & 12 deletions src/state_machine/coordinator/fire.rs
@@ -11,7 +11,7 @@ use crate::{
},
state_machine::{
coordinator::{Config, Coordinator as CoordinatorTrait, Error, State},
OperationResult, StateMachine,
OperationResult, SignError, StateMachine,
},
taproot::SchnorrProof,
traits::Aggregator as AggregatorTrait,
@@ -53,6 +53,8 @@ pub struct Coordinator<Aggregator: AggregatorTrait> {
aggregator: Aggregator,
nonce_start: Option<Instant>,
sign_start: Option<Instant>,
malicious_signer_ids: HashSet<u32>,
malicious_key_ids: HashSet<u32>,
}

impl<Aggregator: AggregatorTrait> Coordinator<Aggregator> {
@@ -70,14 +72,46 @@ impl<Aggregator: AggregatorTrait> Coordinator<Aggregator> {
State::NonceGather(_is_taproot, _merkle_root) => {
if let Some(start) = self.nonce_start {
if let Some(timeout) = self.config.nonce_timeout {
if now.duration_since(start) > timeout {}
if now.duration_since(start) > timeout {
warn!("Timeout gathering nonces");
return Ok((
None,
Some(OperationResult::SignError(SignError::NonceTimeout)),
));
}
}
}
}
State::SigShareGather(_is_taproot, _merkle_root) => {
State::SigShareGather(is_taproot, merkle_root) => {
if let Some(start) = self.sign_start {
if let Some(timeout) = self.config.sign_timeout {
if now.duration_since(start) > timeout {}
if now.duration_since(start) > timeout {
warn!("Timeout gathering signature shares for signing round {} iteration {}", self.current_sign_id, self.current_sign_iter_id);
for signer_id in &self.sign_wait_signer_ids {
warn!("Mark signer {} as malicious", signer_id);
self.malicious_signer_ids.insert(*signer_id);
for key_id in &self.config.signer_key_ids[signer_id] {
debug!("Mark key {} as malicious", key_id);
self.malicious_key_ids.insert(*key_id);
}
}

if self.config.num_keys - (self.malicious_key_ids.len() as u32)
< self.config.threshold
{
warn!("Too many malicious signers to continue");
return Ok((
None,
Some(OperationResult::SignError(
SignError::InsufficientSigners,
)),
));
}

self.move_to(State::NonceRequest(is_taproot, merkle_root))?;
let packet = self.request_nonces(is_taproot, merkle_root)?;
return Ok((Some(packet), None));
}
}
}
}
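To make the abort arithmetic above concrete: every key id owned by a timed-out signer is marked malicious, and a retry is only issued while the honest keys still cover the threshold. A standalone sketch of the same check, with illustrative numbers:

// Mirrors the `num_keys - malicious_key_ids.len() < threshold` test above.
fn can_retry(num_keys: u32, num_malicious_keys: u32, threshold: u32) -> bool {
    num_keys - num_malicious_keys >= threshold
}

fn main() {
    // 10 keys with threshold 7: losing 3 keys still permits a retry,
    // while losing 4 yields SignError::InsufficientSigners instead.
    assert!(can_retry(10, 3, 7));
    assert!(!can_retry(10, 4, 7));
}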
@@ -291,8 +325,9 @@ impl<Aggregator: AggregatorTrait> Coordinator<Aggregator> {
self.public_nonces.clear();
self.nonce_recv_key_ids.clear();
self.sign_wait_signer_ids.clear();
self.current_sign_iter_id = self.current_sign_iter_id.wrapping_add(1);
info!(
"Sign Round {} Nonce round {} Requesting Nonces",
"Sign round {} iteration {} Requesting Nonces",
self.current_sign_id, self.current_sign_iter_id,
);
let nonce_request = NonceRequest {
@@ -335,6 +370,21 @@ impl<Aggregator: AggregatorTrait> Coordinator<Aggregator> {
));
}

if self
.malicious_signer_ids
.contains(&nonce_response.signer_id)
{
info!(
"Sign round {} iteration {} received malicious NonceResponse from signer {} ({}/{})",
nonce_response.sign_id,
nonce_response.sign_iter_id,
nonce_response.signer_id,
self.nonce_recv_key_ids.len(),
self.config.threshold,
);
return Err(Error::MaliciousSigner(nonce_response.signer_id));
}

self.public_nonces
.insert(nonce_response.signer_id, nonce_response.clone());
self.sign_wait_signer_ids.insert(nonce_response.signer_id);
@@ -344,7 +394,7 @@ impl<Aggregator: AggregatorTrait> Coordinator<Aggregator> {
self.nonce_recv_key_ids.insert(*key_id);
}
info!(
"Sign round {} nonce round {} received NonceResponse from signer {} ({}/{})",
"Sign round {} iteration {} received NonceResponse from signer {} ({}/{})",
nonce_response.sign_id,
nonce_response.sign_iter_id,
nonce_response.signer_id,
@@ -546,8 +596,10 @@ impl<Aggregator: AggregatorTrait> StateMachine<State, Error> for Coordinator<Aggregator> {
}
State::DkgPrivateDistribute => prev_state == &State::DkgPublicGather,
State::DkgEndGather => prev_state == &State::DkgPrivateDistribute,
State::NonceRequest(_, _) => {
prev_state == &State::Idle || prev_state == &State::DkgEndGather
State::NonceRequest(is_taproot, merkle_root) => {
prev_state == &State::Idle
|| prev_state == &State::DkgEndGather
|| prev_state == &State::SigShareGather(*is_taproot, *merkle_root)
}
State::NonceGather(is_taproot, merkle_root) => {
prev_state == &State::NonceRequest(*is_taproot, *merkle_root)
@@ -597,6 +649,8 @@ impl<Aggregator: AggregatorTrait> CoordinatorTrait for Coordinator<Aggregator> {
state: State::Idle,
nonce_start: None,
sign_start: None,
malicious_signer_ids: Default::default(),
malicious_key_ids: Default::default(),
}
}

@@ -685,6 +739,8 @@ impl<Aggregator: AggregatorTrait> CoordinatorTrait for Coordinator<Aggregator> {
self.nonce_recv_key_ids.clear();
self.sign_recv_key_ids.clear();
self.sign_wait_signer_ids.clear();
self.nonce_start = None;
self.sign_start = None;
}
}

@@ -697,16 +753,17 @@ pub mod test {
fire::Coordinator as FireCoordinator,
test::{
coordinator_state_machine, feedback_messages, new_coordinator,
process_inbound_messages, setup, start_dkg_round,
process_inbound_messages, setup, setup_with_timeouts, start_dkg_round,
},
Config, Coordinator as CoordinatorTrait, State,
},
OperationResult,
OperationResult, SignError,
},
traits::{Aggregator as AggregatorTrait, Signer as SignerTrait},
v1, v2, Point, Scalar,
};
use rand_core::OsRng;
use std::{thread, time::Duration};

#[test]
fn new_coordinator_v1() {
@@ -1015,7 +1072,12 @@ pub mod test {
let num_signers = 5;
let keys_per_signer = 2;
let (mut coordinator, mut signers) =
setup::<FireCoordinator<Aggregator>, Signer>(num_signers, keys_per_signer);
setup_with_timeouts::<FireCoordinator<Aggregator>, Signer>(
num_signers,
keys_per_signer,
Some(Duration::from_millis(128)),
Some(Duration::from_millis(128)),
);
let config = coordinator.get_config();

// We have started a dkg round
@@ -1092,6 +1154,27 @@ pub mod test {

assert!(outbound_messages.is_empty());

// Sleep long enough to hit the timeout
thread::sleep(Duration::from_millis(256));

let (outbound_messages, operation_results) = insufficient_coordinator
.process_inbound_messages(&[])
.unwrap();

assert!(outbound_messages.is_empty());
assert_eq!(operation_results.len(), 1);
assert_eq!(
insufficient_coordinator.state,
State::NonceGather(is_taproot, merkle_root)
);
match &operation_results[0] {
OperationResult::SignError(sign_error) => match sign_error {
SignError::NonceTimeout => {}
_ => panic!("Expected SignError::NonceTimeout"),
},
_ => panic!("Expected OperationResult::SignError"),
}

// Start a new signing round with a sufficient number of signers for nonces but not sig shares
let mut insufficient_coordinator = coordinator.clone();
let mut insufficient_signers = signers.clone();
@@ -1118,9 +1201,10 @@

assert_eq!(outbound_messages.len(), 1);

let mut malicious = Vec::new();
// now remove signers so the number is insufficient
for _ in 0..num_signers_to_remove {
insufficient_signers.pop();
malicious.push(insufficient_signers.pop());
}

// Send the SignatureShareRequest message to all signers and share their responses with the coordinator and signers
@@ -1136,5 +1220,33 @@
insufficient_coordinator.state,
State::SigShareGather(is_taproot, merkle_root)
);

// Sleep long enough to hit the timeout
thread::sleep(Duration::from_millis(256));

let (outbound_messages, operation_results) = insufficient_coordinator
.process_inbound_messages(&[])
.unwrap();

assert_eq!(outbound_messages.len(), 1);
assert_eq!(operation_results.len(), 0);
assert_eq!(
insufficient_coordinator.state,
State::NonceGather(is_taproot, merkle_root)
);

// Send the NonceRequest message to all signers and share their responses with the coordinator and signers
let (outbound_messages, operation_results) = feedback_messages(
&mut insufficient_coordinator,
&mut insufficient_signers,
&outbound_messages,
);
assert!(outbound_messages.is_empty());
assert!(operation_results.is_empty());

assert_eq!(
insufficient_coordinator.state,
State::NonceGather(is_taproot, merkle_root)
);
}
}
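A note between the two files: the tests above drive the timeout path by sleeping past the deadline and then calling `process_inbound_messages(&[])`, so the deadline check runs even with no traffic. A caller's event loop would poll the same way on a timer and branch on the two new failure results; a sketch, assuming the variants exactly as this commit defines them:

use wsts::state_machine::{OperationResult, SignError};

fn handle_results(operation_results: &[OperationResult]) {
    for result in operation_results {
        match result {
            OperationResult::SignError(SignError::NonceTimeout) => {
                // Too few nonces arrived before nonce_timeout fired.
            }
            OperationResult::SignError(SignError::InsufficientSigners) => {
                // Enough signers were marked malicious that the threshold is unreachable.
            }
            _ => {}
        }
    }
}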
39 changes: 36 additions & 3 deletions src/state_machine/coordinator/mod.rs
@@ -2,6 +2,7 @@ use crate::{
common::MerkleRoot, errors::AggregatorError, net::Packet, state_machine::OperationResult,
Point, Scalar,
};
use hashbrown::{HashMap, HashSet};
use std::time::Duration;

#[derive(Clone, Debug, PartialEq)]
@@ -42,6 +43,9 @@ pub enum Error {
/// A bad sign_iter_id in received message
#[error("Bad sign_iter_id: got {0} expected {1}")]
BadSignIterId(u64, u64),
/// A malicious signer sent the received message
#[error("Malicious signer {0}")]
MaliciousSigner(u32),
/// SignatureAggregator error
#[error("Aggregator: {0}")]
Aggregator(AggregatorError),
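The new `MaliciousSigner` variant is what `gather_nonces` in fire.rs returns when a signer already on the malicious list keeps responding. How to react is the caller's policy; a hypothetical helper sketching one choice (drop such packets, abort on everything else):

use wsts::state_machine::coordinator::Error;

// Hypothetical policy: packets from known-malicious signers are dropped,
// while any other coordinator error aborts the round.
fn is_ignorable(err: &Error) -> bool {
    matches!(err, Error::MaliciousSigner(_))
}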
@@ -80,6 +84,8 @@ pub struct Config {
pub nonce_timeout: Option<Duration>,
/// timeout to gather signature shares
pub sign_timeout: Option<Duration>,
/// map of signer_id to controlled key_ids
pub signer_key_ids: HashMap<u32, HashSet<u32>>,
}

impl Config {
@@ -97,6 +103,7 @@ impl Config {
message_private_key,
nonce_timeout: None,
sign_timeout: None,
signer_key_ids: Default::default(),
}
}

@@ -108,6 +115,7 @@
message_private_key: Scalar,
nonce_timeout: Option<Duration>,
sign_timeout: Option<Duration>,
signer_key_ids: HashMap<u32, HashSet<u32>>,
) -> Self {
Config {
num_signers,
Expand All @@ -116,6 +124,7 @@ impl Config {
message_private_key,
nonce_timeout,
sign_timeout,
signer_key_ids,
}
}
}
@@ -166,9 +175,12 @@ pub mod fire;

#[cfg(test)]
pub mod test {
use hashbrown::HashMap;
use hashbrown::{HashMap, HashSet};
use rand_core::OsRng;
use std::sync::atomic::{AtomicBool, Ordering};
use std::{
sync::atomic::{AtomicBool, Ordering},
time::Duration,
};
use tracing_subscriber::{fmt, prelude::*, EnvFilter};

use crate::{
@@ -265,6 +277,15 @@ pub mod test {
pub fn setup<Coordinator: CoordinatorTrait, SignerType: SignerTrait>(
num_signers: u32,
keys_per_signer: u32,
) -> (Coordinator, Vec<Signer<SignerType>>) {
setup_with_timeouts::<Coordinator, SignerType>(num_signers, keys_per_signer, None, None)
}

pub fn setup_with_timeouts<Coordinator: CoordinatorTrait, SignerType: SignerTrait>(
num_signers: u32,
keys_per_signer: u32,
nonce_timeout: Option<Duration>,
sign_timeout: Option<Duration>,
) -> (Coordinator, Vec<Signer<SignerType>>) {
unsafe {
if let Ok(false) =
@@ -290,16 +311,20 @@
let mut key_id: u32 = 0;
let mut signer_ids_map = HashMap::new();
let mut signer_key_ids = HashMap::new();
let mut signer_key_ids_set = HashMap::new();
let mut key_ids_map = HashMap::new();
for (i, (_private_key, public_key)) in key_pairs.iter().enumerate() {
let mut key_ids = Vec::new();
let mut key_ids_set = HashSet::new();
for _ in 0..keys_per_signer {
key_ids_map.insert(key_id + 1, *public_key);
key_ids.push(key_id);
key_ids_set.insert(key_id);
key_id += 1;
}
signer_ids_map.insert(i as u32, *public_key);
signer_key_ids.insert(i as u32, key_ids);
signer_key_ids_set.insert(i as u32, key_ids_set);
}
let public_keys = PublicKeys {
signers: signer_ids_map,
Expand All @@ -321,7 +346,15 @@ pub mod test {
)
})
.collect::<Vec<Signer<SignerType>>>();
let config = Config::new(num_signers, num_keys, threshold, key_pairs[0].0);
let config = Config::with_timeouts(
num_signers,
num_keys,
threshold,
key_pairs[0].0,
nonce_timeout,
sign_timeout,
signer_key_ids_set,
);
let coordinator = Coordinator::new(config);
(coordinator, signers)
}