Merge pull request #4778 from stacks-network/chore/signer-traits

Pull current stacks signer out into v1 implementation and create placeholder v0 mod
This commit is contained in:
jferrant
2024-05-14 18:00:25 +00:00
committed by GitHub
14 changed files with 338 additions and 242 deletions

View File

@@ -86,8 +86,9 @@ jobs:
- tests::signer::stackerdb_sign_request_rejected
- tests::signer::stackerdb_block_proposal
- tests::signer::stackerdb_filter_bad_transactions
- tests::signer::stackerdb_mine_2_nakamoto_reward_cycles
- tests::signer::stackerdb_sign_after_signer_reboot
# TODO: enable these once v1 signer is fixed
# - tests::signer::stackerdb_mine_2_nakamoto_reward_cycles
# - tests::signer::stackerdb_sign_after_signer_reboot
- tests::nakamoto_integrations::stack_stx_burn_op_integration_test
- tests::signer::stackerdb_delayed_dkg
# Do not run this one until we figure out why it fails in CI

View File

@@ -95,6 +95,7 @@ pub trait SignerSession {
}
/// signer session for a stackerdb instance
#[derive(Debug)]
pub struct StackerDBSession {
/// host we're talking to
pub host: String,

View File

@@ -141,7 +141,6 @@ pub(crate) mod tests {
use super::*;
use crate::config::{GlobalConfig, SignerConfig};
use crate::signer::SignerSlotID;
pub struct MockServerClient {
pub server: TcpListener,

View File

@@ -28,9 +28,19 @@ use wsts::net::Packet;
use super::ClientError;
use crate::client::retry_with_exponential_backoff;
use crate::config::SignerConfig;
use crate::signer::SignerSlotID;
/// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID
#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)]
pub struct SignerSlotID(pub u32);
impl std::fmt::Display for SignerSlotID {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
/// The StackerDB client for communicating with the .signers contract
#[derive(Debug)]
pub struct StackerDB {
/// The stacker-db sessions for each signer set and message type.
/// Maps message ID to the DB session.

View File

@@ -31,7 +31,7 @@ use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPu
use stacks_common::types::PrivateKey;
use wsts::curve::scalar::Scalar;
use crate::signer::SignerSlotID;
use crate::client::SignerSlotID;
const EVENT_TIMEOUT_MS: u64 = 5000;
// Default transaction fee to use in microstacks (if unspecificed in the config file)

View File

@@ -26,14 +26,46 @@ pub mod cli;
pub mod client;
/// The configuration module for the signer
pub mod config;
/// The coordinator selector for the signer
pub mod coordinator;
/// The primary runloop for the signer
pub mod runloop;
/// The signer module for processing events
pub mod signer;
/// The state module for the signer
pub mod signerdb;
/// The monitoring server for the signer
pub mod monitoring;
/// The primary runloop for the signer
pub mod runloop;
/// The v0 implementation of the signer. This does not include WSTS support
pub mod v0;
/// The v1 implementation of the singer. This includes WSTS support
pub mod v1;
use std::fmt::{Debug, Display};
use std::sync::mpsc::Sender;
use libsigner::SignerEvent;
use wsts::state_machine::OperationResult;
use crate::client::StacksClient;
use crate::config::SignerConfig;
use crate::runloop::RunLoopCommand;
/// A trait which provides a common `Signer` interface for `v1` and `v2`
pub trait Signer: Debug + Display {
/// Create a new `Signer` instance
fn new(config: SignerConfig) -> Self;
/// Update the `Signer` instance's next reward cycle data with the latest `SignerConfig`
fn update_next_signer_data(&mut self, next_signer_config: &SignerConfig);
/// Get the reward cycle of the signer
fn reward_cycle(&self) -> u64;
/// Process an event
fn process_event(
&mut self,
stacks_client: &StacksClient,
event: Option<&SignerEvent>,
res: Sender<Vec<OperationResult>>,
current_reward_cycle: u64,
);
/// Process a command
fn process_command(
&mut self,
stacks_client: &StacksClient,
current_reward_cycle: u64,
command: Option<RunLoopCommand>,
);
}

View File

@@ -45,14 +45,15 @@ use stacks_signer::cli::{
};
use stacks_signer::config::GlobalConfig;
use stacks_signer::runloop::{RunLoop, RunLoopCommand};
use stacks_signer::v1;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter};
use wsts::state_machine::OperationResult;
struct SpawnedSigner {
running_signer: RunningSigner<SignerEventReceiver, Vec<OperationResult>>,
cmd_send: Sender<RunLoopCommand>,
res_recv: Receiver<Vec<OperationResult>>,
_cmd_send: Sender<RunLoopCommand>,
_res_recv: Receiver<Vec<OperationResult>>,
}
/// Create a new stacker db session
@@ -82,21 +83,25 @@ fn spawn_running_signer(path: &PathBuf) -> SpawnedSigner {
let config = GlobalConfig::try_from(path).unwrap();
let endpoint = config.endpoint;
info!("Starting signer with config: {}", config);
let (cmd_send, cmd_recv) = channel();
let (res_send, res_recv) = channel();
let (_cmd_send, cmd_recv) = channel();
let (res_send, _res_recv) = channel();
let ev = SignerEventReceiver::new(config.network.is_mainnet());
#[cfg(feature = "monitoring_prom")]
{
stacks_signer::monitoring::start_serving_monitoring_metrics(config.clone()).ok();
}
let runloop = RunLoop::from(config);
let mut signer: Signer<RunLoopCommand, Vec<OperationResult>, RunLoop, SignerEventReceiver> =
Signer::new(runloop, ev, cmd_recv, res_send);
let runloop = RunLoop::new(config);
let mut signer: Signer<
RunLoopCommand,
Vec<OperationResult>,
RunLoop<v1::signer::Signer>,
SignerEventReceiver,
> = Signer::new(runloop, ev, cmd_recv, res_send);
let running_signer = signer.spawn(endpoint).unwrap();
SpawnedSigner {
running_signer,
cmd_send,
res_recv,
_cmd_send,
_res_recv,
}
}

View File

@@ -21,15 +21,32 @@ use blockstack_lib::burnchains::PoxConstants;
use blockstack_lib::chainstate::stacks::boot::SIGNERS_NAME;
use blockstack_lib::util_lib::boot::boot_code_id;
use hashbrown::HashMap;
use libsigner::{SignerEntries, SignerEvent, SignerRunLoop};
use libsigner::{BlockProposalSigners, SignerEntries, SignerEvent, SignerRunLoop};
use slog::{slog_debug, slog_error, slog_info, slog_warn};
use stacks_common::types::chainstate::StacksAddress;
use stacks_common::{debug, error, info, warn};
use wsts::common::MerkleRoot;
use wsts::state_machine::OperationResult;
use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient};
use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID, StacksClient};
use crate::config::{GlobalConfig, SignerConfig};
use crate::signer::{Command as SignerCommand, Signer, SignerSlotID};
use crate::Signer as SignerTrait;
/// Which signer operation to perform
#[derive(PartialEq, Clone, Debug)]
pub enum SignerCommand {
/// Generate a DKG aggregate public key
Dkg,
/// Sign a message
Sign {
/// The block to sign over
block_proposal: BlockProposalSigners,
/// Whether to make a taproot signature
is_taproot: bool,
/// Taproot merkle root
merkle_root: Option<MerkleRoot>,
},
}
/// Which operation to perform
#[derive(PartialEq, Clone, Debug)]
@@ -101,7 +118,7 @@ impl RewardCycleInfo {
}
/// The runloop for the stacks signer
pub struct RunLoop {
pub struct RunLoop<Signer: SignerTrait> {
/// Configuration info
pub config: GlobalConfig,
/// The stacks node client
@@ -117,9 +134,9 @@ pub struct RunLoop {
pub current_reward_cycle_info: Option<RewardCycleInfo>,
}
impl From<GlobalConfig> for RunLoop {
/// Creates new runloop from a config
fn from(config: GlobalConfig) -> Self {
impl<Signer: SignerTrait> RunLoop<Signer> {
/// Create a new signer runloop from the provided configuration
pub fn new(config: GlobalConfig) -> Self {
let stacks_client = StacksClient::from(&config);
Self {
config,
@@ -130,9 +147,6 @@ impl From<GlobalConfig> for RunLoop {
current_reward_cycle_info: None,
}
}
}
impl RunLoop {
/// Get the registered signers for a specific reward cycle
/// Returns None if no signers are registered or its not Nakamoto cycle
pub fn get_parsed_reward_set(
@@ -237,20 +251,14 @@ impl RunLoop {
let prior_reward_cycle = reward_cycle.saturating_sub(1);
let prior_reward_set = prior_reward_cycle % 2;
if let Some(signer) = self.stacks_signers.get_mut(&prior_reward_set) {
if signer.reward_cycle == prior_reward_cycle {
if signer.reward_cycle() == prior_reward_cycle {
// The signers have been calculated for the next reward cycle. Update the current one
debug!("{signer}: Next reward cycle ({reward_cycle}) signer set calculated. Reconfiguring current reward cycle signer.");
signer.next_signer_addresses = new_signer_config
.signer_entries
.signer_ids
.keys()
.copied()
.collect();
signer.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone();
signer.update_next_signer_data(&new_signer_config);
}
}
}
let new_signer = Signer::from(new_signer_config);
let new_signer = Signer::new(new_signer_config);
info!("{new_signer} initialized.");
self.stacks_signers.insert(reward_index, new_signer);
} else {
@@ -318,7 +326,7 @@ impl RunLoop {
if self
.stacks_signers
.get(&(next_reward_cycle % 2))
.map(|signer| signer.reward_cycle != next_reward_cycle)
.map(|signer| signer.reward_cycle() != next_reward_cycle)
.unwrap_or(true)
{
info!("Received a new burnchain block height ({current_burn_block_height}) in the prepare phase of the next reward cycle ({next_reward_cycle}). Checking for signer registration...");
@@ -337,7 +345,7 @@ impl RunLoop {
fn cleanup_stale_signers(&mut self, current_reward_cycle: u64) {
let mut to_delete = Vec::new();
for (idx, signer) in &mut self.stacks_signers {
if signer.reward_cycle < current_reward_cycle {
if signer.reward_cycle() < current_reward_cycle {
debug!("{signer}: Signer's tenure has completed.");
to_delete.push(*idx);
continue;
@@ -349,7 +357,7 @@ impl RunLoop {
}
}
impl SignerRunLoop<Vec<OperationResult>, RunLoopCommand> for RunLoop {
impl<Signer: SignerTrait> SignerRunLoop<Vec<OperationResult>, RunLoopCommand> for RunLoop<Signer> {
fn set_event_timeout(&mut self, timeout: Duration) {
self.config.event_timeout = timeout;
}
@@ -399,58 +407,18 @@ impl SignerRunLoop<Vec<OperationResult>, RunLoopCommand> for RunLoop {
return None;
}
for signer in self.stacks_signers.values_mut() {
let event_parity = match event {
Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2),
// Block proposal events do have reward cycles, but each proposal has its own cycle,
// and the vec could be heterogenous, so, don't differentiate.
Some(SignerEvent::MinerMessages(..))
| Some(SignerEvent::NewBurnBlock(_))
| Some(SignerEvent::StatusCheck)
| None => None,
Some(SignerEvent::SignerMessages(msg_parity, ..)) => {
Some(u64::from(msg_parity) % 2)
}
};
let other_signer_parity = (signer.reward_cycle + 1) % 2;
if event_parity == Some(other_signer_parity) {
continue;
}
if signer.approved_aggregate_public_key.is_none() {
if let Err(e) =
signer.refresh_dkg(&self.stacks_client, res.clone(), current_reward_cycle)
{
error!("{signer}: failed to refresh DKG: {e}");
}
}
signer.refresh_coordinator();
if let Err(e) = signer.process_event(
signer.process_event(
&self.stacks_client,
event.as_ref(),
res.clone(),
current_reward_cycle,
) {
error!("{signer}: errored processing event: {e}");
}
if let Some(command) = self.commands.pop_front() {
let reward_cycle = command.reward_cycle;
if signer.reward_cycle != reward_cycle {
warn!(
"{signer}: not registered for reward cycle {reward_cycle}. Ignoring command: {command:?}"
);
} else {
info!(
"{signer}: Queuing an external runloop command ({:?}): {command:?}",
signer
.state_machine
.public_keys
.signers
.get(&signer.signer_id)
);
signer.commands.push_back(command.command);
}
}
);
// After processing event, run the next command for each signer
signer.process_next_command(&self.stacks_client, current_reward_cycle);
signer.process_command(
&self.stacks_client,
current_reward_cycle,
self.commands.pop_front(),
);
}
None
}

View File

@@ -0,0 +1,15 @@
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
// Copyright (C) 2020-2024 Stacks Open Internet Foundation
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

View File

@@ -0,0 +1,22 @@
// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
// Copyright (C) 2020-2024 Stacks Open Internet Foundation
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
/// The coordinator selector for the signer
pub mod coordinator;
/// The signer module for processing events
pub mod signer;
/// The state module for the signer
pub mod signerdb;

View File

@@ -38,7 +38,7 @@ use stacks_common::types::chainstate::{ConsensusHash, StacksAddress};
use stacks_common::types::StacksEpochId;
use stacks_common::util::hash::Sha512Trunc256Sum;
use stacks_common::{debug, error, info, warn};
use wsts::common::{MerkleRoot, Signature};
use wsts::common::Signature;
use wsts::curve::keys::PublicKey;
use wsts::curve::point::Point;
use wsts::curve::scalar::Scalar;
@@ -52,20 +52,12 @@ use wsts::state_machine::{OperationResult, SignError};
use wsts::traits::Signer as _;
use wsts::v2;
use crate::client::{ClientError, StackerDB, StacksClient};
use crate::client::{ClientError, SignerSlotID, StackerDB, StacksClient};
use crate::config::SignerConfig;
use crate::coordinator::CoordinatorSelector;
use crate::signerdb::SignerDb;
/// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID
#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)]
pub struct SignerSlotID(pub u32);
impl std::fmt::Display for SignerSlotID {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}
use crate::runloop::{RunLoopCommand, SignerCommand};
use crate::v1::coordinator::CoordinatorSelector;
use crate::v1::signerdb::SignerDb;
use crate::Signer as SignerTrait;
/// Additional Info about a proposed block
#[derive(Serialize, Deserialize, Debug, PartialEq)]
@@ -117,22 +109,6 @@ impl BlockInfo {
}
}
/// Which signer operation to perform
#[derive(PartialEq, Clone, Debug)]
pub enum Command {
/// Generate a DKG aggregate public key
Dkg,
/// Sign a message
Sign {
/// The block to sign over
block_proposal: BlockProposalSigners,
/// Whether to make a taproot signature
is_taproot: bool,
/// Taproot merkle root
merkle_root: Option<MerkleRoot>,
},
}
/// The specific operations that a signer can perform
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum Operation {
@@ -154,6 +130,7 @@ pub enum State {
}
/// The stacks signer registered for the reward cycle
#[derive(Debug)]
pub struct Signer {
/// The coordinator for inbound messages for a specific reward cycle
pub coordinator: FireCoordinator<v2::Aggregator>,
@@ -162,7 +139,7 @@ pub struct Signer {
/// the state of the signer
pub state: State,
/// Received Commands that need to be processed
pub commands: VecDeque<Command>,
pub commands: VecDeque<SignerCommand>,
/// The stackerdb client
pub stackerdb: StackerDB,
/// Whether the signer is a mainnet signer or not
@@ -208,7 +185,177 @@ impl std::fmt::Display for Signer {
}
}
impl SignerTrait for Signer {
/// Create a new signer from the given configuration
fn new(config: SignerConfig) -> Self {
Self::from(config)
}
/// Refresh the next signer data from the given configuration data
fn update_next_signer_data(&mut self, new_signer_config: &SignerConfig) {
self.next_signer_addresses = new_signer_config
.signer_entries
.signer_ids
.keys()
.copied()
.collect();
self.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone();
}
/// Return the reward cycle of the signer
fn reward_cycle(&self) -> u64 {
self.reward_cycle
}
/// Process the event
fn process_event(
&mut self,
stacks_client: &StacksClient,
event: Option<&SignerEvent>,
res: Sender<Vec<OperationResult>>,
current_reward_cycle: u64,
) {
let event_parity = match event {
Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2),
// Block proposal events do have reward cycles, but each proposal has its own cycle,
// and the vec could be heterogenous, so, don't differentiate.
Some(SignerEvent::MinerMessages(..))
| Some(SignerEvent::NewBurnBlock(_))
| Some(SignerEvent::StatusCheck)
| None => None,
Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2),
};
let other_signer_parity = (self.reward_cycle + 1) % 2;
if event_parity == Some(other_signer_parity) {
return;
}
if self.approved_aggregate_public_key.is_none() {
if let Err(e) = self.refresh_dkg(stacks_client, res.clone(), current_reward_cycle) {
error!("{self}: failed to refresh DKG: {e}");
}
}
self.refresh_coordinator();
if self.approved_aggregate_public_key.is_none() {
if let Err(e) = self.refresh_dkg(stacks_client, res.clone(), current_reward_cycle) {
error!("{self}: failed to refresh DKG: {e}");
}
}
self.refresh_coordinator();
debug!("{self}: Processing event: {event:?}");
match event {
Some(SignerEvent::BlockValidationResponse(block_validate_response)) => {
debug!("{self}: Received a block proposal result from the stacks node...");
self.handle_block_validate_response(
stacks_client,
block_validate_response,
res,
current_reward_cycle,
)
}
Some(SignerEvent::SignerMessages(signer_set, messages)) => {
if *signer_set != self.stackerdb.get_signer_set() {
debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. Ignoring...");
return;
}
debug!(
"{self}: Received {} messages from the other signers...",
messages.len()
);
self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle);
}
Some(SignerEvent::MinerMessages(messages, miner_key)) => {
if let Some(miner_key) = miner_key {
let miner_key = PublicKey::try_from(miner_key.to_bytes_compressed().as_slice())
.expect("FATAL: could not convert from StacksPublicKey to PublicKey");
self.miner_key = Some(miner_key);
};
if current_reward_cycle != self.reward_cycle {
// There is not point in processing blocks if we are not the current reward cycle (we can never actually contribute to signing these blocks)
debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). Ignoring...");
return;
}
debug!(
"{self}: Received {} messages from the miner",
messages.len();
"miner_key" => ?miner_key,
);
self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle);
}
Some(SignerEvent::StatusCheck) => {
debug!("{self}: Received a status check event.")
}
Some(SignerEvent::NewBurnBlock(height)) => {
debug!("{self}: Receved a new burn block event for block height {height}")
}
None => {
// No event. Do nothing.
debug!("{self}: No event received")
}
}
}
fn process_command(
&mut self,
stacks_client: &StacksClient,
current_reward_cycle: u64,
command: Option<RunLoopCommand>,
) {
if let Some(command) = command {
let reward_cycle = command.reward_cycle;
if self.reward_cycle != reward_cycle {
warn!(
"{self}: not registered for reward cycle {reward_cycle}. Ignoring command: {command:?}"
);
} else {
info!(
"{self}: Queuing an external runloop command ({:?}): {command:?}",
self.state_machine.public_keys.signers.get(&self.signer_id)
);
self.commands.push_back(command.command);
}
}
self.process_next_command(stacks_client, current_reward_cycle);
}
}
impl Signer {
/// Attempt to process the next command in the queue, and update state accordingly
fn process_next_command(&mut self, stacks_client: &StacksClient, current_reward_cycle: u64) {
match &self.state {
State::Uninitialized => {
// We cannot process any commands until we have restored our state
warn!("{self}: Cannot process commands until state is restored. Waiting...");
}
State::Idle => {
let Some(command) = self.commands.front() else {
debug!("{self}: Nothing to process. Waiting for command...");
return;
};
let coordinator_id = if matches!(command, SignerCommand::Dkg) {
// We cannot execute a DKG command if we are not the coordinator
Some(self.get_coordinator_dkg().0)
} else {
self.get_coordinator_sign(current_reward_cycle).0
};
if coordinator_id != Some(self.signer_id) {
debug!(
"{self}: Coordinator is {coordinator_id:?}. Will not process any commands...",
);
return;
}
let command = self
.commands
.pop_front()
.expect("BUG: Already asserted that the command queue was not empty");
self.execute_command(stacks_client, &command);
}
State::OperationInProgress(op) => {
// We cannot execute the next command until the current one is finished...
debug!(
"{self}: Waiting for {op:?} operation to finish. Coordinator state = {:?}",
self.coordinator.state
);
}
}
}
/// Return the current coordinator.
/// If the current reward cycle is the active reward cycle, this is the miner,
/// so the first element of the tuple will be None (because the miner does not have a signer index).
@@ -410,9 +557,9 @@ impl Signer {
}
/// Execute the given command and update state accordingly
fn execute_command(&mut self, stacks_client: &StacksClient, command: &Command) {
fn execute_command(&mut self, stacks_client: &StacksClient, command: &SignerCommand) {
match command {
Command::Dkg => {
SignerCommand::Dkg => {
crate::monitoring::increment_commands_processed("dkg");
if self.approved_aggregate_public_key.is_some() {
debug!("Reward cycle #{} Signer #{}: Already have an aggregate key. Ignoring DKG command.", self.reward_cycle, self.signer_id);
@@ -445,7 +592,7 @@ impl Signer {
}
self.update_operation(Operation::Dkg);
}
Command::Sign {
SignerCommand::Sign {
block_proposal,
is_taproot,
merkle_root,
@@ -496,50 +643,6 @@ impl Signer {
}
}
/// Attempt to process the next command in the queue, and update state accordingly
pub fn process_next_command(
&mut self,
stacks_client: &StacksClient,
current_reward_cycle: u64,
) {
match &self.state {
State::Uninitialized => {
// We cannot process any commands until we have restored our state
warn!("{self}: Cannot process commands until state is restored. Waiting...");
}
State::Idle => {
let Some(command) = self.commands.front() else {
debug!("{self}: Nothing to process. Waiting for command...");
return;
};
let coordinator_id = if matches!(command, Command::Dkg) {
// We cannot execute a DKG command if we are not the coordinator
Some(self.get_coordinator_dkg().0)
} else {
self.get_coordinator_sign(current_reward_cycle).0
};
if coordinator_id != Some(self.signer_id) {
debug!(
"{self}: Coordinator is {coordinator_id:?}. Will not process any commands...",
);
return;
}
let command = self
.commands
.pop_front()
.expect("BUG: Already asserted that the command queue was not empty");
self.execute_command(stacks_client, &command);
}
State::OperationInProgress(op) => {
// We cannot execute the next command until the current one is finished...
debug!(
"{self}: Waiting for {op:?} operation to finish. Coordinator state = {:?}",
self.coordinator.state
);
}
}
}
/// Handle the block validate response returned from our prior calls to submit a block for validation
fn handle_block_validate_response(
&mut self,
@@ -1400,9 +1503,9 @@ impl Signer {
if self.approved_aggregate_public_key.is_some() {
return Ok(());
}
if self.commands.front() != Some(&Command::Dkg) {
if self.commands.front() != Some(&SignerCommand::Dkg) {
info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command...");
self.commands.push_front(Command::Dkg);
self.commands.push_front(SignerCommand::Dkg);
} else {
debug!("{self}: DKG command already queued...");
}
@@ -1458,7 +1561,7 @@ impl Signer {
pub fn should_queue_dkg(&mut self, stacks_client: &StacksClient) -> Result<bool, ClientError> {
if self.state != State::Idle
|| self.signer_id != self.get_coordinator_dkg().0
|| self.commands.front() == Some(&Command::Dkg)
|| self.commands.front() == Some(&SignerCommand::Dkg)
{
// We are not the coordinator, we are in the middle of an operation, or we have already queued DKG. Do not attempt to queue DKG
return Ok(false);
@@ -1556,68 +1659,6 @@ impl Signer {
}
Ok(true)
}
/// Process the event
pub fn process_event(
&mut self,
stacks_client: &StacksClient,
event: Option<&SignerEvent>,
res: Sender<Vec<OperationResult>>,
current_reward_cycle: u64,
) -> Result<(), ClientError> {
debug!("{self}: Processing event: {event:?}");
match event {
Some(SignerEvent::BlockValidationResponse(block_validate_response)) => {
debug!("{self}: Received a block proposal result from the stacks node...");
self.handle_block_validate_response(
stacks_client,
block_validate_response,
res,
current_reward_cycle,
)
}
Some(SignerEvent::SignerMessages(signer_set, messages)) => {
if *signer_set != self.stackerdb.get_signer_set() {
debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. Ignoring...");
return Ok(());
}
debug!(
"{self}: Received {} messages from the other signers...",
messages.len()
);
self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle);
}
Some(SignerEvent::MinerMessages(messages, miner_key)) => {
if let Some(miner_key) = miner_key {
let miner_key = PublicKey::try_from(miner_key.to_bytes_compressed().as_slice())
.expect("FATAL: could not convert from StacksPublicKey to PublicKey");
self.miner_key = Some(miner_key);
};
if current_reward_cycle != self.reward_cycle {
// There is not point in processing blocks if we are not the current reward cycle (we can never actually contribute to signing these blocks)
debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). Ignoring...");
return Ok(());
}
debug!(
"{self}: Received {} messages from the miner",
messages.len();
"miner_key" => ?miner_key,
);
self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle);
}
Some(SignerEvent::StatusCheck) => {
debug!("{self}: Received a status check event.")
}
Some(SignerEvent::NewBurnBlock(height)) => {
debug!("{self}: Receved a new burn block event for block height {height}")
}
None => {
// No event. Do nothing.
debug!("{self}: No event received")
}
}
Ok(())
}
}
fn load_encrypted_signer_state<S: SignerStateStorage>(

View File

@@ -24,7 +24,7 @@ use slog::slog_debug;
use stacks_common::debug;
use stacks_common::util::hash::Sha512Trunc256Sum;
use crate::signer::BlockInfo;
use crate::v1::signer::BlockInfo;
/// This struct manages a SQLite database connection
/// for the signer.

View File

@@ -40,11 +40,11 @@ use stacks_common::types::chainstate::{
use stacks_common::types::StacksEpochId;
use stacks_common::util::hash::{hex_bytes, MerkleTree, Sha512Trunc256Sum};
use stacks_common::util::secp256k1::MessageSignature;
use stacks_signer::client::{StackerDB, StacksClient};
use stacks_signer::client::{SignerSlotID, StackerDB, StacksClient};
use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network};
use stacks_signer::coordinator::CoordinatorSelector;
use stacks_signer::runloop::RunLoopCommand;
use stacks_signer::signer::{Command as SignerCommand, SignerSlotID};
use stacks_signer::runloop::{RunLoopCommand, SignerCommand};
use stacks_signer::v1;
use stacks_signer::v1::coordinator::CoordinatorSelector;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter};
use wsts::curve::point::Point;
@@ -512,6 +512,7 @@ impl SignerTest {
entries.public_keys
}
#[allow(dead_code)]
fn get_signer_metrics(&self) -> String {
#[cfg(feature = "monitoring_prom")]
{
@@ -805,11 +806,12 @@ fn spawn_signer(
{
stacks_signer::monitoring::start_serving_monitoring_metrics(config.clone()).ok();
}
let runloop: stacks_signer::runloop::RunLoop = stacks_signer::runloop::RunLoop::from(config);
let runloop: stacks_signer::runloop::RunLoop<v1::signer::Signer> =
stacks_signer::runloop::RunLoop::new(config);
let mut signer: Signer<
RunLoopCommand,
Vec<OperationResult>,
stacks_signer::runloop::RunLoop,
stacks_signer::runloop::RunLoop<v1::signer::Signer>,
SignerEventReceiver,
> = Signer::new(runloop, ev, receiver, sender);
info!("Spawning signer on endpoint {}", endpoint);