fix: a bad slot signature should be a distinct error

This commit is contained in:
Jude Nelson
2024-02-06 14:19:58 -05:00
parent 1e81fa54f5
commit 844a8edde2
14 changed files with 4418 additions and 42 deletions

BIN
stackslib/--help/cli.sqlite Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -237,6 +237,8 @@ pub struct ChainsCoordinator<
/// Used to tell the P2P thread that the stackerdb
/// needs to be refreshed.
pub refresh_stacker_db: Arc<AtomicBool>,
/// whether or not the canonical tip is now a Nakamoto header
pub in_nakamoto_epoch: bool,
}
#[derive(Debug)]
@@ -538,6 +540,7 @@ impl<
config,
burnchain_indexer,
refresh_stacker_db: comms.refresh_stacker_db.clone(),
in_nakamoto_epoch: false,
};
let mut nakamoto_available = false;
@@ -699,6 +702,7 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader
config: ChainsCoordinatorConfig::new(),
burnchain_indexer,
refresh_stacker_db: Arc::new(AtomicBool::new(false)),
in_nakamoto_epoch: false,
}
}
}

View File

@@ -483,6 +483,38 @@ impl<
if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 {
signal_mining_blocked(miner_status.clone());
debug!("Received new Nakamoto stacks block notice");
// we may still be processing epoch 2 blocks after the Nakamoto transition, so be sure
// to process them so we can get to the Nakamoto blocks!
if !self.in_nakamoto_epoch {
debug!("Check to see if the system has entered the Nakamoto epoch");
if let Ok(Some(canonical_header)) = NakamotoChainState::get_canonical_block_header(&self.chain_state_db.db(), &self.sortition_db) {
if canonical_header.is_nakamoto_block() {
// great! don't check again
debug!("The canonical Stacks tip ({}/{}) is a Nakamoto block!", &canonical_header.consensus_hash, &canonical_header.anchored_header.block_hash());
self.in_nakamoto_epoch = true;
}
else {
// need to process epoch 2 blocks
debug!("Received new epoch 2.x Stacks block notice");
match self.handle_new_stacks_block() {
Ok(missing_block_opt) => {
if missing_block_opt.is_some() {
debug!(
"Missing affirmed anchor block: {:?}",
&missing_block_opt.as_ref().expect("unreachable")
);
}
}
Err(e) => {
warn!("Error processing new stacks block: {:?}", e);
}
}
}
}
}
// now we can process the nakamoto block
match self.handle_new_nakamoto_stacks_block() {
Ok(new_anchor_block_opt) => {
if let Some(bhh) = new_anchor_block_opt {

View File

@@ -2701,6 +2701,33 @@ impl ConversationP2P {
self.dns_deadline < u128::MAX
}
/// Try to get the IPv4 or IPv6 address out of a data URL.
///
/// Returns `Some(SocketAddr)` only when the URL's host component is already a
/// literal IP address and a port is available (either explicit, or implied by
/// the URL scheme). No DNS resolution is attempted here; a hostname yields
/// `None` so the caller can fall back to asynchronous DNS resolution.
fn try_decode_data_url_ipaddr(data_url: &UrlString) -> Option<SocketAddr> {
    // NOTE: should always succeed, since a UrlString shouldn't decode unless
    // it's a valid URL or the empty string
    let url = data_url.parse_to_block_url().ok()?;

    // without an explicit port or a scheme default, we can't form a SocketAddr
    let port = url.port_or_known_default()?;

    match url.host() {
        // have IPv4 address already
        Some(url::Host::Ipv4(addr)) => Some(SocketAddr::new(IpAddr::V4(addr), port)),
        // have IPv6 address already
        Some(url::Host::Ipv6(addr)) => Some(SocketAddr::new(IpAddr::V6(addr), port)),
        // a domain name (or no host at all) -- needs DNS resolution elsewhere
        _ => None,
    }
}
/// Attempt to resolve the hostname of a conversation's data URL to its IP address.
fn try_resolve_data_url_host(
&mut self,
@@ -2713,6 +2740,13 @@ impl ConversationP2P {
if self.data_url.len() == 0 {
return;
}
if let Some(ipaddr) = Self::try_decode_data_url_ipaddr(&self.data_url) {
// don't need to resolve!
debug!("{}: Resolved data URL {} to {}", &self, &self.data_url, &ipaddr);
self.data_ip = Some(ipaddr);
return;
}
let Some(dns_client) = dns_client_opt else {
return;
};

File diff suppressed because it is too large Load Diff

View File

@@ -2728,7 +2728,6 @@ impl NakamotoDownloadStateMachine {
&mut self,
network: &PeerNetwork,
sortdb: &SortitionDB,
chainstate: &StacksChainState,
) -> Result<(), NetError> {
let sort_tip = &network.burnchain_tip;
let Some(invs) = network.inv_state_nakamoto.as_ref() else {
@@ -2754,6 +2753,7 @@ impl NakamotoDownloadStateMachine {
let can_advance_wanted_tenures =
if let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() {
!Self::have_unprocessed_tenures(
sortdb.pox_constants.block_height_to_reward_cycle(sortdb.first_block_height, self.nakamoto_start_height).expect("FATAL: first nakamoto block from before system start"),
&self.tenure_downloads.completed_tenures,
prev_wanted_tenures,
&self.tenure_block_ids,
@@ -2886,6 +2886,7 @@ impl NakamotoDownloadStateMachine {
/// determine whether or not to update the set of wanted tenures -- we don't want to skip
/// fetching wanted tenures if they're still available!
pub(crate) fn have_unprocessed_tenures<'a>(
first_nakamoto_rc: u64,
completed_tenures: &HashSet<ConsensusHash>,
prev_wanted_tenures: &[WantedTenure],
tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>,
@@ -2915,16 +2916,25 @@ impl NakamotoDownloadStateMachine {
let mut has_prev_inv = false;
let mut has_cur_inv = false;
for inv in inventory_iter {
if inv.tenures_inv.get(&prev_wanted_rc).is_some() {
if prev_wanted_rc < first_nakamoto_rc {
// assume the epoch 2.x inventory has this
has_prev_inv = true;
}
if inv.tenures_inv.get(&cur_wanted_rc).is_some() {
else if inv.tenures_inv.get(&prev_wanted_rc).is_some() {
has_prev_inv = true;
}
if cur_wanted_rc < first_nakamoto_rc {
// assume the epoch 2.x inventory has this
has_cur_inv = true;
}
else if inv.tenures_inv.get(&cur_wanted_rc).is_some() {
has_cur_inv = true;
}
}
if !has_prev_inv || !has_cur_inv {
test_debug!("No peer has an inventory for either the previous ({}) or current ({}) wanted tenures", prev_wanted_rc, cur_wanted_rc);
test_debug!("No peer has an inventory for either the previous ({},{}) or current ({},{}) wanted tenures", prev_wanted_rc, has_prev_inv, cur_wanted_rc, has_cur_inv);
return true;
}
@@ -2948,11 +2958,13 @@ impl NakamotoDownloadStateMachine {
}
}
if !has_prev_rc_block || !has_cur_rc_block {
if (prev_wanted_rc >= first_nakamoto_rc && !has_prev_rc_block) || (cur_wanted_rc >= first_nakamoto_rc && !has_cur_rc_block) {
test_debug!(
"tenure_block_ids stale: missing representation in reward cycles {} and {}",
"tenure_block_ids stale: missing representation in reward cycles {} ({}) and {} ({})",
prev_wanted_rc,
cur_wanted_rc
has_prev_rc_block,
cur_wanted_rc,
has_cur_rc_block,
);
return true;
}
@@ -3050,7 +3062,7 @@ impl NakamotoDownloadStateMachine {
if sort_rc == next_sort_rc {
// not at a reward cycle boundary, so just extend self.wanted_tenures
test_debug!("Extend wanted tenures since no sort_rc change and we have tenure data");
self.extend_wanted_tenures(network, sortdb, chainstate)?;
self.extend_wanted_tenures(network, sortdb)?;
self.update_tenure_start_blocks(chainstate)?;
return Ok(());
}
@@ -3058,6 +3070,7 @@ impl NakamotoDownloadStateMachine {
let can_advance_wanted_tenures =
if let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() {
!Self::have_unprocessed_tenures(
sortdb.pox_constants.block_height_to_reward_cycle(self.nakamoto_start_height, sortdb.first_block_height).expect("FATAL: nakamoto starts before system start"),
&self.tenure_downloads.completed_tenures,
prev_wanted_tenures,
&self.tenure_block_ids,
@@ -3441,6 +3454,7 @@ impl NakamotoDownloadStateMachine {
///
/// This method is static to facilitate testing.
pub(crate) fn need_unconfirmed_tenures<'a>(
nakamoto_start_block: u64,
burnchain_height: u64,
sort_tip: &BlockSnapshot,
completed_tenures: &HashSet<ConsensusHash>,
@@ -3473,6 +3487,7 @@ impl NakamotoDownloadStateMachine {
// there are still confirmed tenures we have to go and get
if Self::have_unprocessed_tenures(
pox_constants.block_height_to_reward_cycle(first_burn_height, nakamoto_start_block).expect("FATAL: nakamoto starts before system start"),
completed_tenures,
prev_wanted_tenures,
tenure_block_ids,
@@ -4011,8 +4026,10 @@ impl NakamotoDownloadStateMachine {
return HashMap::new();
};
debug!("tenure_downloads.is_empty: {}", self.tenure_downloads.is_empty());
if self.tenure_downloads.is_empty()
&& Self::need_unconfirmed_tenures(
self.nakamoto_start_height,
burnchain_height,
&network.burnchain_tip,
&self.tenure_downloads.completed_tenures,
@@ -4065,6 +4082,7 @@ impl NakamotoDownloadStateMachine {
&& self.unconfirmed_tenure_download_schedule.is_empty()
{
if Self::need_unconfirmed_tenures(
self.nakamoto_start_height,
burnchain_height,
&network.burnchain_tip,
&self.tenure_downloads.completed_tenures,

View File

@@ -353,6 +353,7 @@ impl NakamotoTenureInv {
/// Adjust the next reward cycle to query.
/// Returns the reward cycle to query.
pub fn next_reward_cycle(&mut self) -> u64 {
test_debug!("Next reward cycle: {}", self.cur_reward_cycle + 1);
let query_rc = self.cur_reward_cycle;
self.cur_reward_cycle = self.cur_reward_cycle.saturating_add(1);
query_rc
@@ -646,11 +647,12 @@ impl<NC: NeighborComms> NakamotoInvStateMachine<NC> {
)
});
let proceed = inv.getnakamotoinv_begin(network, current_reward_cycle);
let proceed = inv.getnakamotoinv_begin(network, inv.reward_cycle());
let inv_rc = inv.reward_cycle();
new_inventories.insert(naddr.clone(), inv);
if self.comms.has_inflight(&naddr) {
test_debug!("{:?}: still waiting for reply from {}", network.get_local_peer(), &naddr);
continue;
}

View File

@@ -149,15 +149,21 @@ impl NeighborRPC {
let data_addr = if let Some(ip) = convo.data_ip {
ip.clone()
} else {
debug!(
"{}: have not resolved {} data URL {} yet",
network.get_local_peer(),
&convo,
&data_url
);
if convo.waiting_for_dns() {
debug!(
"{}: have not resolved {} data URL {} yet: waiting for DNS",
network.get_local_peer(),
&convo,
&data_url
);
return Err(NetError::WaitingForDNS);
} else {
debug!(
"{}: have not resolved {} data URL {} yet, and not waiting for DNS",
network.get_local_peer(),
&convo,
&data_url
);
return Err(NetError::PeerNotConnected);
}
};

View File

@@ -1811,7 +1811,7 @@ impl PeerNetwork {
}
if let Some(inv_state) = self.inv_state_nakamoto.as_mut() {
debug!(
"{:?}: Remove inventory state for epoch 2.x {:?}",
"{:?}: Remove inventory state for Nakamoto {:?}",
&self.local_peer, &nk
);
inv_state.del_peer(&NeighborAddress::from_neighbor_key(nk, pubkh));
@@ -5699,14 +5699,19 @@ impl PeerNetwork {
self.do_attachment_downloads(dns_client_opt, network_result);
// synchronize stacker DBs
match self.run_stacker_db_sync() {
Ok(stacker_db_sync_results) => {
network_result.consume_stacker_db_sync_results(stacker_db_sync_results);
}
Err(e) => {
warn!("Failed to run Stacker DB sync: {:?}", &e);
if !ibd {
match self.run_stacker_db_sync() {
Ok(stacker_db_sync_results) => {
network_result.consume_stacker_db_sync_results(stacker_db_sync_results);
}
Err(e) => {
warn!("Failed to run Stacker DB sync: {:?}", &e);
}
}
}
else {
debug!("{}: skip StackerDB sync in IBD", self.get_local_peer());
}
// remove timed-out requests from other threads
for (_, convo) in self.peers.iter_mut() {

View File

@@ -315,6 +315,7 @@ impl StackerDBs {
// Even if we failed to create or reconfigure the DB, we still want to keep track of them
// so that we can attempt to create/reconfigure them again later.
debug!("Reloaded configuration for {}", &stackerdb_contract_id);
test_debug!("Reloaded configuration for {}: {:?}", &stackerdb_contract_id, &new_config);
new_stackerdb_configs.insert(stackerdb_contract_id, new_config);
}
Ok(new_stackerdb_configs)

View File

@@ -245,7 +245,11 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
let local_write_timestamps = self
.stackerdbs
.get_slot_write_timestamps(&self.smart_contract_id)?;
assert_eq!(local_slot_versions.len(), local_write_timestamps.len());
if local_slot_versions.len() != local_write_timestamps.len() {
// interleaved DB write?
return Err(net_error::Transient("Interleaved DB write has led to an inconsistent view of the stackerdb. Try again.".into()));
}
let mut need_chunks: HashMap<usize, (StackerDBGetChunkData, Vec<NeighborAddress>)> =
HashMap::new();
@@ -267,11 +271,11 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
}
for (naddr, chunk_inv) in self.chunk_invs.iter() {
assert_eq!(
chunk_inv.slot_versions.len(),
local_slot_versions.len(),
"FATAL: did not validate StackerDBChunkInvData"
);
if chunk_inv.slot_versions.len() != local_slot_versions.len() {
// need to retry -- our view of the versions got changed through a
// reconfiguration
continue;
}
if *local_version >= chunk_inv.slot_versions[i] {
// remote peer has same view as local peer, or stale
@@ -355,11 +359,9 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
for (i, local_version) in local_slot_versions.iter().enumerate() {
let mut local_chunk = None;
for (naddr, chunk_inv) in self.chunk_invs.iter() {
assert_eq!(
chunk_inv.slot_versions.len(),
local_slot_versions.len(),
"FATAL: did not validate StackerDBChunkData"
);
if chunk_inv.slot_versions.len() != local_slot_versions.len() {
continue;
}
if *local_version <= chunk_inv.slot_versions[i] {
// remote peer has same or newer view than local peer
@@ -783,14 +785,15 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
network: &mut PeerNetwork,
) -> Result<bool, net_error> {
for (naddr, message) in self.comms.collect_replies(network).into_iter() {
let chunk_inv = match message.payload {
let chunk_inv_opt = match message.payload {
StacksMessageType::StackerDBChunkInv(data) => {
if data.slot_versions.len() != self.num_slots {
info!("{:?}: Received malformed StackerDBChunkInv from {:?}: expected {} chunks, got {}", network.get_local_peer(), &naddr, self.num_slots, data.slot_versions.len());
self.comms.add_broken(network, &naddr);
continue;
info!("{:?}: Received malformed StackerDBChunkInv for {} from {:?}: expected {} chunks, got {}", network.get_local_peer(), &self.smart_contract_id, &naddr, self.num_slots, data.slot_versions.len());
None
}
else {
Some(data)
}
data
}
StacksMessageType::Nack(data) => {
debug!(
@@ -811,8 +814,11 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
network.get_local_peer(),
&naddr
);
self.chunk_invs.insert(naddr.clone(), chunk_inv);
self.connected_replicas.insert(naddr);
if let Some(chunk_inv) = chunk_inv_opt {
self.chunk_invs.insert(naddr.clone(), chunk_inv);
self.connected_replicas.insert(naddr);
}
}
if self.comms.count_inflight() > 0 {
// not done yet, so blocked
@@ -942,7 +948,6 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
"Remote neighbor {:?} served an invalid chunk for ID {}",
&naddr, data.slot_id
);
self.comms.add_broken(network, &naddr);
self.connected_replicas.remove(&naddr);
continue;
}
@@ -1082,7 +1087,6 @@ impl<NC: NeighborComms> StackerDBSync<NC> {
// must be well-formed
if new_chunk_inv.slot_versions.len() != self.num_slots {
info!("{:?}: Received malformed StackerDBChunkInv from {:?}: expected {} chunks, got {}", network.get_local_peer(), &naddr, self.num_slots, new_chunk_inv.slot_versions.len());
self.comms.add_broken(network, &naddr);
continue;
}

View File

@@ -66,6 +66,7 @@ use stacks_common::types::chainstate::{
BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey,
};
use stacks_common::types::StacksPublicKeyBuffer;
use stacks_common::util::sleep_ms;
use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum};
use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey};
@@ -81,6 +82,10 @@ use crate::tests::neon_integrations::{
use crate::tests::{make_stacks_transfer, to_addr};
use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain};
use rand::RngCore;
use crate::tests::get_chain_info;
use stacks::core::PEER_VERSION_TESTNET;
pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000;
static POX_4_DEFAULT_STACKER_STX_AMT: u128 = 99_000_000_000_000;
@@ -2418,3 +2423,241 @@ fn vote_for_aggregate_key_burn_op() {
run_loop_thread.join().unwrap();
}
/// This test boots a follower node using the block downloader
///
/// A miner node is booted to epoch 3.0 and mines `tenure_count` Nakamoto
/// tenures with `inter_blocks_per_tenure` interim blocks each (driven by STX
/// transfers).  A second node with no miner is booted against the first as a
/// bootstrap peer, and the test asserts that the follower eventually syncs to
/// the miner's canonical Stacks tip.
#[test]
#[ignore]
fn follower_bootup() {
// integration tests only run when a local bitcoind is available
if env::var("BITCOIND_TEST") != Ok("1".into()) {
return;
}
let signers = TestSigners::default();
let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None);
let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1);
let sender_sk = Secp256k1PrivateKey::new();
let sender_signer_sk = Secp256k1PrivateKey::new();
let sender_signer_addr = tests::to_addr(&sender_signer_sk);
let tenure_count = 5;
let inter_blocks_per_tenure = 9;
// setup sender + recipient for some test stx transfers
// these are necessary for the interim blocks to get mined at all
let sender_addr = tests::to_addr(&sender_sk);
let send_amt = 100;
let send_fee = 180;
// fund the sender with exactly enough for one transfer per interim block
naka_conf.add_initial_balance(
PrincipalData::from(sender_addr.clone()).to_string(),
(send_amt + send_fee) * tenure_count * inter_blocks_per_tenure,
);
naka_conf.add_initial_balance(
PrincipalData::from(sender_signer_addr.clone()).to_string(),
100000,
);
let recipient = PrincipalData::from(StacksAddress::burn_address(false));
let stacker_sk = setup_stacker(&mut naka_conf);
// event observer lets the test harness watch the miner's chain events
test_observer::spawn();
let observer_port = test_observer::EVENT_OBSERVER_PORT;
naka_conf.events_observers.insert(EventObserverConfig {
endpoint: format!("localhost:{observer_port}"),
events_keys: vec![EventKeyType::AnyEvent],
});
let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone());
btcd_controller
.start_bitcoind()
.expect("Failed starting bitcoind");
let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None);
btc_regtest_controller.bootstrap_chain(201);
let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap();
let run_loop_stopper = run_loop.get_termination_switch();
// counters used below to poll miner progress (VRF registrations,
// block-commits, and proposed blocks)
let Counters {
blocks_processed,
naka_submitted_vrfs: vrfs_submitted,
naka_submitted_commits: commits_submitted,
naka_proposed_blocks: proposals_submitted,
..
} = run_loop.counters();
let coord_channel = run_loop.coordinator_channels();
let run_loop_thread = thread::Builder::new()
.name("run_loop".into())
.spawn(move || run_loop.start(None, 0))
.unwrap();
wait_for_runloop(&blocks_processed);
boot_to_epoch_3(
&naka_conf,
&blocks_processed,
&[stacker_sk],
&[sender_signer_sk],
Some(&signers),
&mut btc_regtest_controller,
);
info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner");
let burnchain = naka_conf.get_burnchain();
let sortdb = burnchain.open_sortition_db(true).unwrap();
let (chainstate, _) = StacksChainState::open(
naka_conf.is_mainnet(),
naka_conf.burnchain.chain_id,
&naka_conf.get_chainstate_path_str(),
None,
)
.unwrap();
// record the pre-3.0 height so we can check how many blocks epoch 3 mined
let block_height_pre_3_0 =
NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)
.unwrap()
.unwrap()
.stacks_block_height;
info!("Nakamoto miner started...");
blind_signer(&naka_conf, &signers, &sender_signer_sk, proposals_submitted);
// first block wakes up the run loop, wait until a key registration has been submitted.
next_block_and(&mut btc_regtest_controller, 60, || {
let vrf_count = vrfs_submitted.load(Ordering::SeqCst);
Ok(vrf_count >= 1)
})
.unwrap();
// second block should confirm the VRF register, wait until a block commit is submitted
next_block_and(&mut btc_regtest_controller, 60, || {
let commits_count = commits_submitted.load(Ordering::SeqCst);
Ok(commits_count >= 1)
})
.unwrap();
// derive the follower's config from the miner's, but with its own working
// dir, keys, and ports so both nodes can run on the same host
let mut follower_conf = naka_conf.clone();
follower_conf.events_observers.clear();
follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir);
follower_conf.node.seed = vec![0x01; 32];
follower_conf.node.local_peer_seed = vec![0x02; 32];
let mut rng = rand::thread_rng();
let mut buf = [0u8; 8];
rng.fill_bytes(&mut buf);
let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534
let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534
let localhost = "127.0.0.1";
follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port);
follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port);
follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port);
follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port);
// point the follower at the miner node as its bootstrap peer
let node_info = get_chain_info(&naka_conf);
follower_conf.node.add_bootstrap_node(&format!("{}@{}", &node_info.node_public_key.unwrap(), naka_conf.node.p2p_bind), CHAIN_ID_TESTNET, PEER_VERSION_TESTNET);
let mut follower_run_loop = boot_nakamoto::BootRunLoop::new(follower_conf.clone()).unwrap();
let follower_run_loop_stopper = follower_run_loop.get_termination_switch();
let follower_coord_channel = follower_run_loop.coordinator_channels();
debug!("Booting follower-thread ({},{})", &follower_conf.node.p2p_bind, &follower_conf.node.rpc_bind);
debug!("Booting follower-thread: neighbors = {:?}", &follower_conf.node.bootstrap_node);
// spawn a follower thread
let follower_thread = thread::Builder::new()
.name("follower-thread".into())
.spawn(move || follower_run_loop.start(None, 0))
.unwrap();
debug!("Booted follower-thread");
// Mine `tenure_count` nakamoto tenures
for tenure_ix in 0..tenure_count {
let commits_before = commits_submitted.load(Ordering::SeqCst);
next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel)
.unwrap();
let mut last_tip = BlockHeaderHash([0x00; 32]);
let mut last_tip_height = 0;
// mine the interim blocks
for interim_block_ix in 0..inter_blocks_per_tenure {
let blocks_processed_before = coord_channel
.lock()
.expect("Mutex poisoned")
.get_stacks_blocks_processed();
// submit a tx so that the miner will mine an extra block
let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix;
let transfer_tx =
make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt);
submit_tx(&http_origin, &transfer_tx);
// wait for the miner's coordinator to process the new block
loop {
let blocks_processed = coord_channel
.lock()
.expect("Mutex poisoned")
.get_stacks_blocks_processed();
if blocks_processed > blocks_processed_before {
break;
}
thread::sleep(Duration::from_millis(100));
}
// each interim block must advance the miner's tip
let info = get_chain_info_result(&naka_conf).unwrap();
assert_ne!(info.stacks_tip, last_tip);
assert_ne!(info.stacks_tip_height, last_tip_height);
last_tip = info.stacks_tip;
last_tip_height = info.stacks_tip_height;
}
// wait (bounded) for the next tenure's block-commit to land
let start_time = Instant::now();
while commits_submitted.load(Ordering::SeqCst) <= commits_before {
if start_time.elapsed() >= Duration::from_secs(20) {
panic!("Timed out waiting for block-commit");
}
thread::sleep(Duration::from_millis(100));
}
}
// load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3
let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)
.unwrap()
.unwrap();
info!(
"Latest tip";
"height" => tip.stacks_block_height,
"is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(),
);
assert!(tip.anchored_header.as_stacks_nakamoto().is_some());
assert_eq!(
tip.stacks_block_height,
block_height_pre_3_0 + ((inter_blocks_per_tenure + 1) * tenure_count),
"Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks"
);
// wait for follower to reach the chain tip
// NOTE(review): this loop has no timeout; if the follower never syncs, the
// test hangs instead of failing -- consider bounding it like the
// block-commit wait above
loop {
sleep_ms(1000);
let follower_node_info = get_chain_info(&follower_conf);
info!("Follower tip is now {}/{}", &follower_node_info.stacks_tip_consensus_hash, &follower_node_info.stacks_tip);
if follower_node_info.stacks_tip_consensus_hash == tip.consensus_hash && follower_node_info.stacks_tip == tip.anchored_header.block_hash() {
break;
}
}
// orderly shutdown: stop both coordinators, flip both termination
// switches, then join both node threads
coord_channel
.lock()
.expect("Mutex poisoned")
.stop_chains_coordinator();
run_loop_stopper.store(false, Ordering::SeqCst);
follower_coord_channel
.lock()
.expect("Mutex poisoned")
.stop_chains_coordinator();
follower_run_loop_stopper.store(false, Ordering::SeqCst);
run_loop_thread.join().unwrap();
follower_thread.join().unwrap();
}