Merge branch 'develop' into fix/agg-increase-event

Authored by Hank Stoever on 2024-04-29 14:26:41 -07:00; committed by GitHub
56 changed files with 992 additions and 63 deletions

View File

@@ -23,5 +23,5 @@ RUN case ${TARGETPLATFORM} in \
&& unzip ${BIN_ARCH}.zip -d /out
FROM --platform=${TARGETPLATFORM} alpine
COPY --from=builder /out/stacks-node /bin/
COPY --from=builder /out/stacks-node /out/stacks-signer /bin/
CMD ["stacks-node", "mainnet"]

View File

@@ -23,5 +23,5 @@ RUN case ${TARGETPLATFORM} in \
&& unzip ${BIN_ARCH}.zip -d /out
FROM --platform=${TARGETPLATFORM} debian:bookworm
COPY --from=builder /out/stacks-node /bin/
COPY --from=builder /out/stacks-node /out/stacks-signer /bin/
CMD ["stacks-node", "mainnet"]

View File

@@ -92,6 +92,7 @@ jobs:
- tests::signer::stackerdb_delayed_dkg
# Do not run these until we figure out why they fail in CI
# - tests::neon_integrations::bitcoin_reorg_flap
# - tests::neon_integrations::bitcoin_reorg_flap_with_follower
steps:
## Setup test environment
- name: Setup Test Environment

View File

@@ -70,6 +70,8 @@ jobs:
draft: false
prerelease: true
fail_on_unmatched_files: true
target_commitish: ${{ github.sha }}
generate_release_notes: true
files: |
release/*.zip
CHECKSUMS.txt

View File

@@ -5,7 +5,30 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to the versioning scheme outlined in the [README.md](README.md).
## [Next-Branch]
## [2.5.0.0.3]
This release fixes a regression in `2.5.0.0.0` from `2.4.0.1.0` caused by a git merge.
## [2.5.0.0.2]
This release fixes two bugs in `2.5.0.0.0`: it correctly sets the activation height for Epoch 2.5 and the network peer version.
## [2.5.0.0.0]
This release implements the 2.5 Stacks consensus rules, which activate at Bitcoin block `840,360`: primarily the instantiation
of the pox-4 contract. For more details, see SIP-021.
This is the first consensus-critical release for Nakamoto. Nodes that do not update before the 2.5 activation height will be forked away from the rest of the network. This release is compatible with 2.4.x chain state directories and does not require resyncing from genesis. The first time a node boots with this version, it will perform some database migrations, which could lengthen the normal node startup time.
**This is a required release before Nakamoto rules are enabled in 3.0.**
### Timing of Release from 2.5 to 3.0
Activating Nakamoto will include two epochs:
- **Epoch 2.5:** The pox-4 contract is booted up, but no Nakamoto consensus rules take effect.
- **Epoch 3:** Nakamoto consensus rules take effect.
### Added

View File

@@ -1340,7 +1340,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> {
self.global_context.begin();
let result = stx_transfer_consolidated(self, from, to, amount, memo);
match result {
Ok(value) => match value.clone().expect_result() {
Ok(value) => match value.clone().expect_result()? {
Ok(_) => {
self.global_context.commit()?;
Ok(value)
@@ -1966,8 +1966,14 @@ impl CallStack {
#[cfg(test)]
mod test {
use stacks_common::types::chainstate::StacksAddress;
use stacks_common::util::hash::Hash160;
use super::*;
use crate::vm::callables::DefineType;
use crate::vm::tests::{
test_epochs, tl_env_factory, MemoryEnvironmentGenerator, TopLevelMemoryEnvironmentGenerator,
};
use crate::vm::types::signatures::CallableSubtype;
use crate::vm::types::{FixedFunction, FunctionArg, FunctionType, StandardPrincipalData};
@@ -2123,6 +2129,35 @@ mod test {
assert_eq!(table[&p2][&t7], AssetMapEntry::Burn(35 + 36));
}
/// Test the stx-transfer consolidation tx invalidation
/// bug from 2.4.0.1.0-rc1
#[apply(test_epochs)]
fn stx_transfer_consolidate_regr_24010(
epoch: StacksEpochId,
mut tl_env_factory: TopLevelMemoryEnvironmentGenerator,
) {
let mut env = tl_env_factory.get_env(epoch);
let u1 = StacksAddress {
version: 0,
bytes: Hash160([1; 20]),
};
let u2 = StacksAddress {
version: 0,
bytes: Hash160([2; 20]),
};
// Insufficient balance must make this a non-includable transaction. It must error here,
// not simply roll back the tx and squelch the error as if it were includable.
let e = env
.stx_transfer(
&PrincipalData::from(u1.clone()),
&PrincipalData::from(u2.clone()),
1000,
&BuffData::empty(),
)
.unwrap_err();
assert_eq!(e.to_string(), "Interpreter(InsufficientBalance)");
}
#[test]
fn test_canonicalize_contract_context() {
let trait_id = TraitIdentifier::new(

View File

@@ -149,7 +149,7 @@ impl MemoryEnvironmentGenerator {
pub struct TopLevelMemoryEnvironmentGenerator(MemoryBackingStore);
impl TopLevelMemoryEnvironmentGenerator {
fn get_env(&mut self, epoch: StacksEpochId) -> OwnedEnvironment {
pub fn get_env(&mut self, epoch: StacksEpochId) -> OwnedEnvironment {
let owned_env = OwnedEnvironment::new(self.0.as_clarity_db(), epoch);
owned_env
}

View File

@@ -288,7 +288,7 @@ fn create_event_info_data_code(
;; Get start cycle ID
start-cycle-id: (+ (current-pox-reward-cycle) u1 pox-set-offset),
}}
}}
}})
"#,
stacker = &args[0],
pox_addr = &args[1],

View File

@@ -788,7 +788,14 @@ fn inner_find_heaviest_block_commit_ptr(
}
if let Some(last_vtxindex) = last_vtxindex.as_mut() {
assert!(*last_vtxindex < opdata.vtxindex);
assert!(
*last_vtxindex < opdata.vtxindex,
"{} !< {} at block {} (op {:?})",
*last_vtxindex,
opdata.vtxindex,
opdata.block_height,
&opdata
);
*last_vtxindex = opdata.vtxindex;
} else {
last_vtxindex = Some(opdata.vtxindex);

View File

@@ -232,6 +232,11 @@ CREATE TABLE burnchain_db_block_ops (
-- 32-byte transaction ID
txid TEXT NOT NULL,
-- This should have been present when we created this table, but we forgot.
-- So instead, query methods against this table need to use REPLACE INTO and
-- SELECT DISTINCT for compatibility.
-- PRIMARY KEY(txid,block_hash),
-- ensure that the operation corresponds to an actual block
FOREIGN KEY(block_hash) REFERENCES burnchain_db_block_headers(block_hash)
);
@@ -432,7 +437,8 @@ impl<'a> BurnchainDBTransaction<'a> {
) -> Result<(), BurnchainError> {
// find all block-commits for this block
let commits: Vec<LeaderBlockCommitOp> = {
let block_ops_qry = "SELECT * FROM burnchain_db_block_ops WHERE block_hash = ?";
let block_ops_qry =
"SELECT DISTINCT * FROM burnchain_db_block_ops WHERE block_hash = ?";
let block_ops = query_rows(&self.sql_tx, block_ops_qry, &[&hdr.block_hash])?;
block_ops
.into_iter()
@@ -891,7 +897,7 @@ impl<'a> BurnchainDBTransaction<'a> {
block_header: &BurnchainBlockHeader,
block_ops: &[BlockstackOperationType],
) -> Result<(), BurnchainError> {
let sql = "INSERT INTO burnchain_db_block_ops
let sql = "REPLACE INTO burnchain_db_block_ops
(block_hash, txid, op) VALUES (?, ?, ?)";
let mut stmt = self.sql_tx.prepare(sql)?;
for op in block_ops.iter() {
@@ -1133,7 +1139,7 @@ impl BurnchainDB {
) -> Result<BurnchainBlockData, BurnchainError> {
let block_header_qry =
"SELECT * FROM burnchain_db_block_headers WHERE block_hash = ? LIMIT 1";
let block_ops_qry = "SELECT * FROM burnchain_db_block_ops WHERE block_hash = ?";
let block_ops_qry = "SELECT DISTINCT * FROM burnchain_db_block_ops WHERE block_hash = ?";
let block_header = query_row(conn, block_header_qry, &[block])?
.ok_or_else(|| BurnchainError::UnknownBlock(block.clone()))?;
@@ -1150,7 +1156,8 @@ impl BurnchainDB {
burn_header_hash: &BurnchainHeaderHash,
txid: &Txid,
) -> Option<BlockstackOperationType> {
let qry = "SELECT op FROM burnchain_db_block_ops WHERE txid = ?1 AND block_hash = ?2";
let qry =
"SELECT DISTINCT op FROM burnchain_db_block_ops WHERE txid = ?1 AND block_hash = ?2";
let args: &[&dyn ToSql] = &[txid, burn_header_hash];
match query_row(conn, qry, args) {
@@ -1169,7 +1176,7 @@ impl BurnchainDB {
indexer: &B,
txid: &Txid,
) -> Option<BlockstackOperationType> {
let qry = "SELECT op FROM burnchain_db_block_ops WHERE txid = ?1";
let qry = "SELECT DISTINCT op FROM burnchain_db_block_ops WHERE txid = ?1";
let args: &[&dyn ToSql] = &[txid];
let ops: Vec<BlockstackOperationType> =

View File

@@ -2848,6 +2848,80 @@ impl SortitionDB {
Ok(())
}
/// Validates the given StacksEpochs (panicking at runtime if any StacksEpoch is structured invalidly) and
/// replaces them in the SortitionDB's epochs table
fn validate_and_replace_epochs(
db_tx: &Transaction,
epochs: &[StacksEpoch],
) -> Result<(), db_error> {
let epochs = StacksEpoch::validate_epochs(epochs);
let existing_epochs = Self::get_stacks_epochs(db_tx)?;
if existing_epochs == epochs {
return Ok(());
}
let tip = SortitionDB::get_canonical_burn_chain_tip(db_tx)?;
let existing_epoch_idx = StacksEpoch::find_epoch(&existing_epochs, tip.block_height)
.unwrap_or_else(|| {
panic!(
"FATAL: Sortition tip {} has no epoch in its existing epochs table",
tip.block_height
);
});
let new_epoch_idx =
StacksEpoch::find_epoch(&epochs, tip.block_height).unwrap_or_else(|| {
panic!(
"FATAL: Sortition tip {} has no epoch in the configured epochs list",
tip.block_height
);
});
// can't retcon epochs -- all epochs up to (but excluding) the tip's epoch in both epoch
// lists must be the same.
for i in 0..existing_epoch_idx.min(new_epoch_idx) {
if existing_epochs[i] != epochs[i] {
panic!(
"FATAL: tried to retcon epoch {:?} into epoch {:?}",
&existing_epochs[i], &epochs[i]
);
}
}
// can't change parameters of the current epoch in either epoch list,
// except for the end height (and only if it hasn't been reached yet)
let mut diff_epoch = existing_epochs[existing_epoch_idx].clone();
diff_epoch.end_height = epochs[new_epoch_idx].end_height;
if diff_epoch != epochs[new_epoch_idx] {
panic!(
"FATAL: tried to change current epoch {:?} into {:?}",
&existing_epochs[existing_epoch_idx], &epochs[new_epoch_idx]
);
}
if tip.block_height >= epochs[new_epoch_idx].end_height {
panic!("FATAL: tip has reached or passed the end of the configured epoch");
}
info!("Replace existing epochs with new epochs");
db_tx.execute("DELETE FROM epochs;", NO_PARAMS)?;
for epoch in epochs.into_iter() {
let args: &[&dyn ToSql] = &[
&(epoch.epoch_id as u32),
&u64_to_sql(epoch.start_height)?,
&u64_to_sql(epoch.end_height)?,
&epoch.block_limit,
&epoch.network_epoch,
];
db_tx.execute(
"INSERT INTO epochs (epoch_id,start_block_height,end_block_height,block_limit,network_epoch) VALUES (?1,?2,?3,?4,?5)",
args
)?;
}
Ok(())
}
/// Get a block commit by its content-addressed location in a specific sortition.
pub fn get_block_commit(
conn: &Connection,
@@ -3322,6 +3396,10 @@ impl SortitionDB {
self.apply_schema_8_migration(migrator.take())?;
} else if version == expected_version {
let tx = self.tx_begin()?;
SortitionDB::validate_and_replace_epochs(&tx, epochs)?;
tx.commit()?;
return Ok(());
} else {
panic!("The schema version of the sortition DB is invalid.")
@@ -10646,4 +10724,51 @@ pub mod tests {
good_ops_2[3]
);
}
#[test]
fn test_validate_and_replace_epochs() {
use crate::core::STACKS_EPOCHS_MAINNET;
let path_root = "/tmp/test_validate_and_replace_epochs";
if fs::metadata(path_root).is_ok() {
fs::remove_dir_all(path_root).unwrap();
}
fs::create_dir_all(path_root).unwrap();
let mut bad_epochs = STACKS_EPOCHS_MAINNET.to_vec();
let idx = bad_epochs.len() - 2;
bad_epochs[idx].end_height += 1;
bad_epochs[idx + 1].start_height += 1;
let sortdb = SortitionDB::connect(
&format!("{}/sortdb.sqlite", &path_root),
0,
&BurnchainHeaderHash([0x00; 32]),
0,
&bad_epochs,
PoxConstants::mainnet_default(),
None,
true,
)
.unwrap();
let db_epochs = SortitionDB::get_stacks_epochs(sortdb.conn()).unwrap();
assert_eq!(db_epochs, bad_epochs);
let fixed_sortdb = SortitionDB::connect(
&format!("{}/sortdb.sqlite", &path_root),
0,
&BurnchainHeaderHash([0x00; 32]),
0,
&STACKS_EPOCHS_MAINNET.to_vec(),
PoxConstants::mainnet_default(),
None,
true,
)
.unwrap();
let db_epochs = SortitionDB::get_stacks_epochs(sortdb.conn()).unwrap();
assert_eq!(db_epochs, STACKS_EPOCHS_MAINNET.to_vec());
}
}

View File

@@ -897,12 +897,12 @@
(err ERR_STACKING_INVALID_LOCK_PERIOD))
(let ((partial-amount-ustx (get stacked-amount partial-stacked))
;; reward-cycle must point to an existing record in reward-cycle-total-stacked
;; infallible; getting something from partial-stacked-by-cycle succeeded so this must succeed
(existing-cycle (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle })))
;; reward-cycle and reward-cycle-index must point to an existing record in reward-cycle-pox-address-list
(existing-entry (unwrap! (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index })
(err ERR_DELEGATION_NO_REWARD_SLOT)))
;; reward-cycle must point to an existing record in reward-cycle-total-stacked
;; infallible; getting existing-entry succeeded so this must succeed
(existing-cycle (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle })))
(increased-entry-total (+ (get total-ustx existing-entry) partial-amount-ustx))
(increased-cycle-total (+ (get total-ustx existing-cycle) partial-amount-ustx))
(existing-signer-key (get signer existing-entry)))

View File

@@ -2631,6 +2631,111 @@ fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() {
);
}
// Test that delegate-stack-increase calls emit an event
#[test]
fn pox_4_delegate_stack_increase_events() {
// Config for this test
let (epochs, pox_constants) = make_test_epochs_pox();
let mut burnchain = Burnchain::default_unittest(
0,
&BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(),
);
burnchain.pox_constants = pox_constants.clone();
let observer = TestEventObserver::new();
let (mut peer, mut keys) = instantiate_pox_peer_with_epoch(
&burnchain,
function_name!(),
Some(epochs.clone()),
Some(&observer),
);
assert_eq!(burnchain.pox_constants.reward_slots(), 6);
let mut coinbase_nonce = 0;
let mut latest_block = None;
let alice_key = keys.pop().unwrap();
let alice_address = key_to_stacks_addr(&alice_key);
let alice_principal = PrincipalData::from(alice_address.clone());
let alice_pox_addr = pox_addr_from(&alice_key);
let bob_key = keys.pop().unwrap();
let bob_address = key_to_stacks_addr(&bob_key);
let bob_principal = PrincipalData::from(bob_address.clone());
let bob_pox_addr = pox_addr_from(&bob_key);
let bob_pox_addr_val = Value::Tuple(bob_pox_addr.as_clarity_tuple().unwrap());
// Advance into pox4
let target_height = burnchain.pox_constants.pox_4_activation_height;
// produce blocks until the first reward phase that everyone should be in
while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) {
latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce));
}
// alice delegate to bob
let next_cycle = get_current_reward_cycle(&peer, &burnchain) + 1;
let amount = 100_000_000;
let alice_delegate =
make_pox_4_delegate_stx(&alice_key, 0, amount, bob_principal.clone(), None, None);
// bob delegate-stack-stx
let bob_delegate_stack_stx = make_pox_4_delegate_stack_stx(
&bob_key,
0,
alice_principal.clone(),
amount / 2,
bob_pox_addr.clone(),
get_tip(peer.sortdb.as_ref()).block_height as u128,
2,
);
// bob delegate-stack-increase
let bob_delegate_stack_increase = make_pox_4_delegate_stack_increase(
&bob_key,
1,
&alice_principal,
bob_pox_addr.clone(),
amount / 2,
);
latest_block = Some(peer.tenure_with_txs(
&[
alice_delegate.clone(),
bob_delegate_stack_stx.clone(),
bob_delegate_stack_increase.clone(),
],
&mut coinbase_nonce,
));
let txs: HashMap<_, _> = observer
.get_blocks()
.into_iter()
.flat_map(|b| b.receipts)
.filter_map(|r| match r.transaction {
TransactionOrigin::Stacks(ref t) => Some((t.txid(), r.clone())),
_ => None,
})
.collect();
let bob_delegate_stack_increase_tx = txs
.get(&bob_delegate_stack_increase.txid())
.unwrap()
.clone();
// Check event for delegate-stack-increase tx
let bob_delegate_stack_increase_tx_events = &bob_delegate_stack_increase_tx.events;
assert_eq!(bob_delegate_stack_increase_tx_events.len() as u64, 2);
let bob_delegate_stack_increase_op_data = HashMap::from([
("start-cycle-id", Value::UInt(next_cycle)),
("end-cycle-id", Optional(OptionalData { data: None })),
("increase-by", Value::UInt(amount / 2)),
("pox-addr", bob_pox_addr_val.clone()),
("delegator", alice_principal.clone().into()),
]);
}
// Test that revoke-delegate-stx calls emit an event, and
// that revoke-delegate-stx succeeds only if the user has delegated.
#[test]
@@ -7951,6 +8056,107 @@ fn test_scenario_four() {
assert_eq!(approved_key, None);
}
// In this test case, Alice delegates twice the stacking minimum to Bob.
// Bob stacks Alice's funds, then immediately tries to call stacks-aggregation-increase.
// This should return a Clarity user error.
#[test]
fn delegate_stack_increase_err() {
let lock_period: u128 = 2;
let observer = TestEventObserver::new();
let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) =
prepare_pox4_test(function_name!(), Some(&observer));
let alice_nonce = 0;
let alice_key = &keys[0];
let alice_address = PrincipalData::from(key_to_stacks_addr(alice_key));
let mut bob_nonce = 0;
let bob_delegate_key = &keys[1];
let bob_delegate_address = PrincipalData::from(key_to_stacks_addr(bob_delegate_key));
let min_ustx = get_stacking_minimum(&mut peer, &latest_block);
let signer_sk = StacksPrivateKey::from_seed(&[1, 3, 3, 7]);
let signer_pk = StacksPublicKey::from_private(&signer_sk);
let signer_pk_bytes = signer_pk.to_bytes_compressed();
let signer_key_val = Value::buff_from(signer_pk_bytes.clone()).unwrap();
let pox_addr = PoxAddress::from_legacy(
AddressHashMode::SerializeP2PKH,
key_to_stacks_addr(bob_delegate_key).bytes,
);
let next_reward_cycle = 1 + burnchain
.block_height_to_reward_cycle(block_height)
.unwrap();
let delegate_stx = make_pox_4_delegate_stx(
alice_key,
alice_nonce,
2 * min_ustx,
bob_delegate_address.clone(),
None,
Some(pox_addr.clone()),
);
let alice_principal = PrincipalData::from(key_to_stacks_addr(alice_key));
let delegate_stack_stx = make_pox_4_delegate_stack_stx(
bob_delegate_key,
bob_nonce,
alice_principal,
min_ustx * 2,
pox_addr.clone(),
block_height as u128,
lock_period,
);
let txs = vec![delegate_stx, delegate_stack_stx];
let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce);
bob_nonce += 1;
let signature = make_signer_key_signature(
&pox_addr,
&signer_sk,
next_reward_cycle.into(),
&Pox4SignatureTopic::AggregationIncrease,
1_u128,
u128::MAX,
1,
);
// Bob's Aggregate Increase
let bobs_aggregate_increase = make_pox_4_aggregation_increase(
&bob_delegate_key,
bob_nonce,
&pox_addr,
next_reward_cycle.into(),
0,
Some(signature),
&signer_pk,
u128::MAX,
1,
);
let txs = vec![bobs_aggregate_increase];
let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce);
let delegate_transactions =
get_last_block_sender_transactions(&observer, key_to_stacks_addr(bob_delegate_key));
let actual_result = delegate_transactions.first().cloned().unwrap().result;
// Should be a DELEGATION NO REWARD SLOT error
let expected_result = Value::error(Value::Int(28)).unwrap();
assert_eq!(actual_result, expected_result);
// test that the reward set is empty
let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle);
let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht);
assert!(reward_set.is_empty());
}
pub fn get_stacking_state_pox_4(
peer: &mut TestPeer,
tip: &StacksBlockId,

View File

@@ -61,7 +61,7 @@ pub const PEER_VERSION_EPOCH_3_0: u8 = 0x0b;
// this should be updated to the latest network epoch version supported by
// this node. this will be checked by the `validate_epochs()` method.
pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_0 as u32;
pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_2_5 as u32;
// set the fourth byte of the peer version
pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH;
@@ -121,7 +121,7 @@ pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 788_240;
/// This is Epoch-2.3, now Epoch-2.4, activation height proposed in SIP-024
pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 791_551;
/// This is Epoch-2.5, activation height proposed in SIP-021
pub const BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT: u64 = 839_444;
pub const BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT: u64 = 840_360;
/// This is Epoch-3.0, activation height proposed in SIP-021
pub const BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT: u64 = 2_000_000;
@@ -1431,10 +1431,14 @@ impl StacksEpochExtension for StacksEpoch {
.iter()
.max()
.expect("FATAL: expect at least one epoch");
assert!(
max_epoch.network_epoch as u32 <= PEER_NETWORK_EPOCH,
"stacks-blockchain static network epoch should be greater than or equal to the max epoch's"
);
if max_epoch.epoch_id == StacksEpochId::Epoch30 {
assert!(PEER_NETWORK_EPOCH >= u32::from(PEER_VERSION_EPOCH_2_5));
} else {
assert!(
max_epoch.network_epoch as u32 <= PEER_NETWORK_EPOCH,
"stacks-blockchain static network epoch should be greater than or equal to the max epoch's"
);
}
assert!(
StacksEpochId::latest() >= max_epoch.epoch_id,

View File

@@ -29,6 +29,7 @@ use stacks_common::util::uint::{Uint256, Uint512};
use crate::burnchains::{BurnchainSigner, Txid};
use crate::core::MemPoolDB;
use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse};
use crate::net::rpc::ConversationHttp;
use crate::net::Error as net_error;
use crate::util_lib::db::{sqlite_open, tx_busy_handler, DBConn, Error as DatabaseError};
@@ -46,19 +47,20 @@ pub fn increment_rpc_calls_counter() {
}
pub fn instrument_http_request_handler<F, R>(
req: StacksHttpRequest,
conv_http: &mut ConversationHttp,
mut req: StacksHttpRequest,
handler: F,
) -> Result<R, net_error>
where
F: FnOnce(StacksHttpRequest) -> Result<R, net_error>,
F: FnOnce(&mut ConversationHttp, StacksHttpRequest) -> Result<R, net_error>,
{
#[cfg(feature = "monitoring_prom")]
increment_rpc_calls_counter();
#[cfg(feature = "monitoring_prom")]
let timer = prometheus::new_rpc_call_timer(req.request_path());
let timer = prometheus::new_rpc_call_timer(conv_http.metrics_identifier(&mut req));
let res = handler(req);
let res = handler(conv_http, req);
#[cfg(feature = "monitoring_prom")]
timer.stop_and_record();

View File

@@ -116,6 +116,10 @@ impl HttpRequest for RPCCallReadOnlyRequestHandler {
.unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/contracts/call-read/:principal/:contract_name/:func_name"
}
/// Try to decode this request.
fn try_parse_request(
&mut self,

View File

@@ -84,6 +84,10 @@ impl HttpRequest for RPCGetAccountRequestHandler {
.unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/accounts/:principal"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -59,6 +59,10 @@ impl HttpRequest for RPCGetAttachmentRequestHandler {
Regex::new(r#"^/v2/attachments/(?P<attachment_hash>[0-9a-f]{40})$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/attachments/:hash"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -61,6 +61,10 @@ impl HttpRequest for RPCGetAttachmentsInvRequestHandler {
Regex::new("^/v2/attachments/inv$").unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/attachments/inv"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -90,6 +90,10 @@ impl HttpRequest for RPCBlocksRequestHandler {
Regex::new(r#"^/v2/blocks/(?P<block_id>[0-9a-f]{64})$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/blocks/:block_id"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -126,6 +126,10 @@ impl HttpRequest for RPCNakamotoBlockRequestHandler {
Regex::new(r#"^/v3/blocks/(?P<block_id>[0-9a-f]{64})$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v3/blocks/:block_id"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -83,6 +83,10 @@ impl HttpRequest for RPCGetConstantValRequestHandler {
.unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/constant_val/:principal/:contract_name/:const_name"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -78,6 +78,10 @@ impl HttpRequest for RPCGetContractAbiRequestHandler {
.unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/contracts/interface/:principal/:contract_name"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -86,6 +86,10 @@ impl HttpRequest for RPCGetContractSrcRequestHandler {
.unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/contracts/source/:principal/:contract_name"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -86,6 +86,10 @@ impl HttpRequest for RPCGetDataVarRequestHandler {
.unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/data_var/:principal/:contract_name/:var_name"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -109,6 +109,10 @@ impl HttpRequest for RPCHeadersRequestHandler {
Regex::new(r#"^/v2/headers/(?P<quantity>[0-9]+)$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/headers/:height"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -179,6 +179,10 @@ impl HttpRequest for RPCPeerInfoRequestHandler {
Regex::new(r#"^/v2/info$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/info"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -86,6 +86,10 @@ impl HttpRequest for RPCGetIsTraitImplementedRequestHandler {
.unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/traits/:principal/:contract_name"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -92,6 +92,10 @@ impl HttpRequest for RPCGetMapEntryRequestHandler {
.unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/map_entry/:principal/:contract_name/:map_name"
}
/// Try to decode this request.
/// The body must be a hex string, encoded as a JSON string.
/// So, something like `"123abc"`. It encodes the map key as a serialized Clarity value.

View File

@@ -79,6 +79,10 @@ impl HttpRequest for RPCMicroblocksConfirmedRequestHandler {
Regex::new(r#"^/v2/microblocks/confirmed/(?P<block_id>[0-9a-f]{64})$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/microblocks/confirmed/:block_id"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -112,6 +112,10 @@ impl HttpRequest for RPCMicroblocksIndexedRequestHandler {
Regex::new(r#"^/v2/microblocks/(?P<tail_microblock_id>[0-9a-f]{64})$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/microblocks/:microblock_id"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -106,6 +106,10 @@ impl HttpRequest for RPCMicroblocksUnconfirmedRequestHandler {
Regex::new(r#"^/v2/microblocks/unconfirmed/(?P<parent_block_id>[0-9a-f]{64})/(?P<start_sequence>[0-9]{1,6})$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/microblocks/unconfirmed/:block_id/:seq"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -179,6 +179,10 @@ impl HttpRequest for RPCNeighborsRequestHandler {
Regex::new(r#"^/v2/neighbors$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/neighbors"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -440,6 +440,10 @@ impl HttpRequest for RPCPoxInfoRequestHandler {
Regex::new(r#"^/v2/pox$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/pox"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -77,6 +77,10 @@ impl HttpRequest for RPCGetStackerDBChunkRequestHandler {
)).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/stackerdb/:principal/:contract_name/:slot_id/:slot_version"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -73,6 +73,10 @@ impl HttpRequest for RPCGetStackerDBMetadataRequestHandler {
.unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/stackerdb/:principal/:contract_name"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -99,6 +99,10 @@ impl HttpRequest for GetStackersRequestHandler {
Regex::new(r#"^/v2/stacker_set/(?P<cycle_num>[0-9]{1,20})$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/stacker_set/:cycle_num"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -59,6 +59,10 @@ impl HttpRequest for RPCGetStxTransferCostRequestHandler {
Regex::new(r#"^/v2/fees/transfer$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/fees/transfer"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -155,6 +155,10 @@ impl HttpRequest for RPCNakamotoTenureRequestHandler {
Regex::new(r#"^/v3/tenures/(?P<block_id>[0-9a-f]{64})$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v3/tenures/:block_id"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -81,6 +81,10 @@ impl HttpRequest for RPCNakamotoTenureInfoRequestHandler {
Regex::new(r#"^/v3/tenures/info"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v3/tenures/info"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -75,6 +75,10 @@ impl HttpRequest for RPCGetTransactionUnconfirmedRequestHandler {
Regex::new(r#"^/v2/transactions/unconfirmed/(?P<txid>[0-9a-f]{64})$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/transactions/unconfirmed/:txid"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -77,6 +77,10 @@ impl HttpRequest for RPCListStackerDBReplicasRequestHandler {
.unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/stackedb/:principal/:contract_name/replicas"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -95,6 +95,10 @@ impl HttpRequest for RPCPostBlockRequestHandler {
Regex::new(r#"^/v2/blocks/upload/(?P<consensus_hash>[0-9a-f]{40})$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/blocks/upload/:block"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -377,6 +377,10 @@ impl HttpRequest for RPCBlockProposalRequestHandler {
Regex::new(r#"^/v2/block_proposal$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/block_proposal"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -111,6 +111,10 @@ impl HttpRequest for RPCPostFeeRateRequestHandler {
Regex::new(r#"^/v2/fees/transaction$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/fees/transaction"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -224,6 +224,10 @@ impl HttpRequest for RPCMempoolQueryRequestHandler {
Regex::new(r#"^/v2/mempool/query$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/mempool/query"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -85,6 +85,10 @@ impl HttpRequest for RPCPostMicroblockRequestHandler {
Regex::new(r#"^/v2/microblocks$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/microblocks"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -82,6 +82,10 @@ impl HttpRequest for RPCPostStackerDBChunkRequestHandler {
.unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/block_proposal/:principal/:contract_name/chunks"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -120,6 +120,10 @@ impl HttpRequest for RPCPostTransactionRequestHandler {
Regex::new(r#"^/v2/transactions$"#).unwrap()
}
fn metrics_identifier(&self) -> &str {
"/v2/transactions"
}
/// Try to decode this request.
/// There's nothing to load here, so just make sure the request is well-formed.
fn try_parse_request(

View File

@@ -695,4 +695,6 @@ pub trait HttpRequest: Send + HttpRequestClone {
query_str: Option<&str>,
body: &[u8],
) -> Result<HttpRequestContents, Error>;
/// Get an identifier from a finite set to be used in metrics
fn metrics_identifier(&self) -> &str;
}

View File

@@ -36,6 +36,7 @@ use stacks_common::util::get_epoch_time_ms;
use stacks_common::util::retry::{BoundReader, RetryReader};
use url::Url;
use super::rpc::ConversationHttp;
use crate::burnchains::Txid;
use crate::chainstate::burn::db::sortdb::SortitionDB;
use crate::chainstate::burn::BlockSnapshot;
@@ -436,6 +437,8 @@ pub struct StacksHttpRequest {
preamble: HttpRequestPreamble,
contents: HttpRequestContents,
start_time: u128,
/// Cache result of `StacksHttp::find_response_handler` so we don't have to do the regex matching twice
response_handler_index: Option<usize>,
}
impl StacksHttpRequest {
@@ -444,6 +447,7 @@ impl StacksHttpRequest {
preamble,
contents,
start_time: get_epoch_time_ms(),
response_handler_index: None,
}
}
@@ -479,6 +483,7 @@ impl StacksHttpRequest {
preamble,
contents,
start_time: get_epoch_time_ms(),
response_handler_index: None,
})
}
@@ -546,6 +551,11 @@ impl StacksHttpRequest {
self.send(&mut ret)?;
Ok(ret)
}
#[cfg(test)]
pub fn get_response_handler_index(&self) -> Option<usize> {
self.response_handler_index
}
}
/// A received HTTP response (fully decoded in RAM)
@@ -913,9 +923,7 @@ impl StacksHttp {
if request_verb != verb {
continue;
}
let _captures = if let Some(caps) = regex.captures(request_path) {
caps
} else {
let Some(_captures) = regex.captures(request_path) else {
continue;
};
@@ -986,9 +994,7 @@ impl StacksHttp {
if &preamble.verb != verb {
continue;
}
let captures = if let Some(caps) = regex.captures(&decoded_path) {
caps
} else {
let Some(captures) = regex.captures(&decoded_path) else {
continue;
};
@@ -1086,21 +1092,21 @@ impl StacksHttp {
node: &mut StacksNodeState,
) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> {
let (decoded_path, _) = decode_request_path(&request.preamble().path_and_query_str)?;
let response_handler_index =
if let Some(i) = self.find_response_handler(&request.preamble().verb, &decoded_path) {
i
} else {
// method not found
return StacksHttpResponse::new_error(
&request.preamble,
&HttpNotFound::new(format!(
"No such API endpoint '{} {}'",
&request.preamble().verb,
&decoded_path
)),
)
.try_into_contents();
};
let Some(response_handler_index) = request
.response_handler_index
.or_else(|| self.find_response_handler(&request.preamble().verb, &decoded_path))
else {
// method not found
return StacksHttpResponse::new_error(
&request.preamble,
&HttpNotFound::new(format!(
"No such API endpoint '{} {}'",
&request.preamble().verb,
&decoded_path
)),
)
.try_into_contents();
};
let (_, _, request_handler) = self
.request_handlers
@@ -1244,6 +1250,30 @@ impl StacksHttp {
window
}
/// Get a unique `&str` identifier for each request type
/// This can only return a finite set of identifiers, which makes it safer to use for Prometheus metrics
/// For details see https://github.com/stacks-network/stacks-core/issues/4574
pub fn metrics_identifier(&self, req: &mut StacksHttpRequest) -> &str {
let Ok((decoded_path, _)) = decode_request_path(&req.request_path()) else {
return "<err-url-decode>";
};
let Some(response_handler_index) = req
.response_handler_index
.or_else(|| self.find_response_handler(&req.preamble().verb, &decoded_path))
else {
return "<err-handler-not-found>";
};
req.response_handler_index = Some(response_handler_index);
let (_, _, request_handler) = self
.request_handlers
.get(response_handler_index)
.expect("FATAL: request points to a nonexistent handler");
request_handler.metrics_identifier()
}
/// Given a fully-formed single HTTP response, parse it (used by clients).
#[cfg(test)]
pub fn parse_response(

View File

@@ -156,12 +156,12 @@ impl ConversationHttp {
let stacks_http = StacksHttp::new(peer_addr.clone(), conn_opts);
ConversationHttp {
connection: ConnectionHttp::new(stacks_http, conn_opts, None),
conn_id: conn_id,
conn_id,
timeout: conn_opts.timeout,
reply_streams: VecDeque::new(),
peer_addr: peer_addr,
outbound_url: outbound_url,
peer_host: peer_host,
peer_addr,
outbound_url,
peer_host,
canonical_stacks_tip_height: None,
pending_request: None,
pending_response: None,
@@ -533,11 +533,8 @@ impl ConversationHttp {
test_debug!("{:?}: {} HTTP requests pending", &self, num_inbound);
for _i in 0..num_inbound {
let msg = match self.connection.next_inbox_message() {
None => {
continue;
}
Some(m) => m,
let Some(msg) = self.connection.next_inbox_message() else {
continue;
};
match msg {
@@ -549,9 +546,11 @@ impl ConversationHttp {
let start_time = Instant::now();
let verb = req.verb().to_string();
let request_path = req.request_path().to_string();
let msg_opt = monitoring::instrument_http_request_handler(req, |req| {
self.handle_request(req, node)
})?;
let msg_opt = monitoring::instrument_http_request_handler(
self,
req,
|conv_http, req| conv_http.handle_request(req, node),
)?;
info!("Handled StacksHTTPRequest";
"verb" => %verb,
@@ -659,4 +658,8 @@ impl ConversationHttp {
pub fn get_peer_host(&self) -> PeerHost {
self.peer_host.clone()
}
pub fn metrics_identifier(&self, req: &mut StacksHttpRequest) -> &str {
self.connection.protocol.metrics_identifier(req)
}
}

View File

@@ -1050,3 +1050,70 @@ fn test_http_parse_proof_request_query() {
.get_with_proof();
assert!(proof_req);
}
#[test]
fn test_metrics_identifiers() {
let convo = ConversationHttp::new(
"127.0.0.1:12345".parse().unwrap(),
None,
PeerHost::DNS("localhost".to_string(), 12345),
&ConnectionOptions::default(),
100,
32,
);
let fixtures = vec![
// Valid requests
(("GET", "/v2/info"), ("/v2/info", true)),
(
("GET", "/v2/info?param1=value&param2=other_value"),
("/v2/info", true),
),
(
(
"GET",
"/v2/blocks/d8bd3c7e7cf7a9d783560a71356d3d9dbc84dc2f0c1a0001be8b141927c9d7ab",
),
("/v2/blocks/:block_id", true),
),
// Invalid requests
(("POST", "/v2/info"), ("<err-handler-not-found>", false)),
(("GET", "!@#%&^$#!&^(@&+++"), ("<err-url-decode>", false)),
(
("GET", "/some/nonexistent/endpoint"),
("<err-handler-not-found>", false),
),
(
(
"GET",
"/v2/blocks/dsviawevasigngawuqajauharpqjumzkalfuwgfkwpdhtbefgxkdhdfduskafdgh",
),
("<err-handler-not-found>", false),
),
];
for (input, output) in fixtures {
// Destructure fixture data
let (verb, path_and_query_string) = input;
let (metrics_identifier_expected, should_have_handler) = output;
// Create request from data
let preamble = HttpRequestPreamble::new(
HttpVersion::Http11,
verb.to_string(),
path_and_query_string.to_string(),
"localhost".to_string(),
12345,
true,
);
let mut request = StacksHttpRequest::new(preamble, HttpRequestContents::new());
let metrics_identifier = convo.metrics_identifier(&mut request);
let response_handler_index = request.get_response_handler_index();
// Check that we get expected metrics identifier and request handler
assert_eq!(metrics_identifier, metrics_identifier_expected);
assert_eq!(response_handler_index.is_some(), should_have_handler);
}
}

View File

@@ -271,6 +271,32 @@ mod tests {
// Should add the default xenon affirmation overrides
assert_eq!(config.burnchain.affirmation_overrides.len(), 5);
}
#[test]
fn should_override_xenon_default_affirmation_overrides() {
let affirmation_string = "aaapnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpa";
let affirmation =
AffirmationMap::decode(affirmation_string).expect("Failed to decode affirmation map");
let config = Config::from_config_file(
ConfigFile::from_str(&format!(
r#"
[burnchain]
chain = "bitcoin"
mode = "xenon"
[[burnchain.affirmation_overrides]]
reward_cycle = 413
affirmation = "{affirmation_string}"
"#,
))
.expect("Expected to be able to parse config file from string"),
)
.expect("Expected to be able to parse affirmation map from file");
// Should add the default xenon affirmation overrides, but overwrite them with the one configured above
assert_eq!(config.burnchain.affirmation_overrides.len(), 5);
assert_eq!(config.burnchain.affirmation_overrides[&413], affirmation);
}
}
impl ConfigFile {
@@ -1490,7 +1516,6 @@ impl BurnchainConfigFile {
BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT,
)
.unwrap();
eprintln!("last_present_cycle = {last_present_cycle}");
assert_eq!(
u64::try_from(affirmations_pre_2_5.len()).unwrap(),
last_present_cycle - 1
@@ -1501,10 +1526,12 @@ impl BurnchainConfigFile {
BITCOIN_TESTNET_STACKS_25_REORGED_HEIGHT,
)
.unwrap();
eprintln!("last_present_cycle = {last_present_cycle}, last_override = {last_override}");
let override_values = ["a", "n"];
for (override_index, reward_cycle) in (last_present_cycle + 1..=last_override).enumerate() {
let affirmation = format!("{affirmations_pre_2_5}{}", "a".repeat(override_index + 1));
assert!(override_values.len() > override_index);
let overrides = override_values[..(override_index + 1)].join("");
let affirmation = format!("{affirmations_pre_2_5}{overrides}");
default_overrides.push(AffirmationOverride {
reward_cycle,
affirmation,
@@ -1513,7 +1540,10 @@ impl BurnchainConfigFile {
if let Some(affirmation_overrides) = self.affirmation_overrides.as_mut() {
for affirmation in default_overrides {
affirmation_overrides.push(affirmation);
// insert at front, so that the hashmap uses the configured overrides
// instead of the defaults (the configured overrides will write over the
// defaults because they come later in the list).
affirmation_overrides.insert(0, affirmation);
}
} else {
self.affirmation_overrides = Some(default_overrides);

View File

@@ -1169,6 +1169,55 @@ impl BlockMinerThread {
return vec![];
}
let sortdb_tip_handle = burn_db.index_handle_at_tip();
let stacks_tips: Vec<_> = stacks_tips
.into_iter()
.filter(|candidate| {
let candidate_ch = &candidate.consensus_hash;
let candidate_burn_ht = match SortitionDB::get_block_snapshot_consensus(
sortdb_tip_handle.conn(),
candidate_ch
) {
Ok(Some(x)) => x.block_height,
Ok(None) => {
warn!("Tried to evaluate potential chain tip with an unknown consensus hash";
"consensus_hash" => %candidate_ch,
"stacks_block_hash" => %candidate.anchored_block_hash);
return false;
},
Err(e) => {
warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash";
"consensus_hash" => %candidate_ch,
"stacks_block_hash" => %candidate.anchored_block_hash,
"err" => ?e);
return false;
},
};
let tip_ch = match sortdb_tip_handle.get_consensus_at(candidate_burn_ht) {
Ok(Some(x)) => x,
Ok(None) => {
warn!("Tried to evaluate potential chain tip with a consensus hash ahead of canonical tip";
"consensus_hash" => %candidate_ch,
"stacks_block_hash" => %candidate.anchored_block_hash);
return false;
},
Err(e) => {
warn!("Error while trying to evaluate potential chain tip with an unknown consensus hash";
"consensus_hash" => %candidate_ch,
"stacks_block_hash" => %candidate.anchored_block_hash,
"err" => ?e);
return false;
},
};
&tip_ch == candidate_ch
})
.collect();
let mut considered = HashSet::new();
let mut candidates = vec![];
let end_height = stacks_tips[0].height;

View File

@@ -10,7 +10,7 @@ use clarity::vm::ast::ASTRules;
use clarity::vm::costs::ExecutionCost;
use clarity::vm::types::PrincipalData;
use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value, MAX_CALL_STACK_DEPTH};
use rand::Rng;
use rand::{Rng, RngCore};
use rusqlite::types::ToSql;
use serde_json::json;
use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType};
@@ -42,6 +42,7 @@ use stacks::core::{
BLOCK_LIMIT_MAINNET_21, CHAIN_ID_TESTNET, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0,
PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1,
PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5,
PEER_VERSION_TESTNET,
};
use stacks::net::api::getaccount::AccountEntryResponse;
use stacks::net::api::getcontractsrc::ContractSrcResponse;
@@ -12259,3 +12260,202 @@ fn bitcoin_reorg_flap() {
btcd_controller.stop_bitcoind().unwrap();
channel.stop_chains_coordinator();
}
fn next_block_and_wait_all(
btc_controller: &mut BitcoinRegtestController,
miner_blocks_processed: &Arc<AtomicU64>,
follower_blocks_processed: &[&Arc<AtomicU64>],
) -> bool {
let followers_current: Vec<_> = follower_blocks_processed
.iter()
.map(|blocks_processed| blocks_processed.load(Ordering::SeqCst))
.collect();
if !next_block_and_wait(btc_controller, miner_blocks_processed) {
return false;
}
// wait for followers to catch up
loop {
let finished = follower_blocks_processed
.iter()
.zip(followers_current.iter())
.map(|(blocks_processed, current)| blocks_processed.load(Ordering::SeqCst) > *current)
.fold(true, |acc, loaded| acc && loaded);
if finished {
break;
}
thread::sleep(Duration::from_millis(100));
}
true
}
#[test]
#[ignore]
fn bitcoin_reorg_flap_with_follower() {
if env::var("BITCOIND_TEST") != Ok("1".into()) {
return;
}
let (conf, _miner_account) = neon_integration_test_conf();
let mut btcd_controller = BitcoinCoreController::new(conf.clone());
btcd_controller
.start_bitcoind()
.expect("Failed starting bitcoind");
let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
btc_regtest_controller.bootstrap_chain(201);
eprintln!("Chain bootstrapped...");
let mut miner_run_loop = neon::RunLoop::new(conf.clone());
let miner_blocks_processed = miner_run_loop.get_blocks_processed_arc();
let miner_channel = miner_run_loop.get_coordinator_channel().unwrap();
let mut follower_conf = conf.clone();
follower_conf.events_observers.clear();
follower_conf.node.working_dir = format!("{}-follower", &conf.node.working_dir);
follower_conf.node.seed = vec![0x01; 32];
follower_conf.node.local_peer_seed = vec![0x02; 32];
let mut rng = rand::thread_rng();
let mut buf = [0u8; 8];
rng.fill_bytes(&mut buf);
let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534
let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534
let localhost = "127.0.0.1";
follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port);
follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port);
follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port);
follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port);
thread::spawn(move || miner_run_loop.start(None, 0));
wait_for_runloop(&miner_blocks_processed);
// figure out the started node's port
let node_info = get_chain_info(&conf);
follower_conf.node.add_bootstrap_node(
&format!(
"{}@{}",
&node_info.node_public_key.unwrap(),
conf.node.p2p_bind
),
CHAIN_ID_TESTNET,
PEER_VERSION_TESTNET,
);
let mut follower_run_loop = neon::RunLoop::new(follower_conf.clone());
let follower_blocks_processed = follower_run_loop.get_blocks_processed_arc();
let follower_channel = follower_run_loop.get_coordinator_channel().unwrap();
thread::spawn(move || follower_run_loop.start(None, 0));
wait_for_runloop(&follower_blocks_processed);
eprintln!("Follower bootup complete!");
// first block wakes up the run loop
next_block_and_wait_all(&mut btc_regtest_controller, &miner_blocks_processed, &[]);
// first block will hold our VRF registration
next_block_and_wait_all(
&mut btc_regtest_controller,
&miner_blocks_processed,
&[&follower_blocks_processed],
);
let mut miner_sort_height = miner_channel.get_sortitions_processed();
let mut follower_sort_height = follower_channel.get_sortitions_processed();
eprintln!(
"Miner sort height: {}, follower sort height: {}",
miner_sort_height, follower_sort_height
);
while miner_sort_height < 210 && follower_sort_height < 210 {
next_block_and_wait_all(
&mut btc_regtest_controller,
&miner_blocks_processed,
&[&follower_blocks_processed],
);
miner_sort_height = miner_channel.get_sortitions_processed();
follower_sort_height = follower_channel.get_sortitions_processed();
eprintln!(
"Miner sort height: {}, follower sort height: {}",
miner_sort_height, follower_sort_height
);
}
// stop bitcoind and copy its DB to simulate a chain flap
btcd_controller.stop_bitcoind().unwrap();
thread::sleep(Duration::from_secs(5));
let btcd_dir = conf.get_burnchain_path_str();
let mut new_conf = conf.clone();
new_conf.node.working_dir = format!("{}.new", &conf.node.working_dir);
fs::create_dir_all(&new_conf.node.working_dir).unwrap();
copy_dir_all(&btcd_dir, &new_conf.get_burnchain_path_str()).unwrap();
// resume
let mut btcd_controller = BitcoinCoreController::new(conf.clone());
btcd_controller
.start_bitcoind()
.expect("Failed starting bitcoind");
let btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
thread::sleep(Duration::from_secs(5));
info!("\n\nBegin fork A\n\n");
// make fork A
for _i in 0..3 {
btc_regtest_controller.build_next_block(1);
thread::sleep(Duration::from_secs(5));
}
btcd_controller.stop_bitcoind().unwrap();
info!("\n\nBegin reorg flap from A to B\n\n");
// carry out the flap to fork B -- new_conf's state was the same as before the reorg
let mut btcd_controller = BitcoinCoreController::new(new_conf.clone());
let btc_regtest_controller = BitcoinRegtestController::new(new_conf.clone(), None);
btcd_controller
.start_bitcoind()
.expect("Failed starting bitcoind");
for _i in 0..5 {
btc_regtest_controller.build_next_block(1);
thread::sleep(Duration::from_secs(5));
}
btcd_controller.stop_bitcoind().unwrap();
info!("\n\nBegin reorg flap from B to A\n\n");
let mut btcd_controller = BitcoinCoreController::new(conf.clone());
let btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
btcd_controller
.start_bitcoind()
.expect("Failed starting bitcoind");
// carry out the flap back to fork A
for _i in 0..7 {
btc_regtest_controller.build_next_block(1);
thread::sleep(Duration::from_secs(5));
}
assert_eq!(miner_channel.get_sortitions_processed(), 225);
assert_eq!(follower_channel.get_sortitions_processed(), 225);
btcd_controller.stop_bitcoind().unwrap();
miner_channel.stop_chains_coordinator();
follower_channel.stop_chains_coordinator();
}