Merge branch 'next' into feat/btc-stx-exchange-rate

This commit is contained in:
Jude Nelson
2022-06-07 11:14:43 -04:00
10 changed files with 1499 additions and 40 deletions

View File

@@ -72,6 +72,9 @@ jobs:
- tests::epoch_205::test_cost_limit_switch_version205
- tests::epoch_205::test_exact_block_costs
- tests::epoch_205::bigger_microblock_streams_in_2_05
- tests::epoch_21::transition_fixes_utxo_chaining
- tests::epoch_21::transition_adds_burn_block_height
- tests::epoch_21::transition_fixes_bitcoin_rigidity
steps:
- uses: actions/checkout@v2
- name: Download docker image

View File

@@ -103,6 +103,12 @@ impl FromRow<SortitionId> for SortitionId {
}
}
/// Decode a `BurnchainHeaderHash` from a database row by reading its
/// `burn_header_hash` column.
impl FromRow<BurnchainHeaderHash> for BurnchainHeaderHash {
fn from_row<'a>(row: &'a Row) -> Result<BurnchainHeaderHash, db_error> {
BurnchainHeaderHash::from_column(row, "burn_header_hash")
}
}
impl FromRow<MissedBlockCommit> for MissedBlockCommit {
fn from_row<'a>(row: &'a Row) -> Result<MissedBlockCommit, db_error> {
let intended_sortition = SortitionId::from_column(row, "intended_sortition_id")?;
@@ -607,7 +613,7 @@ const SORTITION_DB_SCHEMA_3: &'static [&'static str] = &[r#"
);"#];
// update this to add new indexes
const LAST_SORTITION_DB_INDEX: &'static str = "index_parent_sortition_id";
const LAST_SORTITION_DB_INDEX: &'static str = "index_parent_burn_header_hash";
const SORTITION_DB_INDEXES: &'static [&'static str] = &[
"CREATE INDEX IF NOT EXISTS snapshots_block_hashes ON snapshots(block_height,index_root,winning_stacks_block_hash);",
@@ -628,6 +634,8 @@ const SORTITION_DB_INDEXES: &'static [&'static str] = &[
"CREATE INDEX IF NOT EXISTS index_missed_commits_intended_sortition_id ON missed_commits(intended_sortition_id);",
"CREATE INDEX IF NOT EXISTS canonical_stacks_blocks ON canonical_accepted_stacks_blocks(tip_consensus_hash,stacks_block_hash);",
"CREATE INDEX IF NOT EXISTS index_parent_sortition_id ON block_commit_parents(parent_sortition_id);",
"CREATE INDEX IF NOT EXISTS index_burn_header_hash ON snapshots(burn_header_hash);",
"CREATE INDEX IF NOT EXISTS index_parent_burn_header_hash ON snapshots(parent_burn_header_hash,burn_header_hash);",
];
pub struct SortitionDB {
@@ -3149,6 +3157,9 @@ impl SortitionDB {
}
}
/// Get the list of Stack-STX operations processed in a given burnchain block.
/// This will be the same list in each PoX fork; it's up to the Stacks block-processing logic
/// to reject them.
pub fn get_stack_stx_ops(
conn: &Connection,
burn_header_hash: &BurnchainHeaderHash,
@@ -3160,6 +3171,9 @@ impl SortitionDB {
)
}
/// Get the list of Transfer-STX operations processed in a given burnchain block.
/// This will be the same list in each PoX fork; it's up to the Stacks block-processing logic
/// to reject them.
pub fn get_transfer_stx_ops(
conn: &Connection,
burn_header_hash: &BurnchainHeaderHash,
@@ -3171,6 +3185,68 @@ impl SortitionDB {
)
}
/// Look up the parent burnchain header hash of `burnchain_header_hash` in the
/// snapshots table.
///
/// Returns `Ok(None)` if no snapshot records the given hash. More than one
/// snapshot row can match if there was a PoX reorg; in that case every recorded
/// parent must be identical, and this method panics on disagreement since that
/// would mean the sortition DB is corrupted.
fn get_parent_burnchain_header_hash(
    conn: &Connection,
    burnchain_header_hash: &BurnchainHeaderHash,
) -> Result<Option<BurnchainHeaderHash>, db_error> {
    let sql = "SELECT parent_burn_header_hash AS burn_header_hash FROM snapshots WHERE burn_header_hash = ?1";
    let args: &[&dyn ToSql] = &[burnchain_header_hash];
    let mut candidates = query_rows::<BurnchainHeaderHash, _>(conn, sql, args)?;
    match candidates.pop() {
        None => Ok(None),
        Some(parent_bhh) => {
            // Multiple rows are possible after a PoX reorg, but they must all
            // name the same parent; anything else is fatal corruption.
            for other in candidates.into_iter() {
                if other != parent_bhh {
                    panic!(
                        "FATAL: burnchain header hash {} has two parents: {} and {}",
                        burnchain_header_hash, &parent_bhh, &other
                    );
                }
            }
            Ok(Some(parent_bhh))
        }
    }
}
/// Collect up to the last `count` ancestor burnchain header hashes of
/// `burn_header_hash`, walking parent links without regard to PoX forks.
///
/// The returned list is ordered newest-first:
///
/// * burn_header_hash
/// * 1st ancestor of burn_header_hash
/// * 2nd ancestor of burn_header_hash
/// ...
/// * Nth ancestor of burn_header_hash
///
/// so it contains at most `count + 1` entries. The walk ends early if an
/// ancestor cannot be found; the list always begins with `burn_header_hash`
/// itself.
pub fn get_ancestor_burnchain_header_hashes(
    conn: &Connection,
    burn_header_hash: &BurnchainHeaderHash,
    count: u64,
) -> Result<Vec<BurnchainHeaderHash>, db_error> {
    // `cursor` tracks the most recently found hash; the list starts with the
    // requested hash regardless of whether it exists in the DB.
    let mut cursor = burn_header_hash.clone();
    let mut hashes = vec![cursor.clone()];
    for _ in 0..count {
        match SortitionDB::get_parent_burnchain_header_hash(conn, &cursor)? {
            Some(parent) => {
                cursor = parent;
                hashes.push(cursor.clone());
            }
            None => break,
        }
    }
    Ok(hashes)
}
pub fn index_handle_at_tip<'a>(&'a self) -> SortitionHandleConn<'a> {
let sortition_id = SortitionDB::get_canonical_sortition_tip(self.conn()).unwrap();
self.index_handle(&sortition_id)
@@ -3912,6 +3988,11 @@ impl<'a> SortitionHandleTx<'a> {
Ok(())
}
/// Test-only public wrapper around the private `insert_stack_stx`, so unit
/// tests can insert a stack-stx op directly.
#[cfg(test)]
pub fn mock_insert_stack_stx(&mut self, op: &StackStxOp) -> Result<(), db_error> {
self.insert_stack_stx(op)
}
/// Insert a transfer-stx op
fn insert_transfer_stx(&mut self, op: &TransferStxOp) -> Result<(), db_error> {
let args: &[&dyn ToSql] = &[
@@ -3930,6 +4011,11 @@ impl<'a> SortitionHandleTx<'a> {
Ok(())
}
/// Test-only public wrapper around the private `insert_transfer_stx`, so unit
/// tests can insert a transfer-stx op directly.
#[cfg(test)]
pub fn mock_insert_transfer_stx(&mut self, op: &TransferStxOp) -> Result<(), db_error> {
self.insert_transfer_stx(op)
}
/// Insert a leader block commitment.
/// No validity checking will be done, beyond what is encoded in the block_commits table
/// constraints. That is, type mismatches and serialization issues will be caught, but nothing else.
@@ -8005,4 +8091,76 @@ pub mod tests {
}
}
}
/// Unit test for `SortitionDB::get_ancestor_burnchain_header_hashes`: builds a
/// linear chain of 10 snapshots with header hashes [0x01; 32] .. [0x0a; 32] on top
/// of the test genesis, then checks the typical case, an over-long request, a
/// zero-length request, and a request starting from a hash with no snapshot.
#[test]
fn test_get_ancestor_burnchain_header_hashes() {
let block_height = 100;
let first_burn_hash = BurnchainHeaderHash([0x00; 32]);
let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap();
for i in 1..11 {
test_append_snapshot(&mut db, BurnchainHeaderHash([i as u8; 32]), &vec![]);
}
// typical
let ancestors = SortitionDB::get_ancestor_burnchain_header_hashes(
db.conn(),
&BurnchainHeaderHash([0x09; 32]),
6,
)
.unwrap();
// 6 ancestors requested -> 7 hashes returned, newest first
assert_eq!(
ancestors,
vec![
BurnchainHeaderHash([0x09; 32]),
BurnchainHeaderHash([0x08; 32]),
BurnchainHeaderHash([0x07; 32]),
BurnchainHeaderHash([0x06; 32]),
BurnchainHeaderHash([0x05; 32]),
BurnchainHeaderHash([0x04; 32]),
BurnchainHeaderHash([0x03; 32])
]
);
// edge case -- get too many
let ancestors = SortitionDB::get_ancestor_burnchain_header_hashes(
db.conn(),
&BurnchainHeaderHash([0x09; 32]),
20,
)
.unwrap();
assert_eq!(
ancestors,
vec![
BurnchainHeaderHash([0x09; 32]),
BurnchainHeaderHash([0x08; 32]),
BurnchainHeaderHash([0x07; 32]),
BurnchainHeaderHash([0x06; 32]),
BurnchainHeaderHash([0x05; 32]),
BurnchainHeaderHash([0x04; 32]),
BurnchainHeaderHash([0x03; 32]),
BurnchainHeaderHash([0x02; 32]),
BurnchainHeaderHash([0x01; 32]),
BurnchainHeaderHash([0x00; 32]),
// NOTE(review): the trailing [0xff; 32] entry is presumably the sentinel
// parent of the genesis snapshot created by connect_test -- confirm
// against SortitionDB::connect_test
BurnchainHeaderHash([0xff; 32]),
]
);
// edge case -- get none (the starting hash is still returned)
let ancestors = SortitionDB::get_ancestor_burnchain_header_hashes(
db.conn(),
&BurnchainHeaderHash([0x09; 32]),
0,
)
.unwrap();
assert_eq!(ancestors, vec![BurnchainHeaderHash([0x09; 32])]);
// edge case -- get one that doesn't exist
let ancestors = SortitionDB::get_ancestor_burnchain_header_hashes(
db.conn(),
&BurnchainHeaderHash([0xfe; 32]),
0,
)
.unwrap();
assert_eq!(ancestors, vec![BurnchainHeaderHash([0xfe; 32])]);
}
}

View File

@@ -1222,6 +1222,8 @@ mod test {
&ExecutionCost::zero(),
123,
false,
vec![],
vec![],
)
.unwrap();
tx.commit().unwrap();

View File

@@ -171,6 +171,8 @@ pub struct SetupBlockResult<'a, 'b> {
Option<(MinerReward, Vec<MinerReward>, MinerReward, MinerRewardInfo)>,
pub evaluated_epoch: StacksEpochId,
pub applied_epoch_transition: bool,
pub burn_stack_stx_ops: Vec<StackStxOp>,
pub burn_transfer_stx_ops: Vec<TransferStxOp>,
}
pub struct DummyEventDispatcher;
@@ -4040,15 +4042,15 @@ impl StacksChainState {
block: &StacksBlock,
microblocks: &Vec<StacksMicroblock>,
) -> Result<(), Error> {
let parent_sn = {
let db_handle = sort_ic.as_handle(&snapshot.sortition_id);
let sn = match db_handle.get_block_snapshot(&snapshot.parent_burn_header_hash)? {
Some(sn) => sn,
None => {
return Err(Error::NoSuchBlockError);
}
};
sn
let parent_sn = match SortitionDB::get_block_snapshot_for_winning_stacks_block(
sort_ic,
&snapshot.sortition_id,
&block.header.parent_block,
)? {
Some(sn) => sn,
None => {
return Err(Error::NoSuchBlockError);
}
};
self.preprocess_anchored_block(
@@ -4598,6 +4600,8 @@ impl StacksChainState {
"txid" => %txid,
"burn_block" => %burn_header_hash,
"contract_call_ecode" => %resp.data);
} else {
debug!("Processed StackStx burnchain op"; "amount_ustx" => stacked_ustx, "num_cycles" => num_cycles, "burn_block_height" => block_height, "sender" => %sender, "reward_addr" => %reward_addr, "txid" => %txid);
}
let mut execution_cost = clarity_tx.cost_so_far();
execution_cost
@@ -4666,17 +4670,20 @@ impl StacksChainState {
)
});
match result {
Ok((value, _, events)) => Some(StacksTransactionReceipt {
transaction: TransactionOrigin::Burn(txid),
events,
result: value,
post_condition_aborted: false,
stx_burned: 0,
contract_analysis: None,
execution_cost: ExecutionCost::zero(),
microblock_header: None,
tx_index: 0,
}),
Ok((value, _, events)) => {
debug!("Processed TransferStx burnchain op"; "transfered_ustx" => transfered_ustx, "sender" => %sender, "recipient" => %recipient, "txid" => %txid);
Some(StacksTransactionReceipt {
transaction: TransactionOrigin::Burn(txid),
events,
result: value,
post_condition_aborted: false,
stx_burned: 0,
contract_analysis: None,
execution_cost: ExecutionCost::zero(),
microblock_header: None,
tx_index: 0,
})
}
Err(e) => {
info!("TransferStx burn op processing error.";
"error" => ?e,
@@ -4859,6 +4866,133 @@ impl StacksChainState {
Ok(parent_miner)
}
/// Get the burnchain-hosted stack-stx and transfer-stx operations to consider,
/// using the pre-2.1 rule: only the operations mined in `burn_tip` itself.
fn get_stacking_and_transfer_burn_ops_v205(
    sortdb_conn: &Connection,
    burn_tip: &BurnchainHeaderHash,
) -> Result<(Vec<StackStxOp>, Vec<TransferStxOp>), Error> {
    Ok((
        SortitionDB::get_stack_stx_ops(sortdb_conn, burn_tip)?,
        SortitionDB::get_transfer_stx_ops(sortdb_conn, burn_tip)?,
    ))
}
/// Get the burnchain-hosted stack-stx and transfer-stx operations to consider for a
/// Stacks block, using the Stacks 2.1 rule: scan a window of recent burnchain blocks
/// and keep every operation whose txid has not already been processed in this fork.
///
/// * `chainstate_tx` / `parent_index_hash`: used to look up the burnchain txids already
///   processed in the ancestors of the parent Stacks block.
/// * `sortdb_conn`, `burn_tip`, `burn_tip_height`: the burnchain block being evaluated.
/// * `epoch_start_height`: burn height at which epoch 2.1 begins; the search window is
///   clamped so it never reaches back past this height.
fn get_stacking_and_transfer_burn_ops_v210(
chainstate_tx: &mut ChainstateTx,
parent_index_hash: &StacksBlockId,
sortdb_conn: &Connection,
burn_tip: &BurnchainHeaderHash,
burn_tip_height: u64,
epoch_start_height: u64,
) -> Result<(Vec<StackStxOp>, Vec<TransferStxOp>), Error> {
// only consider transactions in Stacks 2.1 -- clamp the lookback window so it
// does not span burnchain blocks mined before the 2.1 epoch began
let search_window: u8 =
if epoch_start_height + (BURNCHAIN_TX_SEARCH_WINDOW as u64) > burn_tip_height {
burn_tip_height
.saturating_sub(epoch_start_height)
.try_into()
.expect("FATAL: search window exceeds u8")
} else {
BURNCHAIN_TX_SEARCH_WINDOW
};
debug!(
"Search the last {} sortitions for burnchain-hosted stacks operations before {} ({})",
search_window, burn_tip, burn_tip_height
);
let ancestor_burnchain_header_hashes = SortitionDB::get_ancestor_burnchain_header_hashes(
sortdb_conn,
burn_tip,
search_window.into(),
)?;
let processed_burnchain_txids = StacksChainState::get_burnchain_txids_in_ancestors(
chainstate_tx.deref().deref(),
parent_index_hash,
search_window.into(),
)?;
// Find the *new* transactions -- the ones that we *haven't* seen in this Stacks
// fork yet. Note that we search for the ones that we have seen by searching back
// `BURNCHAIN_TX_SEARCH_WINDOW` *Stacks* blocks, whose sortitions may span more
// than `BURNCHAIN_TX_SEARCH_WINDOW` burnchain blocks. The inclusion of txids for
// burnchain transactions in the latter query is not a problem, because these txids
// are used to *exclude* transactions from the last `BURNCHAIN_TX_SEARCH_WINDOW`
// burnchain blocks. These excluded txids, if they were mined outside of this
// window, are *already* excluded.
let mut all_stacking_burn_ops = vec![];
let mut all_transfer_burn_ops = vec![];
// go from oldest burn header hash to newest, so ops accumulate in mined order
for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() {
let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh)?;
let transfer_ops = SortitionDB::get_transfer_stx_ops(sortdb_conn, ancestor_bhh)?;
for stacking_op in stacking_ops.into_iter() {
if !processed_burnchain_txids.contains(&stacking_op.txid) {
all_stacking_burn_ops.push(stacking_op);
}
}
for transfer_op in transfer_ops.into_iter() {
if !processed_burnchain_txids.contains(&transfer_op.txid) {
all_transfer_burn_ops.push(transfer_op);
}
}
}
Ok((all_stacking_burn_ops, all_transfer_burn_ops))
}
/// Get the list of burnchain-hosted stacking and transfer operations to apply when evaluating
/// the Stacks block that was selected for this burnchain block.
/// The rules are different for different epochs:
///
/// * In Stacks 2.0/2.05, only the operations in the burnchain block will be considered.
/// So if a transaction was mined in burnchain block N, it will be processed in the Stacks
/// block mined in burnchain block N (if there is one).
///
/// * In Stacks 2.1+, the operations in the last K burnchain blocks that have not yet been
/// considered in this Stacks block's fork will be processed in the order in which they are
/// mined in the burnchain. So if a transaction was mined in a burnchain block between N and
/// N-K inclusive, it will be processed in each Stacks fork that contains at least one Stacks
/// block mined in the same burnchain interval.
///
/// The rationale for the new behavior in Stacks 2.1+ is that burnchain-hosted STX operations
/// can get picked up in Stacks blocks that only live on short-lived forks, or get mined in
/// burnchain blocks in which there was no sortition. In either case, the operation does not
/// materialize on the canonical Stacks chain. This is a bad user
/// experience, because the act of sending a PreStxOp plus this StackStxOp / TransferStxOp is a
/// time-consuming and tedious process that must then be repeated.
///
/// The change in Stacks 2.1+ makes it so that it's overwhelmingly likely to work
/// the first time -- the choice of K is significantly bigger than the length of short-lived
/// forks or periods of time with no sortition that have been observed in practice.
fn get_stacking_and_transfer_burn_ops(
chainstate_tx: &mut ChainstateTx,
parent_index_hash: &StacksBlockId,
sortdb_conn: &Connection,
burn_tip: &BurnchainHeaderHash,
burn_tip_height: u64,
) -> Result<(Vec<StackStxOp>, Vec<TransferStxOp>), Error> {
// dispatch on the epoch in force at the current burnchain tip height
let cur_epoch = SortitionDB::get_stacks_epoch(sortdb_conn, burn_tip_height)?
.expect("FATAL: no epoch defined for current burnchain tip height");
match cur_epoch.epoch_id {
StacksEpochId::Epoch10 => {
// reaching this arm while processing a Stacks block is a bug
panic!("FATAL: processed a block in Epoch 1.0");
}
StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => {
StacksChainState::get_stacking_and_transfer_burn_ops_v205(sortdb_conn, burn_tip)
}
StacksEpochId::Epoch21 => StacksChainState::get_stacking_and_transfer_burn_ops_v210(
chainstate_tx,
parent_index_hash,
sortdb_conn,
burn_tip,
burn_tip_height,
cur_epoch.start_height,
),
}
}
/// Called in both follower and miner block assembly paths.
/// Returns clarity_tx, list of receipts, microblock execution cost,
/// microblock fees, microblock burns, list of microblock tx receipts,
@@ -4895,8 +5029,14 @@ impl StacksChainState {
(latest_miners, parent_miner)
};
let stacking_burn_ops = SortitionDB::get_stack_stx_ops(conn, &burn_tip)?;
let transfer_burn_ops = SortitionDB::get_transfer_stx_ops(conn, &burn_tip)?;
let (stacking_burn_ops, transfer_burn_ops) =
StacksChainState::get_stacking_and_transfer_burn_ops(
chainstate_tx,
&parent_index_hash,
conn,
&burn_tip,
burn_tip_height.into(),
)?;
// load the execution cost of the parent block if the executor is the follower.
// otherwise, if the executor is the miner, only load the parent cost if the parent
@@ -5011,14 +5151,14 @@ impl StacksChainState {
let (applied_epoch_transition, mut tx_receipts) =
StacksChainState::process_epoch_transition(&mut clarity_tx, burn_tip_height)?;
// process stacking & transfer operations from bitcoin ops
// process stacking & transfer operations from burnchain ops
tx_receipts.extend(StacksChainState::process_stacking_ops(
&mut clarity_tx,
stacking_burn_ops,
stacking_burn_ops.clone(),
));
tx_receipts.extend(StacksChainState::process_transfer_ops(
&mut clarity_tx,
transfer_burn_ops,
transfer_burn_ops.clone(),
));
Ok(SetupBlockResult {
@@ -5031,6 +5171,8 @@ impl StacksChainState {
matured_miner_rewards_opt,
evaluated_epoch,
applied_epoch_transition,
burn_stack_stx_ops: stacking_burn_ops,
burn_transfer_stx_ops: transfer_burn_ops,
})
}
@@ -5217,6 +5359,8 @@ impl StacksChainState {
matured_miner_rewards_opt,
evaluated_epoch,
applied_epoch_transition,
burn_stack_stx_ops,
burn_transfer_stx_ops,
} = StacksChainState::setup_block(
chainstate_tx,
clarity_instance,
@@ -5482,6 +5626,8 @@ impl StacksChainState {
&block_execution_cost,
block_size,
applied_epoch_transition,
burn_stack_stx_ops,
burn_transfer_stx_ops,
)
.expect("FATAL: failed to advance chain tip");
@@ -6445,6 +6591,8 @@ pub mod test {
use crate::cost_estimates::UnitEstimator;
use crate::types::chainstate::{BlockHeaderHash, StacksWorkScore};
use crate::burnchains::test::Txid_from_test_data;
use super::*;
use clarity::vm::types::StacksAddressExtensions;
@@ -11083,6 +11231,561 @@ pub mod test {
);
}
/// Build a test `TransferStxOp` from `addr` to `recipient_addr`, with amount,
/// txid, and vtxindex derived deterministically from `tenure_id`.
///
/// `burn_header_hash` is a placeholder ([0x00; 32]); the tests overwrite it
/// with the real hash after the op is processed.
fn make_transfer_op(
    addr: &StacksAddress,
    recipient_addr: &StacksAddress,
    burn_height: u64,
    tenure_id: usize,
) -> TransferStxOp {
    TransferStxOp {
        sender: addr.clone(),
        recipient: recipient_addr.clone(),
        // 1000 uSTX per tenure, starting at 1000 for tenure_id 0
        transfered_ustx: ((tenure_id + 1) * 1000) as u128,
        memo: vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05],
        txid: Txid_from_test_data(
            tenure_id as u64,
            1,
            &BurnchainHeaderHash([tenure_id as u8; 32]),
            tenure_id as u64,
        ),
        vtxindex: (10 + tenure_id) as u32,
        block_height: burn_height,
        burn_header_hash: BurnchainHeaderHash([0x00; 32]),
    }
}
/// Verify that the stacking and transfer operations on the burnchain work as expected in
/// Stacks 2.1. That is, they're up for consideration in the 6 subsequent sortitions after
/// they are mined (including the one they are in). This test verifies that TransferSTX
/// operations are picked up and applied as expected in the given Stacks fork, even though
/// there are empty sortitions.
#[test]
fn test_get_stacking_and_transfer_burn_ops_v210() {
let mut peer_config =
TestPeerConfig::new("test_stacking_and_transfer_burn_ops_v210", 21315, 21316);
let privk = StacksPrivateKey::from_hex(
"eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01",
)
.unwrap();
let addr = StacksAddress::from_public_keys(
C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
&AddressHashMode::SerializeP2PKH,
1,
&vec![StacksPublicKey::from_private(&privk)],
)
.unwrap();
let recipient_privk = StacksPrivateKey::new();
let recipient_addr = StacksAddress::from_public_keys(
C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
&AddressHashMode::SerializeP2PKH,
1,
&vec![StacksPublicKey::from_private(&recipient_privk)],
)
.unwrap();
let initial_balance = 1000000000;
peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)];
// run the whole test under epoch 2.1 rules from height 0
peer_config.epochs = Some(StacksEpoch::unit_test_2_1(0));
let mut peer = TestPeer::new(peer_config);
let chainstate_path = peer.chainstate_path.clone();
let num_blocks = 10;
let first_stacks_block_height = {
let sn =
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
.unwrap();
sn.block_height
};
let mut last_block_id = StacksBlockId([0x00; 32]);
for tenure_id in 0..num_blocks {
let tip =
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
.unwrap();
assert_eq!(
tip.block_height,
first_stacks_block_height + (tenure_id as u64)
);
// For the first 5 burn blocks, sortition a Stacks block.
// For sortitions 6 and 8, don't sortition any Stacks block.
// For sortitions 7 and 9, do sortition a Stacks block, and verify that it includes all
// burnchain STX operations that got skipped by the missing sortition.
let process_stacks_block = tenure_id <= 5 || tenure_id % 2 != 0;
let (mut burn_ops, stacks_block_opt, microblocks_opt) = if process_stacks_block {
let (burn_ops, stacks_block, microblocks) = peer.make_tenure(
|ref mut miner,
ref mut sortdb,
ref mut chainstate,
vrf_proof,
ref parent_opt,
ref parent_microblock_header_opt| {
let parent_tip = match parent_opt {
None => {
StacksChainState::get_genesis_header_info(chainstate.db()).unwrap()
}
Some(block) => {
let ic = sortdb.index_conn();
let snapshot =
SortitionDB::get_block_snapshot_for_winning_stacks_block(
&ic,
&tip.sortition_id,
&block.block_hash(),
)
.unwrap()
.unwrap(); // succeeds because we don't fork
StacksChainState::get_anchored_block_header_info(
chainstate.db(),
&snapshot.consensus_hash,
&snapshot.winning_stacks_block_hash,
)
.unwrap()
.unwrap()
}
};
let mut mempool =
MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap();
let coinbase_tx = make_coinbase(miner, tenure_id);
let anchored_block = StacksBlockBuilder::build_anchored_block(
chainstate,
&sortdb.index_conn(),
&mut mempool,
&parent_tip,
tip.total_burn,
vrf_proof,
Hash160([tenure_id as u8; 20]),
&coinbase_tx,
BlockBuilderSettings::max_value(),
None,
)
.unwrap();
(anchored_block.0, vec![])
},
);
(burn_ops, Some(stacks_block), Some(microblocks))
} else {
(vec![], None, None)
};
let mut expected_transfer_ops = if tenure_id == 0 || tenure_id - 1 < 5 {
// all contiguous blocks up to now, so only expect this block's stx-transfer
vec![make_transfer_op(
&addr,
&recipient_addr,
tip.block_height + 1,
tenure_id,
)]
} else if (tenure_id - 1) % 2 == 0 {
// no sortition in the last burn block, so only expect this block's stx-transfer
vec![make_transfer_op(
&addr,
&recipient_addr,
tip.block_height + 1,
tenure_id,
)]
} else {
// last sortition had no block, so expect both the previous block's
// stx-transfer *and* this block's stx-transfer
vec![
make_transfer_op(&addr, &recipient_addr, tip.block_height, tenure_id - 1),
make_transfer_op(&addr, &recipient_addr, tip.block_height + 1, tenure_id),
]
};
// add one stx-transfer burn op per block
let mut stx_burn_ops = vec![BlockstackOperationType::TransferStx(make_transfer_op(
&addr,
&recipient_addr,
tip.block_height + 1,
tenure_id,
))];
burn_ops.append(&mut stx_burn_ops);
let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone());
match (stacks_block_opt, microblocks_opt) {
(Some(stacks_block), Some(microblocks)) => {
peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
last_block_id = StacksBlockHeader::make_index_block_hash(
&consensus_hash,
&stacks_block.block_hash(),
);
}
_ => {}
}
let tip =
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
.unwrap();
let sortdb = peer.sortdb.take().unwrap();
{
// check the candidate op list directly against what we expect for this tenure
let chainstate = peer.chainstate();
let (mut chainstate_tx, clarity_instance) =
chainstate.chainstate_tx_begin().unwrap();
let (stack_stx_ops, transfer_stx_ops) =
StacksChainState::get_stacking_and_transfer_burn_ops_v210(
&mut chainstate_tx,
&last_block_id,
sortdb.conn(),
&tip.burn_header_hash,
tip.block_height,
0,
)
.unwrap();
assert_eq!(transfer_stx_ops.len(), expected_transfer_ops.len());
// burn header hash will be different, since it's set post-processing.
// everything else must be the same though.
for i in 0..expected_transfer_ops.len() {
expected_transfer_ops[i].burn_header_hash =
transfer_stx_ops[i].burn_header_hash.clone();
}
assert_eq!(transfer_stx_ops, expected_transfer_ops);
}
peer.sortdb.replace(sortdb);
}
// all burnchain transactions mined, even if there was no sortition in the burn block in
// which they were mined.
let sortdb = peer.sortdb.take().unwrap();
// definitely missing some blocks -- there are empty sortitions
let stacks_tip = peer
.chainstate()
.get_stacks_chain_tip(&sortdb)
.unwrap()
.unwrap();
assert_eq!(stacks_tip.height, 8);
// but we did process all burnchain operations
let (consensus_hash, block_bhh) =
SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap();
let tip_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh);
let account = peer
.chainstate()
.with_read_only_clarity_tx(&sortdb.index_conn(), &tip_hash, |conn| {
StacksChainState::get_account(conn, &addr.to_account_principal())
})
.unwrap();
peer.sortdb.replace(sortdb);
// NOTE(review): the sum covers tenure_ids 0..=8 (1000..=9000); tenure_id 9's op
// (10000) is absent, presumably because no later Stacks block processes the final
// burn block's op -- confirm
assert_eq!(
account.stx_balance.get_total_balance(),
1000000000 - (1000 + 2000 + 3000 + 4000 + 5000 + 6000 + 7000 + 8000 + 9000)
);
}
/// Verify that the stacking and transfer operations on the burnchain work as expected in
/// Stacks 2.1. That is, they're up for consideration in the 6 subsequent sortitions after
/// they are mined (including the one they are in). This test verifies that TransferSTX
/// operations are only dropped from consideration if there are more than 6 sortitions
/// between when they are mined and when the next Stacks block is mined.
#[test]
fn test_get_stacking_and_transfer_burn_ops_v210_expiration() {
let mut peer_config = TestPeerConfig::new(
"test_stacking_and_transfer_burn_ops_v210_expiration",
21317,
21318,
);
let privk = StacksPrivateKey::from_hex(
"eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01",
)
.unwrap();
let addr = StacksAddress::from_public_keys(
C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
&AddressHashMode::SerializeP2PKH,
1,
&vec![StacksPublicKey::from_private(&privk)],
)
.unwrap();
let recipient_privk = StacksPrivateKey::new();
let recipient_addr = StacksAddress::from_public_keys(
C32_ADDRESS_VERSION_TESTNET_SINGLESIG,
&AddressHashMode::SerializeP2PKH,
1,
&vec![StacksPublicKey::from_private(&recipient_privk)],
)
.unwrap();
let initial_balance = 1000000000;
peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)];
// run the whole test under epoch 2.1 rules from height 0
peer_config.epochs = Some(StacksEpoch::unit_test_2_1(0));
let mut peer = TestPeer::new(peer_config);
let chainstate_path = peer.chainstate_path.clone();
let num_blocks = 20;
let first_stacks_block_height = {
let sn =
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
.unwrap();
sn.block_height
};
let mut last_block_id = StacksBlockId([0x00; 32]);
for tenure_id in 0..num_blocks {
let tip =
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
.unwrap();
assert_eq!(
tip.block_height,
first_stacks_block_height + (tenure_id as u64)
);
// For the first 5 burn blocks, sortition a Stacks block.
// Tenures 6..=12 are empty sortitions; Stacks blocks resume at tenure 13.
let process_stacks_block = tenure_id <= 5 || tenure_id >= 13;
let (mut burn_ops, stacks_block_opt, microblocks_opt) = if process_stacks_block {
let (burn_ops, stacks_block, microblocks) = peer.make_tenure(
|ref mut miner,
ref mut sortdb,
ref mut chainstate,
vrf_proof,
ref parent_opt,
ref parent_microblock_header_opt| {
let parent_tip = match parent_opt {
None => {
StacksChainState::get_genesis_header_info(chainstate.db()).unwrap()
}
Some(block) => {
let ic = sortdb.index_conn();
let snapshot =
SortitionDB::get_block_snapshot_for_winning_stacks_block(
&ic,
&tip.sortition_id,
&block.block_hash(),
)
.unwrap()
.unwrap(); // succeeds because we don't fork
StacksChainState::get_anchored_block_header_info(
chainstate.db(),
&snapshot.consensus_hash,
&snapshot.winning_stacks_block_hash,
)
.unwrap()
.unwrap()
}
};
let mut mempool =
MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap();
let coinbase_tx = make_coinbase(miner, tenure_id);
let anchored_block = StacksBlockBuilder::build_anchored_block(
chainstate,
&sortdb.index_conn(),
&mut mempool,
&parent_tip,
tip.total_burn,
vrf_proof,
Hash160([tenure_id as u8; 20]),
&coinbase_tx,
BlockBuilderSettings::max_value(),
None,
)
.unwrap();
(anchored_block.0, vec![])
},
);
(burn_ops, Some(stacks_block), Some(microblocks))
} else {
(vec![], None, None)
};
// The window of expected candidate ops grows by one per empty sortition
// (tenures 6..=12) until it reaches the search-window size, then snaps back
// once Stacks blocks resume and catch up.
let mut expected_transfer_ops = if tenure_id == 0 || tenure_id - 1 < 5 {
// all contiguous blocks up to now, so only expect this block's stx-transfer
vec![make_transfer_op(
&addr,
&recipient_addr,
tip.block_height + 1,
tenure_id,
)]
} else if tenure_id - 1 == 5 {
vec![
make_transfer_op(&addr, &recipient_addr, tip.block_height, tenure_id - 1),
make_transfer_op(&addr, &recipient_addr, tip.block_height + 1, tenure_id),
]
} else if tenure_id - 1 == 6 {
vec![
make_transfer_op(&addr, &recipient_addr, tip.block_height - 1, tenure_id - 2),
make_transfer_op(&addr, &recipient_addr, tip.block_height, tenure_id - 1),
make_transfer_op(&addr, &recipient_addr, tip.block_height + 1, tenure_id),
]
} else if tenure_id - 1 == 7 {
vec![
make_transfer_op(&addr, &recipient_addr, tip.block_height - 2, tenure_id - 3),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 1, tenure_id - 2),
make_transfer_op(&addr, &recipient_addr, tip.block_height, tenure_id - 1),
make_transfer_op(&addr, &recipient_addr, tip.block_height + 1, tenure_id),
]
} else if tenure_id - 1 == 8 {
vec![
make_transfer_op(&addr, &recipient_addr, tip.block_height - 3, tenure_id - 4),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 2, tenure_id - 3),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 1, tenure_id - 2),
make_transfer_op(&addr, &recipient_addr, tip.block_height, tenure_id - 1),
make_transfer_op(&addr, &recipient_addr, tip.block_height + 1, tenure_id),
]
} else if tenure_id - 1 == 9 {
vec![
make_transfer_op(&addr, &recipient_addr, tip.block_height - 4, tenure_id - 5),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 3, tenure_id - 4),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 2, tenure_id - 3),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 1, tenure_id - 2),
make_transfer_op(&addr, &recipient_addr, tip.block_height, tenure_id - 1),
make_transfer_op(&addr, &recipient_addr, tip.block_height + 1, tenure_id),
]
} else if tenure_id - 1 == 10 {
vec![
make_transfer_op(&addr, &recipient_addr, tip.block_height - 5, tenure_id - 6),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 4, tenure_id - 5),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 3, tenure_id - 4),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 2, tenure_id - 3),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 1, tenure_id - 2),
make_transfer_op(&addr, &recipient_addr, tip.block_height, tenure_id - 1),
make_transfer_op(&addr, &recipient_addr, tip.block_height + 1, tenure_id),
]
} else if tenure_id - 1 == 11 {
// window is full -- the oldest op has aged out of consideration
vec![
make_transfer_op(&addr, &recipient_addr, tip.block_height - 5, tenure_id - 6),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 4, tenure_id - 5),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 3, tenure_id - 4),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 2, tenure_id - 3),
make_transfer_op(&addr, &recipient_addr, tip.block_height - 1, tenure_id - 2),
make_transfer_op(&addr, &recipient_addr, tip.block_height, tenure_id - 1),
make_transfer_op(&addr, &recipient_addr, tip.block_height + 1, tenure_id),
]
} else {
vec![make_transfer_op(
&addr,
&recipient_addr,
tip.block_height + 1,
tenure_id,
)]
};
// add one stx-transfer burn op per block
let mut stx_burn_ops = vec![BlockstackOperationType::TransferStx(make_transfer_op(
&addr,
&recipient_addr,
tip.block_height + 1,
tenure_id,
))];
burn_ops.append(&mut stx_burn_ops);
let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone());
match (stacks_block_opt, microblocks_opt) {
(Some(stacks_block), Some(microblocks)) => {
peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
last_block_id = StacksBlockHeader::make_index_block_hash(
&consensus_hash,
&stacks_block.block_hash(),
);
}
_ => {}
}
let tip =
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
.unwrap();
let sortdb = peer.sortdb.take().unwrap();
{
// check the candidate op list directly against what we expect for this tenure
let chainstate = peer.chainstate();
let (mut chainstate_tx, clarity_instance) =
chainstate.chainstate_tx_begin().unwrap();
let (stack_stx_ops, transfer_stx_ops) =
StacksChainState::get_stacking_and_transfer_burn_ops_v210(
&mut chainstate_tx,
&last_block_id,
sortdb.conn(),
&tip.burn_header_hash,
tip.block_height,
0,
)
.unwrap();
assert_eq!(transfer_stx_ops.len(), expected_transfer_ops.len());
// burn header hash will be different, since it's set post-processing.
// everything else must be the same though.
for i in 0..expected_transfer_ops.len() {
expected_transfer_ops[i].burn_header_hash =
transfer_stx_ops[i].burn_header_hash.clone();
}
assert_eq!(transfer_stx_ops, expected_transfer_ops);
}
peer.sortdb.replace(sortdb);
}
// all burnchain transactions mined, even if there was no sortition in the burn block in
// which they were mined.
let sortdb = peer.sortdb.take().unwrap();
// definitely missing some blocks -- there are empty sortitions
let stacks_tip = peer
.chainstate()
.get_stacks_chain_tip(&sortdb)
.unwrap()
.unwrap();
assert_eq!(stacks_tip.height, 13);
// but we did process all burnchain operations
let (consensus_hash, block_bhh) =
SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap();
let tip_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_bhh);
let account = peer
.chainstate()
.with_read_only_clarity_tx(&sortdb.index_conn(), &tip_hash, |conn| {
StacksChainState::get_account(conn, &addr.to_account_principal())
})
.unwrap();
peer.sortdb.replace(sortdb);
// skipped tenure 6's TransferSTX
// NOTE(review): the omitted amount is 6000, i.e. the op created at tenure_id 5
// ("tenure 6" is 1-indexed); 20000 (tenure_id 19) is also absent, presumably
// because no later Stacks block processes the final burn block's op -- confirm
assert_eq!(
account.stx_balance.get_total_balance(),
1000000000
- (1000
+ 2000
+ 3000
+ 4000
+ 5000
+ 7000
+ 8000
+ 9000
+ 10000
+ 11000
+ 12000
+ 13000
+ 14000
+ 15000
+ 16000
+ 17000
+ 18000
+ 19000)
);
}
// TODO(test): test multiple anchored blocks confirming the same microblock stream (in the same
// place, and different places, with/without orphans)
// TODO(test): process_next_staging_block

View File

@@ -331,4 +331,33 @@ impl StacksChainState {
.map_err(|e| Error::DBError(db_error::SqliteError(e)))?
.is_some())
}
/// Load up the past N ancestors' index block hashes of a given block, *including* the given
/// index_block_hash. The returned vector will contain the following hashes, in this order
/// * index_block_hash
/// * 1st ancestor of index_block_hash
/// * 2nd ancestor of index_block_hash
/// ...
/// * Nth ancestor of index_block_hash
///
/// Stops early (returning fewer than `count + 1` entries) if an ancestor has no known
/// parent.
pub fn get_ancestor_index_hashes(
    conn: &Connection,
    index_block_hash: &StacksBlockId,
    count: u64,
) -> Result<Vec<StacksBlockId>, Error> {
    let mut ancestors = Vec::with_capacity((count as usize).saturating_add(1));
    ancestors.push(index_block_hash.clone());
    for _ in 0..count {
        // walk back one block from the last-loaded ancestor
        let parent_opt = {
            let cur = ancestors.last().expect("FATAL: empty list of ancestors");
            StacksChainState::get_parent_block_id(conn, cur)?
        };
        match parent_opt {
            Some(parent) => ancestors.push(parent),
            None => {
                // out of ancestors
                break;
            }
        }
    }
    Ok(ancestors)
}
}

View File

@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{btree_map::Entry, BTreeMap};
use std::collections::{btree_map::Entry, BTreeMap, HashSet};
use std::fmt;
use std::fs;
use std::io;
@@ -25,6 +25,7 @@ use std::path::{Path, PathBuf};
use rusqlite::types::ToSql;
use rusqlite::Connection;
use rusqlite::OpenFlags;
use rusqlite::OptionalExtension;
use rusqlite::Row;
use rusqlite::Transaction;
use rusqlite::NO_PARAMS;
@@ -34,6 +35,7 @@ use crate::burnchains::{Address, Burnchain, BurnchainParameters, PoxConstants};
use crate::chainstate::burn::db::sortdb::BlockHeaderCache;
use crate::chainstate::burn::db::sortdb::*;
use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn};
use crate::chainstate::burn::operations::{StackStxOp, TransferStxOp};
use crate::chainstate::burn::ConsensusHash;
use crate::chainstate::stacks::boot::*;
use crate::chainstate::stacks::db::accounts::*;
@@ -94,6 +96,7 @@ use crate::clarity_vm::database::HeadersDBConn;
use crate::util_lib::boot::{boot_code_acc, boot_code_addr, boot_code_id, boot_code_tx_auth};
use clarity::vm::Value;
use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, TrieHash};
pub mod accounts;
pub mod blocks;
pub mod contracts;
@@ -766,6 +769,14 @@ const CHAINSTATE_SCHEMA_3: &'static [&'static str] = &[
r#"
CREATE INDEX IF NOT EXISTS index_parent_block_id_by_block_id ON block_headers(index_block_hash,parent_block_id);
"#,
// table to map index block hashes to the txids of on-burnchain stacks operations that were
// processed
r#"
CREATE TABLE burnchain_txids(
index_block_hash TEXT PRIMARY KEY,
-- this is a JSON-encoded list of txids
txids TEXT NOT NULL
);"#,
r#"
UPDATE db_config SET version = "3";
"#,
@@ -984,6 +995,10 @@ impl StacksChainState {
tx.execute_batch(cmd)?;
}
}
"3" => {
// done
break;
}
_ => {
error!(
"Invalid chain state database: expected version = {}, got {}",
@@ -2147,6 +2162,76 @@ impl StacksChainState {
Ok(height_opt)
}
/// Get the burnchain txids of the on-burnchain Stacks operations applied in the Stacks block
/// identified by `index_block_hash`.
///
/// Returns the decoded list of txids, or an empty vector if there is no row for this block in
/// `burnchain_txids` (i.e. no burnchain ops were recorded for it).
fn get_burnchain_txids_for_block(
    conn: &Connection,
    index_block_hash: &StacksBlockId,
) -> Result<Vec<Txid>, Error> {
    let sql = "SELECT txids FROM burnchain_txids WHERE index_block_hash = ?1";
    let args: &[&dyn ToSql] = &[index_block_hash];
    let txids = conn
        .query_row(sql, args, |r| {
            let txids_json: String = r.get_unwrap(0);
            // this column is only ever written by store_burnchain_txids() as JSON, so a
            // parse failure here means the DB is corrupted
            let txids: Vec<Txid> = serde_json::from_str(&txids_json)
                .expect("FATAL: database corruption: could not parse TXID JSON");
            Ok(txids)
        })
        .optional()?
        // no row -- no burnchain ops were applied in this block.
        // unwrap_or_default() avoids eagerly constructing the fallback Vec.
        .unwrap_or_default();
    Ok(txids)
}
/// Get the txids of the burnchain operations applied in the past N Stacks blocks.
///
/// The result is the union of the txid lists recorded for `index_block_hash` and up to
/// `count` of its ancestors.
pub fn get_burnchain_txids_in_ancestors(
    conn: &Connection,
    index_block_hash: &StacksBlockId,
    count: u64,
) -> Result<HashSet<Txid>, Error> {
    let ancestors = StacksChainState::get_ancestor_index_hashes(conn, index_block_hash, count)?;
    let mut seen = HashSet::new();
    for ancestor in ancestors.iter() {
        // extend() deduplicates via the set; order is irrelevant here
        seen.extend(StacksChainState::get_burnchain_txids_for_block(conn, ancestor)?);
    }
    Ok(seen)
}
/// Store all on-burnchain STX operations' txids by index block hash
fn store_burnchain_txids(
tx: &DBTx,
index_block_hash: &StacksBlockId,
burn_stack_stx_ops: Vec<StackStxOp>,
burn_transfer_stx_ops: Vec<TransferStxOp>,
) -> Result<(), Error> {
let mut txids: Vec<_> = burn_stack_stx_ops
.into_iter()
.fold(vec![], |mut txids, op| {
txids.push(op.txid);
txids
});
let mut xfer_txids = burn_transfer_stx_ops
.into_iter()
.fold(vec![], |mut txids, op| {
txids.push(op.txid);
txids
});
txids.append(&mut xfer_txids);
let txids_json =
serde_json::to_string(&txids).expect("FATAL: could not serialize Vec<Txid>");
let sql = "INSERT INTO burnchain_txids (index_block_hash, txids) VALUES (?1, ?2)";
let args: &[&dyn ToSql] = &[index_block_hash, &txids_json];
tx.execute(sql, args)?;
Ok(())
}
/// Append a Stacks block to an existing Stacks block, and grant the miner the block reward.
/// Return the new Stacks header info.
pub fn advance_tip<'a>(
@@ -2165,6 +2250,8 @@ impl StacksChainState {
anchor_block_cost: &ExecutionCost,
anchor_block_size: u64,
applied_epoch_transition: bool,
burn_stack_stx_ops: Vec<StackStxOp>,
burn_transfer_stx_ops: Vec<TransferStxOp>,
) -> Result<StacksHeaderInfo, Error> {
if new_tip.parent_block != FIRST_STACKS_BLOCK_HASH {
// not the first-ever block, so linkage must occur
@@ -2224,6 +2311,12 @@ impl StacksChainState {
block_reward,
user_burns,
)?;
StacksChainState::store_burnchain_txids(
headers_tx.deref(),
&index_block_hash,
burn_stack_stx_ops,
burn_transfer_stx_ops,
)?;
if let Some((miner_payout, user_payouts, parent_payout, reward_info)) = mature_miner_payouts
{

View File

@@ -66,6 +66,9 @@ pub const NETWORK_P2P_PORT: u16 = 6265;
// its current block-commit in a sortition
pub const MINING_COMMITMENT_WINDOW: u8 = 6;
// Number of previous burnchain blocks to search to find burnchain-hosted Stacks operations
pub const BURNCHAIN_TX_SEARCH_WINDOW: u8 = 6;
// This controls a miner heuristic for dropping a transaction from repeated consideration
// in the mempool. If the transaction caused the block limit to be reached when the block
// was previously `TX_BLOCK_LIMIT_PROPORTION_HEURISTIC`% full, the transaction will be dropped

View File

@@ -3413,17 +3413,25 @@ pub mod test {
) -> (StacksBlock, Vec<StacksMicroblock>),
{
let mut sortdb = self.sortdb.take().unwrap();
let mut burn_block = {
let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
TestBurnchainBlock::new(&sn, 0)
};
let last_sortition_block =
SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); // no forks here
let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
let mut burn_block = TestBurnchainBlock::new(&tip, 0);
let mut stacks_node = self.stacks_node.take().unwrap();
let parent_block_opt = stacks_node.get_last_anchored_block(&self.miner);
let parent_sortition_opt = match parent_block_opt.as_ref() {
Some(parent_block) => {
let ic = sortdb.index_conn();
SortitionDB::get_block_snapshot_for_winning_stacks_block(
&ic,
&tip.sortition_id,
&parent_block.block_hash(),
)
.unwrap()
}
None => None,
};
let parent_microblock_header_opt =
get_last_microblock_header(&stacks_node, &self.miner, parent_block_opt.as_ref());
let last_key = stacks_node.get_last_key(&self.miner);
@@ -3460,13 +3468,13 @@ pub mod test {
&microblocks,
1000,
&last_key,
Some(&last_sortition_block),
parent_sortition_opt.as_ref(),
);
let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner);
// patch in reward set info
match get_next_recipients(
&last_sortition_block,
&tip,
&mut stacks_node.chainstate,
&mut sortdb,
&self.config.burnchain,

View File

@@ -23,8 +23,14 @@ use stacks::core;
use stacks::chainstate::burn::db::sortdb::SortitionDB;
use stacks::chainstate::burn::distribution::BurnSamplePoint;
use stacks::chainstate::burn::operations::BlockstackOperationType;
use stacks::chainstate::burn::operations::PreStxOp;
use stacks::chainstate::burn::operations::TransferStxOp;
use stacks::burnchains::bitcoin::address::{BitcoinAddress, BitcoinAddressType};
use stacks::burnchains::bitcoin::BitcoinNetworkType;
use stacks::burnchains::PoxConstants;
use stacks::burnchains::Txid;
use crate::stacks_common::types::Address;
use crate::stacks_common::util::hash::hex_bytes;
@@ -34,6 +40,13 @@ use stacks_common::util::secp256k1::Secp256k1PublicKey;
use stacks::chainstate::coordinator::comm::CoordinatorChannels;
use stacks::core::BURNCHAIN_TX_SEARCH_WINDOW;
use crate::burnchains::bitcoin_regtest_controller::UTXO;
use crate::operations::BurnchainOpSigner;
use crate::tests::neon_integrations::get_balance;
use crate::Keychain;
fn advance_to_2_1(
mut initial_balances: Vec<InitialBalance>,
) -> (
@@ -143,8 +156,6 @@ fn advance_to_2_1(
// these should all succeed across the epoch 2.1 boundary
for _i in 0..5 {
// also, make *huge* block-commits with invalid marker bytes once we reach the new
// epoch, and verify that it fails.
let tip_info = get_chain_info(&conf);
// this block is the epoch transition?
@@ -291,7 +302,8 @@ fn transition_adds_burn_block_height() {
let http_origin = format!("http://{}", &conf.node.rpc_bind);
// post epoch 2.1 -- we should be able to query any/all burnchain headers after the first
// burnchain block height
// burnchain block height (not the genesis burnchain height, mind you, but the first burnchain
// block height at which the Stacks blockchain begins).
let contract = "
(define-private (test-burn-headers-cls (height uint) (base uint))
(begin
@@ -456,3 +468,451 @@ fn transition_adds_burn_block_height() {
test_observer::clear();
coord_channel.stop_chains_coordinator();
}
/// Integration test: verify that in epoch 2.1, on-burnchain Stacks operations (pre-stx /
/// transfer-stx) no longer need to be mined in the exact burnchain block that wins a
/// sortition in order to be applied. An op mined within `BURNCHAIN_TX_SEARCH_WINDOW`
/// burnchain blocks of a sortition gets processed; an op mined before epoch 2.1, or more
/// than one block past the window, does not.
/// Requires a live bitcoind (set BITCOIND_TEST=1).
#[test]
#[ignore]
fn transition_fixes_bitcoin_rigidity() {
    if env::var("BITCOIND_TEST") != Ok("1".into()) {
        return;
    }

    let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap();
    let spender_stx_addr: StacksAddress = to_addr(&spender_sk);
    let spender_addr: PrincipalData = spender_stx_addr.clone().into();
    let _spender_btc_addr = BitcoinAddress::from_bytes(
        BitcoinNetworkType::Regtest,
        BitcoinAddressType::PublicKeyHash,
        &spender_stx_addr.bytes.0,
    )
    .unwrap();

    let spender_2_sk = StacksPrivateKey::from_hex(SK_2).unwrap();
    let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk);
    let spender_2_addr: PrincipalData = spender_2_stx_addr.clone().into();

    // burnchain heights at which the epoch transitions occur
    let epoch_2_05 = 210;
    let epoch_2_1 = 215;

    test_observer::spawn();

    let (mut conf, miner_account) = neon_integration_test_conf();

    let mut initial_balances = vec![
        InitialBalance {
            address: spender_addr.clone(),
            amount: 100300,
        },
        InitialBalance {
            address: spender_2_addr.clone(),
            amount: 100300,
        },
    ];

    conf.initial_balances.append(&mut initial_balances);
    conf.events_observers.push(EventObserverConfig {
        endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT),
        events_keys: vec![EventKeyType::AnyEvent],
    });

    // schedule the 2.05 and 2.1 epoch boundaries
    let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec();
    epochs[1].end_height = epoch_2_05;
    epochs[2].start_height = epoch_2_05;
    epochs[2].end_height = epoch_2_1;
    epochs[3].start_height = epoch_2_1;

    conf.burnchain.epochs = Some(epochs);

    let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path());

    let reward_cycle_len = 2000;
    let prepare_phase_len = 100;
    let pox_constants = PoxConstants::new(
        reward_cycle_len,
        prepare_phase_len,
        4 * prepare_phase_len / 5,
        5,
        15,
        u32::max_value(),
    );
    burnchain_config.pox_constants = pox_constants.clone();

    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
    btcd_controller
        .start_bitcoind()
        .map_err(|_e| ())
        .expect("Failed starting bitcoind");

    let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain(
        conf.clone(),
        None,
        Some(burnchain_config.clone()),
        None,
    );
    let http_origin = format!("http://{}", &conf.node.rpc_bind);

    // bitcoin chain starts at epoch 2.05 boundary, minus 5 blocks to go
    btc_regtest_controller.bootstrap_chain(epoch_2_05 - 5);

    eprintln!("Chain bootstrapped...");

    let mut run_loop = neon::RunLoop::new(conf.clone());
    let blocks_processed = run_loop.get_blocks_processed_arc();

    let channel = run_loop.get_coordinator_channel().unwrap();

    let runloop_burnchain = burnchain_config.clone();
    thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0));

    // give the run loop some time to start up!
    wait_for_runloop(&blocks_processed);

    // first block wakes up the run loop
    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

    let tip_info = get_chain_info(&conf);
    assert_eq!(tip_info.burn_block_height, epoch_2_05 - 4);

    // first block will hold our VRF registration
    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

    // second block will be the first mined Stacks block
    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

    // cross the epoch 2.05 boundary
    for _i in 0..3 {
        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
    }

    let tip_info = get_chain_info(&conf);
    assert_eq!(tip_info.burn_block_height, epoch_2_05 + 1);

    // okay, let's send a pre-stx op for a transfer-stx op that will get mined before the 2.1 epoch
    let pre_stx_op = PreStxOp {
        output: spender_stx_addr.clone(),
        // to be filled in
        txid: Txid([0u8; 32]),
        vtxindex: 0,
        block_height: 0,
        burn_header_hash: BurnchainHeaderHash([0u8; 32]),
    };

    let mut miner_signer = Keychain::default(conf.node.seed.clone()).generate_op_signer();

    assert!(
        btc_regtest_controller.submit_operation(
            BlockstackOperationType::PreStx(pre_stx_op),
            &mut miner_signer,
            1
        ),
        "Pre-stx operation should submit successfully"
    );

    // mine it
    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

    // let's fire off a transfer op that will not land in the Stacks 2.1 epoch. It should not be
    // applied, even though it's within 6 blocks of the next Stacks block, which will be in epoch
    // 2.1. This verifies that the new burnchain consideration window only applies to sortitions
    // that happen in Stacks 2.1.
    let recipient_sk = StacksPrivateKey::new();
    let recipient_addr = to_addr(&recipient_sk);
    let transfer_stx_op = TransferStxOp {
        sender: spender_stx_addr.clone(),
        recipient: recipient_addr.clone(),
        transfered_ustx: 100_000,
        memo: vec![],
        // to be filled in
        txid: Txid([0u8; 32]),
        vtxindex: 0,
        block_height: 0,
        burn_header_hash: BurnchainHeaderHash([0u8; 32]),
    };

    let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false);

    assert!(
        btc_regtest_controller.submit_operation(
            BlockstackOperationType::TransferStx(transfer_stx_op),
            &mut spender_signer,
            1
        ),
        "Transfer operation should submit successfully"
    );

    // mine it without a sortition
    btc_regtest_controller.build_next_block(1);

    // these should all succeed across the epoch 2.1 boundary
    for i in 0..3 {
        let tip_info = get_chain_info(&conf);

        // this block is the epoch transition?
        let (chainstate, _) = StacksChainState::open(
            false,
            conf.burnchain.chain_id,
            &conf.get_chainstate_path_str(),
            None,
        )
        .unwrap();
        let res = StacksChainState::block_crosses_epoch_boundary(
            &chainstate.db(),
            &tip_info.stacks_tip_consensus_hash,
            &tip_info.stacks_tip,
        )
        .unwrap();
        debug!(
            "Epoch transition at {} ({}/{}) height {}: {}",
            &StacksBlockHeader::make_index_block_hash(
                &tip_info.stacks_tip_consensus_hash,
                &tip_info.stacks_tip
            ),
            &tip_info.stacks_tip_consensus_hash,
            &tip_info.stacks_tip,
            tip_info.burn_block_height,
            res
        );

        if tip_info.burn_block_height >= epoch_2_1 {
            if tip_info.burn_block_height == epoch_2_1 {
                assert!(res);
            }

            // pox-2 should be initialized now
            let _ = get_contract_src(
                &http_origin,
                StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
                "pox-2".to_string(),
                true,
            )
            .unwrap();
        } else {
            assert!(!res);

            // pox-2 should NOT be initialized
            let e = get_contract_src(
                &http_origin,
                StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
                "pox-2".to_string(),
                true,
            )
            .unwrap_err();
            eprintln!("No pox-2: {}", &e);
        }

        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
    }

    let tip_info = get_chain_info(&conf);
    assert_eq!(tip_info.burn_block_height, epoch_2_1 + 1);

    // stx-transfer did not go through -- it fell in a block before 2.1
    assert_eq!(get_balance(&http_origin, &spender_addr), 100_300);
    assert_eq!(get_balance(&http_origin, &recipient_addr), 0);
    assert_eq!(get_balance(&http_origin, &spender_2_addr), 100_300);

    let account = get_account(&http_origin, &miner_account);
    assert_eq!(account.nonce, 8);

    eprintln!("Begin Stacks 2.1");

    // let's query the spender's account:
    assert_eq!(get_balance(&http_origin, &spender_addr), 100300);

    // okay, let's send a pre-stx op.
    let pre_stx_op = PreStxOp {
        output: spender_stx_addr.clone(),
        // to be filled in
        txid: Txid([0u8; 32]),
        vtxindex: 0,
        block_height: 0,
        burn_header_hash: BurnchainHeaderHash([0u8; 32]),
    };

    let mut miner_signer = Keychain::default(conf.node.seed.clone()).generate_op_signer();

    assert!(
        btc_regtest_controller.submit_operation(
            BlockstackOperationType::PreStx(pre_stx_op),
            &mut miner_signer,
            1
        ),
        "Pre-stx operation should submit successfully"
    );

    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

    // let's fire off our transfer op.
    let recipient_sk = StacksPrivateKey::new();
    let recipient_addr = to_addr(&recipient_sk);
    let transfer_stx_op = TransferStxOp {
        sender: spender_stx_addr.clone(),
        recipient: recipient_addr.clone(),
        transfered_ustx: 100_000,
        memo: vec![],
        // to be filled in
        txid: Txid([0u8; 32]),
        vtxindex: 0,
        block_height: 0,
        burn_header_hash: BurnchainHeaderHash([0u8; 32]),
    };

    let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false);

    assert!(
        btc_regtest_controller.submit_operation(
            BlockstackOperationType::TransferStx(transfer_stx_op),
            &mut spender_signer,
            1
        ),
        "Transfer operation should submit successfully"
    );

    // build a couple bitcoin blocks without a stacks block to mine it, up to the edge of the
    // window
    for _i in 0..BURNCHAIN_TX_SEARCH_WINDOW {
        btc_regtest_controller.build_next_block(1);
    }

    // this block should process the transfer, even though it was mined in a sortition-less block
    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

    // transfer was applied: 100_300 - 100_000 = 300 left for spender 1
    assert_eq!(get_balance(&http_origin, &spender_addr), 300);
    assert_eq!(get_balance(&http_origin, &recipient_addr), 100_000);
    assert_eq!(get_balance(&http_origin, &spender_2_addr), 100_300);

    // now let's do a pre-stx-op and a transfer op in the same burnchain block...
    // NOTE: bitcoind really doesn't want to return the utxo from the first op for some reason,
    // so we have to get a little creative...

    // okay, let's send a pre-stx op.
    let pre_stx_op = PreStxOp {
        output: spender_2_stx_addr.clone(),
        // to be filled in
        txid: Txid([0u8; 32]),
        vtxindex: 0,
        block_height: 0,
        burn_header_hash: BurnchainHeaderHash([0u8; 32]),
    };

    let mut miner_signer = Keychain::default(conf.node.seed.clone()).generate_op_signer();

    let pre_stx_tx = btc_regtest_controller
        .submit_manual(
            BlockstackOperationType::PreStx(pre_stx_op),
            &mut miner_signer,
            None,
        )
        .expect("Pre-stx operation should submit successfully");

    // hand-feed the pre-stx change output to the transfer op's submission
    let transfer_stx_utxo = UTXO {
        txid: pre_stx_tx.txid(),
        vout: 1,
        script_pub_key: pre_stx_tx.output[1].script_pubkey.clone(),
        amount: pre_stx_tx.output[1].value,
        confirmations: 0,
    };

    // let's fire off our transfer op.
    let transfer_stx_op = TransferStxOp {
        sender: spender_2_stx_addr.clone(),
        recipient: recipient_addr.clone(),
        transfered_ustx: 100_000,
        memo: vec![],
        // to be filled in
        txid: Txid([0u8; 32]),
        vtxindex: 0,
        block_height: 0,
        burn_header_hash: BurnchainHeaderHash([0u8; 32]),
    };

    let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false);

    btc_regtest_controller
        .submit_manual(
            BlockstackOperationType::TransferStx(transfer_stx_op),
            &mut spender_signer,
            Some(transfer_stx_utxo),
        )
        .expect("Transfer operation should submit successfully");

    // build a couple bitcoin blocks without a stacks block to mine it, up to the edge of the
    // window
    for _i in 0..BURNCHAIN_TX_SEARCH_WINDOW {
        btc_regtest_controller.build_next_block(1);
    }

    // should process the transfer
    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

    // spender 2's transfer was applied as well: 100_300 - 100_000 = 300 left
    assert_eq!(get_balance(&http_origin, &spender_addr), 300);
    assert_eq!(get_balance(&http_origin, &recipient_addr), 200_000);
    assert_eq!(get_balance(&http_origin, &spender_2_addr), 300);

    // let's fire off another transfer op that will fall outside the window
    let pre_stx_op = PreStxOp {
        output: spender_2_stx_addr.clone(),
        // to be filled in
        txid: Txid([0u8; 32]),
        vtxindex: 0,
        block_height: 0,
        burn_header_hash: BurnchainHeaderHash([0u8; 32]),
    };

    let mut miner_signer = Keychain::default(conf.node.seed.clone()).generate_op_signer();

    let pre_stx_tx = btc_regtest_controller
        .submit_manual(
            BlockstackOperationType::PreStx(pre_stx_op),
            &mut miner_signer,
            None,
        )
        .expect("Pre-stx operation should submit successfully");

    let transfer_stx_utxo = UTXO {
        txid: pre_stx_tx.txid(),
        vout: 1,
        script_pub_key: pre_stx_tx.output[1].script_pubkey.clone(),
        amount: pre_stx_tx.output[1].value,
        confirmations: 0,
    };

    // NOTE(review): this op names spender_stx_addr as the sender but is signed below with
    // spender_2_sk (and spends spender_2's pre-stx UTXO). The assertions only check that no
    // balance changes, which holds either way -- confirm the mismatch is intentional.
    let transfer_stx_op = TransferStxOp {
        sender: spender_stx_addr.clone(),
        recipient: recipient_addr.clone(),
        transfered_ustx: 123,
        memo: vec![],
        // to be filled in
        txid: Txid([0u8; 32]),
        vtxindex: 0,
        block_height: 0,
        burn_header_hash: BurnchainHeaderHash([0u8; 32]),
    };

    let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false);

    btc_regtest_controller
        .submit_manual(
            BlockstackOperationType::TransferStx(transfer_stx_op),
            &mut spender_signer,
            Some(transfer_stx_utxo),
        )
        .expect("Transfer operation should submit successfully");

    // build a couple bitcoin blocks without a stacks block to mine it, up to the edge of the
    // window and then past it
    for _i in 0..(BURNCHAIN_TX_SEARCH_WINDOW + 1) {
        btc_regtest_controller.build_next_block(1);
    }

    // should NOT process the transfer
    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

    // all balances unchanged -- the op fell one block outside the search window
    assert_eq!(get_balance(&http_origin, &spender_addr), 300);
    assert_eq!(get_balance(&http_origin, &recipient_addr), 200_000);
    assert_eq!(get_balance(&http_origin, &spender_2_addr), 300);

    test_observer::clear();
    channel.stop_chains_coordinator();
}

View File

@@ -727,7 +727,7 @@ fn most_recent_utxo_integration_test() {
channel.stop_chains_coordinator();
}
fn get_balance<F: std::fmt::Display>(http_origin: &str, account: &F) -> u128 {
/// Query `account`'s current balance (in uSTX) from the node at `http_origin`.
pub fn get_balance<F: std::fmt::Display>(http_origin: &str, account: &F) -> u128 {
    get_account(http_origin, account).balance
}