Merge stacks/test/replay-block into jbencin/test/replay-block
@@ -251,6 +251,8 @@ fn inner_get_loglevel() -> slog::Level {
         || env::var("BLOCKSTACK_DEBUG") == Ok("1".into())
     {
         slog::Level::Debug
+    } else if env::var("STACKS_LOG_CRITONLY") == Ok("1".into()) {
+        slog::Level::Critical
     } else {
         slog::Level::Info
     }
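To see the new branch in action, here is a minimal in-module test sketch (not part of this diff). It assumes it lives alongside inner_get_loglevel and that STACKS_LOG_TRACE, STACKS_LOG_DEBUG, and BLOCKSTACK_DEBUG are unset, since those earlier branches take precedence:

#[test]
fn critonly_env_var_selects_critical_level() {
    // Assumes this test sits in the same module as inner_get_loglevel().
    std::env::set_var("STACKS_LOG_CRITONLY", "1");
    assert!(matches!(inner_get_loglevel(), slog::Level::Critical));
    std::env::remove_var("STACKS_LOG_CRITONLY");
}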
@@ -5266,7 +5266,7 @@ impl StacksChainState {
     /// necessary so that the Headers database and Clarity database's
     /// transactions can commit very close to one another, after the
     /// event observer has emitted.
-    fn append_block<'a>(
+    pub fn append_block<'a>(
         chainstate_tx: &mut ChainstateTx,
         clarity_instance: &'a mut ClarityInstance,
         burn_dbconn: &mut SortitionHandleTx,
@@ -5283,6 +5283,7 @@ impl StacksChainState {
         burnchain_sortition_burn: u64,
         user_burns: &[StagingUserBurnSupport],
         affirmation_weight: u64,
+        do_not_advance: bool,
     ) -> Result<(StacksEpochReceipt, PreCommitClarityBlock<'a>), Error> {
         debug!(
             "Process block {:?} with {} transactions",
@@ -5654,10 +5655,30 @@ impl StacksChainState {
             .as_ref()
             .map(|(_, _, _, info)| info.clone());
 
+        if do_not_advance {
+            let epoch_receipt = StacksEpochReceipt {
+                header: StacksHeaderInfo::regtest_genesis(),
+                tx_receipts,
+                matured_rewards,
+                matured_rewards_info,
+                parent_microblocks_cost: microblock_execution_cost,
+                anchored_block_cost: block_execution_cost,
+                parent_burn_block_hash,
+                parent_burn_block_height,
+                parent_burn_block_timestamp,
+                evaluated_epoch,
+                epoch_transition: applied_epoch_transition,
+                signers_updated: false,
+            };
+
+            return Ok((epoch_receipt, clarity_commit));
+        }
+
         let parent_block_header = parent_chain_tip
             .anchored_header
             .as_stacks_epoch2()
             .ok_or_else(|| Error::InvalidChildOfNakomotoBlock)?;
+
         let new_tip = StacksChainState::advance_tip(
             &mut chainstate_tx.tx,
             parent_block_header,
@@ -5722,7 +5743,7 @@ impl StacksChainState {
     /// Verify that a Stacks anchored block attaches to its parent anchored block.
     /// * checks .header.total_work.work
     /// * checks .header.parent_block
-    fn check_block_attachment(
+    pub fn check_block_attachment(
         parent_block_header: &StacksBlockHeader,
         block_header: &StacksBlockHeader,
     ) -> bool {
@@ -5749,7 +5770,7 @@ impl StacksChainState {
     /// The header info will be pulled from the headers DB, so this method only succeeds if the
     /// parent block has been processed.
     /// If it's not known, return None.
-    fn get_parent_header_info(
+    pub fn get_parent_header_info(
         chainstate_tx: &mut ChainstateTx,
         next_staging_block: &StagingBlock,
     ) -> Result<Option<StacksHeaderInfo>, Error> {
@@ -5791,7 +5812,7 @@ impl StacksChainState {
     }
 
     /// Extract and parse the block from a loaded staging block, and verify its integrity.
-    fn extract_stacks_block(next_staging_block: &StagingBlock) -> Result<StacksBlock, Error> {
+    pub fn extract_stacks_block(next_staging_block: &StagingBlock) -> Result<StacksBlock, Error> {
         let block = {
             StacksBlock::consensus_deserialize(&mut &next_staging_block.block_data[..])
                 .map_err(Error::CodecError)?
@@ -5813,7 +5834,7 @@ impl StacksChainState {
     /// header info), determine which branch connects to the given block. If there are multiple
     /// branches, punish the parent. Return the portion of the branch that actually connects to
     /// the given block.
-    fn extract_connecting_microblocks(
+    pub fn extract_connecting_microblocks(
         parent_block_header_info: &StacksHeaderInfo,
         next_staging_block: &StagingBlock,
         block: &StacksBlock,
@@ -6065,6 +6086,7 @@ impl StacksChainState {
             next_staging_block.sortition_burn,
             &user_supports,
             block_am.weight(),
+            false,
         ) {
             Ok(next_chain_tip_info) => next_chain_tip_info,
             Err(e) => {
@@ -860,6 +860,47 @@ simulating a miner.
         return;
     }
 
+    if argv[1] == "replay-block" {
+        if argv.len() < 3 {
+            eprintln!(
+                "Usage: {} chainstate_path index-block-hash-prefix",
+                &argv[0]
+            );
+            process::exit(1);
+        }
+        let stacks_path = &argv[2];
+        let index_block_hash_prefix = &argv[3];
+        let staging_blocks_db_path = format!("{}/mainnet/chainstate/vm/index.sqlite", stacks_path);
+        let conn =
+            Connection::open_with_flags(&staging_blocks_db_path, OpenFlags::SQLITE_OPEN_READ_ONLY)
+                .unwrap();
+        let mut stmt = conn
+            .prepare(&format!(
+                "SELECT index_block_hash FROM staging_blocks WHERE index_block_hash LIKE \"{}%\"",
+                index_block_hash_prefix
+            ))
+            .unwrap();
+        let mut hashes_set = stmt.query(rusqlite::NO_PARAMS).unwrap();
+
+        let mut index_block_hashes: Vec<String> = vec![];
+        while let Ok(Some(row)) = hashes_set.next() {
+            index_block_hashes.push(row.get(0).unwrap());
+        }
+
+        let total = index_block_hashes.len();
+        let mut i = 1;
+        println!("Will check {} blocks.", total);
+        for index_block_hash in index_block_hashes.iter() {
+            if i % 100 == 0 {
+                println!("Checked {}...", i);
+            }
+            i += 1;
+            replay_block(stacks_path, index_block_hash);
+        }
+        println!("Finished!");
+        process::exit(0);
+    }
+
     if argv[1] == "deserialize-db" {
         if argv.len() < 4 {
             eprintln!("Usage: {} clarity_sqlite_db [byte-prefix]", &argv[0]);
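An aside on the prefix query above: it interpolates the user-supplied prefix directly into the SQL text and relies on SQLite accepting double-quoted string literals. A hedged sketch of the same lookup with a bound parameter, assuming the rusqlite version in use provides the params! macro and query_map (the helper name is hypothetical):

use rusqlite::{params, Connection};

// Same SELECT as in the diff, but with the LIKE pattern bound as a parameter
// instead of being formatted into the SQL string.
fn staging_block_hashes_with_prefix(
    conn: &Connection,
    prefix: &str,
) -> rusqlite::Result<Vec<String>> {
    let mut stmt = conn.prepare(
        "SELECT index_block_hash FROM staging_blocks WHERE index_block_hash LIKE ?1",
    )?;
    let rows = stmt.query_map(params![format!("{}%", prefix)], |row| row.get(0))?;
    rows.collect()
}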
@@ -1482,3 +1523,188 @@ simulating a miner.
 
     process::exit(0);
 }
+
+fn replay_block(stacks_path: &str, index_block_hash_hex: &str) {
+    let index_block_hash = StacksBlockId::from_hex(index_block_hash_hex).unwrap();
+    let chain_state_path = format!("{}/mainnet/chainstate/", stacks_path);
+    let sort_db_path = format!("{}/mainnet/burnchain/sortition", stacks_path);
+    let burn_db_path = format!("{}/mainnet/burnchain/burnchain.sqlite", stacks_path);
+    let burnchain_blocks_db = BurnchainDB::open(&burn_db_path, false).unwrap();
+
+    let (mut chainstate, _) =
+        StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap();
+
+    let mut sortdb = SortitionDB::connect(
+        &sort_db_path,
+        BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT,
+        &BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(),
+        BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(),
+        STACKS_EPOCHS_MAINNET.as_ref(),
+        PoxConstants::mainnet_default(),
+        true,
+    )
+    .unwrap();
+    let mut sort_tx = sortdb.tx_begin_at_tip();
+
+    let blocks_path = chainstate.blocks_path.clone();
+    let (mut chainstate_tx, clarity_instance) = chainstate
+        .chainstate_tx_begin()
+        .expect("Failed to start chainstate tx");
+    let mut next_staging_block =
+        StacksChainState::load_staging_block_info(&chainstate_tx.tx, &index_block_hash)
+            .expect("Failed to load staging block data")
+            .expect("No such index block hash in block database");
+
+    next_staging_block.block_data = StacksChainState::load_block_bytes(
+        &blocks_path,
+        &next_staging_block.consensus_hash,
+        &next_staging_block.anchored_block_hash,
+    )
+    .unwrap()
+    .unwrap_or(vec![]);
+
+    let next_microblocks = match StacksChainState::find_parent_microblock_stream(
+        &chainstate_tx.tx,
+        &next_staging_block,
+    )
+    .unwrap()
+    {
+        Some(x) => x,
+        None => {
+            println!("No microblock stream found for {}", index_block_hash_hex);
+            return;
+        }
+    };
+
+    let (burn_header_hash, burn_header_height, burn_header_timestamp, _winning_block_txid) =
+        match SortitionDB::get_block_snapshot_consensus(
+            &sort_tx,
+            &next_staging_block.consensus_hash,
+        )
+        .unwrap()
+        {
+            Some(sn) => (
+                sn.burn_header_hash,
+                sn.block_height as u32,
+                sn.burn_header_timestamp,
+                sn.winning_block_txid,
+            ),
+            None => {
+                // shouldn't happen
+                panic!(
+                    "CORRUPTION: staging block {}/{} does not correspond to a burn block",
+                    &next_staging_block.consensus_hash, &next_staging_block.anchored_block_hash
+                );
+            }
+        };
+
+    info!(
+        "Process block {}/{} = {} in burn block {}, parent microblock {}",
+        next_staging_block.consensus_hash,
+        next_staging_block.anchored_block_hash,
+        &index_block_hash,
+        &burn_header_hash,
+        &next_staging_block.parent_microblock_hash,
+    );
+
+    let parent_header_info =
+        match StacksChainState::get_parent_header_info(&mut chainstate_tx, &next_staging_block)
+            .unwrap()
+        {
+            Some(hinfo) => hinfo,
+            None => {
+                println!(
+                    "Failed to load parent head info for block: {}",
+                    index_block_hash_hex
+                );
+                return;
+            }
+        };
+
+    let block = StacksChainState::extract_stacks_block(&next_staging_block).unwrap();
+    let block_size = next_staging_block.block_data.len() as u64;
+
+    if !StacksChainState::check_block_attachment(&parent_header_info.anchored_header, &block.header)
+    {
+        let msg = format!(
+            "Invalid stacks block {}/{} -- does not attach to parent {}/{}",
+            &next_staging_block.consensus_hash,
+            block.block_hash(),
+            parent_header_info.anchored_header.block_hash(),
+            &parent_header_info.consensus_hash
+        );
+        println!("{}", &msg);
+        return;
+    }
+
+    // validation check -- validate parent microblocks and find the ones that connect the
+    // block's parent to this block.
+    let next_microblocks = StacksChainState::extract_connecting_microblocks(
+        &parent_header_info,
+        &next_staging_block,
+        &block,
+        next_microblocks,
+    )
+    .unwrap();
+    let (last_microblock_hash, last_microblock_seq) = match next_microblocks.len() {
+        0 => (EMPTY_MICROBLOCK_PARENT_HASH.clone(), 0),
+        _ => {
+            let l = next_microblocks.len();
+            (
+                next_microblocks[l - 1].block_hash(),
+                next_microblocks[l - 1].header.sequence,
+            )
+        }
+    };
+    assert_eq!(
+        next_staging_block.parent_microblock_hash,
+        last_microblock_hash
+    );
+    assert_eq!(
+        next_staging_block.parent_microblock_seq,
+        last_microblock_seq
+    );
+
+    // user supports were never activated
+    let user_supports = vec![];
+
+    let block_am = StacksChainState::find_stacks_tip_affirmation_map(
+        &burnchain_blocks_db,
+        sort_tx.tx(),
+        &next_staging_block.consensus_hash,
+        &next_staging_block.anchored_block_hash,
+    )
+    .unwrap();
+
+    let pox_constants = sort_tx.context.pox_constants.clone();
+
+    let epoch_receipt = match StacksChainState::append_block(
+        &mut chainstate_tx,
+        clarity_instance,
+        &mut sort_tx,
+        &pox_constants,
+        &parent_header_info,
+        &next_staging_block.consensus_hash,
+        &burn_header_hash,
+        burn_header_height,
+        burn_header_timestamp,
+        &block,
+        block_size,
+        &next_microblocks,
+        next_staging_block.commit_burn,
+        next_staging_block.sortition_burn,
+        &user_supports,
+        block_am.weight(),
+        true,
+    ) {
+        Ok((_receipt, _)) => {
+            info!("Block processed successfully! block = {}", index_block_hash);
+        }
+        Err(e) => {
+            println!(
+                "Failed processing block! block = {}, error = {:?}",
+                index_block_hash, e
+            );
+        }
+    };
+}
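For orientation, the replay-block subcommand above resolves the prefix to full index block hashes via the staging_blocks table and then calls replay_block once per hash; replay_block itself requires a complete hash, since StacksBlockId::from_hex will not accept a prefix. Because neither chainstate_tx nor sort_tx is committed here, the replay appears to leave the on-disk databases unmodified. A hypothetical direct call (the path and hash below are placeholders, not values from this diff):

// Hypothetical usage sketch, not part of the diff.
fn replay_one_block() {
    // Placeholder working directory; replay_block expects the usual
    // "<dir>/mainnet/chainstate" and "<dir>/mainnet/burnchain" layout beneath it.
    let stacks_path = "/path/to/stacks-node/working-dir";
    // Placeholder full index block hash (64 hex characters).
    let index_block_hash_hex =
        "0000000000000000000000000000000000000000000000000000000000000000";
    replay_block(stacks_path, index_block_hash_hex);
}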