Merge pull request #1249 from blockstack/feature/burn-block-time
Feature/burn block time
@@ -402,7 +402,8 @@ impl BitcoinBlockParser {
             block_height: block_height,
             block_hash: BurnchainHeaderHash::from_bitcoin_hash(&block.bitcoin_hash()),
             parent_block_hash: BurnchainHeaderHash::from_bitcoin_hash(&block.header.prev_blockhash),
-            txs: accepted_txs
+            txs: accepted_txs,
+            timestamp: block.header.time as u64
         }
     }

@@ -783,7 +784,8 @@ mod tests {
                         }
                     ]
                 }
-            ]
+            ],
+            timestamp: 1543267060,
         })
     },
     BlockFixture {

@@ -795,6 +797,7 @@ mod tests {
             block_height: 32,
             block_hash: to_block_hash(&hex_bytes("4f3757bc236e58b87d6208aa795115002b739bf39268cf69640f0b092e8cdafe").unwrap()),
             parent_block_hash: to_block_hash(&hex_bytes("25af4b7151b77f6f8235bda83a8062fba621591beef57e18f4697c8b88a298ad").unwrap()),
+            timestamp: 1543272755,
             txs: vec![
                 BitcoinTransaction {
                     // TOKEN_TRANSFER

@@ -630,6 +630,16 @@ impl BurnchainIndexer for BitcoinIndexer {
        Ok(first_block_header_hash)
    }

+   /// Get the first block header timestamp
+   fn get_first_block_header_timestamp(&self, headers_path: &String) -> Result<u64, burnchain_error> {
+       let first_block_height = self.get_first_block_height();
+       let first_headers = self.read_spv_headers(headers_path, first_block_height, first_block_height+1)
+           .map_err(burnchain_error::Bitcoin)?;
+
+       let first_block_header_timestamp = first_headers[0].header.time as u64;
+       Ok(first_block_header_timestamp)
+   }
+
    /// Read downloaded headers within a range
    fn read_headers(&self, headers_path: &String, start_block: u64, end_block: u64) -> Result<Vec<BitcoinHeaderIPC>, burnchain_error> {
        let headers = self.read_spv_headers(headers_path, start_block, end_block)
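The new indexer method reads a single SPV header (the half-open range [first_block_height, first_block_height+1)) and widens Bitcoin's 32-bit header `time` field to u64. A minimal self-contained sketch of that conversion, using a hypothetical MockHeader in place of the repo's SPV header type:

    // Hypothetical stand-in for an SPV header record; Bitcoin block headers
    // carry `time` as a u32, so the burnchain-facing API widens it to u64.
    struct MockHeader { time: u32 }

    fn first_header_timestamp(headers: &[MockHeader]) -> Option<u64> {
        // headers[0] corresponds to the first block in the requested range
        headers.first().map(|h| h.time as u64)
    }

    fn main() {
        let headers = vec![MockHeader { time: 1543267060 }];
        assert_eq!(first_header_timestamp(&headers), Some(1543267060u64));
    }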
@@ -185,15 +185,17 @@ pub struct BitcoinBlock {
    pub block_hash: BurnchainHeaderHash,
    pub parent_block_hash: BurnchainHeaderHash,
    pub txs: Vec<BitcoinTransaction>,
+   pub timestamp: u64
 }

 impl BitcoinBlock {
-   pub fn new(height: u64, hash: &BurnchainHeaderHash, parent: &BurnchainHeaderHash, txs: &Vec<BitcoinTransaction>) -> BitcoinBlock {
+   pub fn new(height: u64, hash: &BurnchainHeaderHash, parent: &BurnchainHeaderHash, txs: &Vec<BitcoinTransaction>, timestamp: u64) -> BitcoinBlock {
        BitcoinBlock {
            block_height: height,
            block_hash: hash.clone(),
            parent_block_hash: parent.clone(),
            txs: txs.clone(),
+           timestamp: timestamp
        }
    }
 }
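With the struct change above, every call site of BitcoinBlock::new must now supply the burn block's timestamp explicitly; the test hunks further down pass either a fixed value or get_epoch_time_secs(). A self-contained sketch of the same constructor-threading pattern, with placeholder types standing in for the repo's:

    #[derive(Clone, Debug, PartialEq)]
    struct Hash32([u8; 32]); // stand-in for BurnchainHeaderHash

    struct Block {
        height: u64,
        hash: Hash32,
        parent_hash: Hash32,
        timestamp: u64, // the newly threaded-through field
    }

    impl Block {
        fn new(height: u64, hash: &Hash32, parent: &Hash32, timestamp: u64) -> Block {
            Block { height, hash: hash.clone(), parent_hash: parent.clone(), timestamp }
        }
    }

    fn main() {
        let block = Block::new(121, &Hash32([1u8; 32]), &Hash32([0u8; 32]), 121);
        assert_eq!(block.timestamp, 121);
    }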
@@ -79,6 +79,7 @@ use util::get_epoch_time_ms;
 use util::db::DBConn;
 use util::db::DBTx;
 use util::vrf::VRFPublicKey;
+use util::get_epoch_time_secs;

 use core::PEER_VERSION;
 use core::NETWORK_ID_MAINNET;

@@ -255,6 +256,12 @@ impl BurnchainBlock {
            BurnchainBlock::Bitcoin(ref data) => data.txs.iter().map(|ref tx| BurnchainTransaction::Bitcoin((*tx).clone())).collect()
        }
    }

+   pub fn timestamp(&self) -> u64 {
+       match *self {
+           BurnchainBlock::Bitcoin(ref data) => data.timestamp
+       }
+   }
+
    pub fn header(&self, parent_snapshot: &BlockSnapshot) -> BurnchainBlockHeader {
        match *self {

@@ -264,7 +271,8 @@ impl BurnchainBlock {
                block_hash: data.block_hash.clone(),
                parent_block_hash: data.parent_block_hash.clone(),
                num_txs: data.txs.len() as u64,
-               parent_index_root: parent_snapshot.index_root.clone()
+               parent_index_root: parent_snapshot.index_root.clone(),
+               timestamp: data.timestamp
            }
        }
    }
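BurnchainBlock is an enum with a single Bitcoin variant today, so the new timestamp() accessor is plain match-based delegation into the variant's payload, mirroring txs() and header() above. A minimal sketch of that shape, with hypothetical types:

    struct BtcData { timestamp: u64 }

    enum Chain {
        Bitcoin(BtcData), // further burnchains would become extra variants
    }

    impl Chain {
        // Each accessor matches on the variant and projects one field, so a
        // future variant only needs a new match arm.
        fn timestamp(&self) -> u64 {
            match *self {
                Chain::Bitcoin(ref data) => data.timestamp,
            }
        }
    }

    fn main() {
        let block = Chain::Bitcoin(BtcData { timestamp: 1543272755 });
        assert_eq!(block.timestamp(), 1543272755);
    }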
@@ -386,9 +394,10 @@ impl Burnchain {

        let first_block_height = indexer.get_first_block_height();
        let first_block_header_hash = indexer.get_first_block_header_hash(&indexer.get_headers_path())?;
+       let first_block_header_timestamp = indexer.get_first_block_header_timestamp(&indexer.get_headers_path())?;

        let db_path = self.get_db_path();
-       BurnDB::connect(&db_path, first_block_height, &first_block_header_hash, readwrite)
+       BurnDB::connect(&db_path, first_block_height, &first_block_header_hash, first_block_header_timestamp, readwrite)
            .map_err(burnchain_error::DBError)
    }

@@ -861,6 +870,7 @@ pub mod tests {

    use util::hash::hex_bytes;
    use util::log;
+   use util::get_epoch_time_secs;

    use chainstate::burn::operations::{
        LeaderBlockCommitOp,

@@ -1130,6 +1140,7 @@ pub mod tests {
        let mut block_121_snapshot = BlockSnapshot {
            block_height: 121,
            burn_header_hash: block_121_hash.clone(),
+           burn_header_timestamp: 121,
            parent_burn_header_hash: first_burn_hash.clone(),
            ops_hash: block_opshash_121.clone(),
            consensus_hash: ConsensusHash::from_ops(&block_opshash_121, 0, &block_prev_chs_121),

@@ -1154,6 +1165,7 @@ pub mod tests {
        let mut block_122_snapshot = BlockSnapshot {
            block_height: 122,
            burn_header_hash: block_122_hash.clone(),
+           burn_header_timestamp: 122,
            parent_burn_header_hash: block_121_hash.clone(),
            ops_hash: block_opshash_122.clone(),
            consensus_hash: ConsensusHash::from_ops(&block_opshash_122, 0, &block_prev_chs_122),

@@ -1184,6 +1196,7 @@ pub mod tests {
        let mut block_123_snapshot = BlockSnapshot {
            block_height: 123,
            burn_header_hash: block_123_hash.clone(),
+           burn_header_timestamp: 123,
            parent_burn_header_hash: block_122_hash.clone(),
            ops_hash: block_opshash_123.clone(),
            consensus_hash: ConsensusHash::from_ops(&block_opshash_123, 0, &block_prev_chs_123), // user burns not included, so zero burns this block

@@ -1230,11 +1243,11 @@ pub mod tests {
        let mut db = BurnDB::connect_memory(first_block_height, &first_burn_hash).unwrap();

        // NOTE: the .txs() method will NOT be called, so we can pass an empty vec![] here
-       let block121 = BurnchainBlock::Bitcoin(BitcoinBlock::new(121, &block_121_hash, &first_burn_hash, &vec![]));
-       let block122 = BurnchainBlock::Bitcoin(BitcoinBlock::new(122, &block_122_hash, &block_121_hash, &vec![]));
-       let block123 = BurnchainBlock::Bitcoin(BitcoinBlock::new(123, &block_123_hash, &block_122_hash, &vec![]));
+       let block121 = BurnchainBlock::Bitcoin(BitcoinBlock::new(121, &block_121_hash, &first_burn_hash, &vec![], 121));
+       let block122 = BurnchainBlock::Bitcoin(BitcoinBlock::new(122, &block_122_hash, &block_121_hash, &vec![], 122));
+       let block123 = BurnchainBlock::Bitcoin(BitcoinBlock::new(123, &block_123_hash, &block_122_hash, &vec![], 123));

-       let initial_snapshot = BlockSnapshot::initial(first_block_height, &first_burn_hash);
+       let initial_snapshot = BlockSnapshot::initial(first_block_height, &first_burn_hash, first_block_height as u64);

        // process up to 124
        {
@@ -1321,6 +1334,7 @@ pub mod tests {
        let mut block_124_snapshot = BlockSnapshot {
            block_height: 124,
            burn_header_hash: block_124_hash.clone(),
+           burn_header_timestamp: 124,
            parent_burn_header_hash: block_123_snapshot.burn_header_hash.clone(),
            ops_hash: block_opshash_124.clone(),
            consensus_hash: ConsensusHash::from_ops(&block_opshash_124, burn_total, &block_prev_chs_124),

@@ -1341,7 +1355,7 @@ pub mod tests {
            block_124_snapshot.sortition_hash = block_124_snapshot.sortition_hash.mix_VRF_seed(&block_124_winners[scenario_idx].new_seed);
        }

-       let block124 = BurnchainBlock::Bitcoin(BitcoinBlock::new(124, &block_124_hash, &block_123_hash, &vec![]));
+       let block124 = BurnchainBlock::Bitcoin(BitcoinBlock::new(124, &block_124_hash, &block_123_hash, &vec![], 124));

        // process this scenario
        let sn124 = {

@@ -1431,7 +1445,7 @@ pub mod tests {

        // insert all operations
        let mut db = BurnDB::connect_memory(first_block_height, &first_burn_hash).unwrap();
-       let mut prev_snapshot = BlockSnapshot::initial(first_block_height, &first_burn_hash);
+       let mut prev_snapshot = BlockSnapshot::initial(first_block_height, &first_burn_hash, first_block_height as u64);
        let mut all_stacks_block_hashes = vec![];

        for i in 0..32 {

@@ -1490,7 +1504,7 @@ pub mod tests {

            block_ops.push(BlockstackOperationType::LeaderKeyRegister(next_leader_key));

-           let block = BurnchainBlock::Bitcoin(BitcoinBlock::new(first_block_height + (i + 1) as u64, &burn_block_hash, &parent_burn_block_hash, &vec![]));
+           let block = BurnchainBlock::Bitcoin(BitcoinBlock::new(first_block_height + (i + 1) as u64, &burn_block_hash, &parent_burn_block_hash, &vec![], get_epoch_time_secs()));

            // process this block
            let snapshot = {
@@ -61,6 +61,7 @@ pub trait BurnchainIndexer {

    fn get_first_block_height(&self) -> u64;
    fn get_first_block_header_hash(&self, headers_path: &String) -> Result<BurnchainHeaderHash, burnchain_error>;
+   fn get_first_block_header_timestamp(&self, headers_path: &String) -> Result<u64, burnchain_error>;

    fn get_headers_path(&self) -> String;
    fn get_headers_height(&self, headers_path: &String) -> Result<u64, burnchain_error>;

@@ -244,6 +244,7 @@ pub struct BurnchainBlockHeader {
    pub parent_block_hash: BurnchainHeaderHash,
    pub parent_index_root: TrieHash,
    pub num_txs: u64,
+   pub timestamp: u64,
 }

 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]

@@ -373,6 +374,7 @@ pub mod test {
    use util::vrf::*;
    use util::secp256k1::*;
    use util::db::*;
+   use util::get_epoch_time_secs;

    use burnchains::Burnchain;
    use chainstate::burn::operations::BlockstackOperationType;
@@ -420,7 +422,8 @@ pub mod test {
    pub block_height: u64,
    pub parent_snapshot: BlockSnapshot,
    pub txs: Vec<BlockstackOperationType>,
-   pub fork_id: u64
+   pub fork_id: u64,
+   pub timestamp: u64
 }

 #[derive(Debug, Clone)]

@@ -648,7 +651,8 @@ pub mod test {
            parent_snapshot: parent_snapshot.clone(),
            block_height: parent_snapshot.block_height + 1,
            txs: vec![],
-           fork_id: fork_id
+           fork_id: fork_id,
+           timestamp: get_epoch_time_secs()
        }
    }

@@ -747,7 +751,7 @@ pub mod test {

    pub fn mine<'a>(&self, tx: &mut BurnDBTx<'a>, burnchain: &Burnchain) -> BlockSnapshot {
        let block_hash = BurnchainHeaderHash::from_test_data(self.block_height, &self.parent_snapshot.index_root, self.fork_id);
-       let mock_bitcoin_block = BitcoinBlock::new(self.block_height, &block_hash, &self.parent_snapshot.burn_header_hash, &vec![]);
+       let mock_bitcoin_block = BitcoinBlock::new(self.block_height, &block_hash, &self.parent_snapshot.burn_header_hash, &vec![], get_epoch_time_secs());
        let block = BurnchainBlock::Bitcoin(mock_bitcoin_block);

        // this is basically lifted verbatim from Burnchain::process_block_ops()
@@ -33,6 +33,7 @@ use std::ops::DerefMut;

 use util::db::{FromRow, FromColumn, query_rows, query_row_columns, query_count, IndexDBTx, db_mkdirs};
 use util::db::Error as db_error;
+use util::get_epoch_time_secs;

 use chainstate::ChainstateDB;

@@ -106,6 +107,7 @@ impl FromRow<BlockSnapshot> for BlockSnapshot {
    fn from_row<'a>(row: &'a Row) -> Result<BlockSnapshot, db_error> {
        let block_height_i64 : i64 = row.get("block_height");
        let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?;
+       let burn_header_timestamp_i64 : i64 = row.get("burn_header_timestamp");
        let parent_burn_header_hash = BurnchainHeaderHash::from_column(row, "parent_burn_header_hash")?;
        let consensus_hash = ConsensusHash::from_column(row, "consensus_hash")?;
        let ops_hash = OpsHash::from_column(row, "ops_hash")?;

@@ -125,11 +127,16 @@ impl FromRow<BlockSnapshot> for BlockSnapshot {
            return Err(db_error::ParseError);
        }

+       if burn_header_timestamp_i64 < 0 {
+           return Err(db_error::ParseError);
+       }
+
        let total_burn = total_burn_str.parse::<u64>()
            .map_err(|_e| db_error::ParseError)?;

        let snapshot = BlockSnapshot {
            block_height: block_height_i64 as u64,
+           burn_header_timestamp: burn_header_timestamp_i64 as u64,
            burn_header_hash: burn_header_hash,
            parent_burn_header_hash: parent_burn_header_hash,
            consensus_hash: consensus_hash,
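SQLite has no unsigned 64-bit column type, so the timestamp goes into the INT column as an i64 and is widened back to u64 on read, with the sign check above rejecting corrupt rows. A self-contained sketch of that round-trip guard (plain Rust, no database; ParseError here is a hypothetical stand-in for db_error::ParseError):

    #[derive(Debug, PartialEq)]
    struct ParseError;

    // Store: u64 -> i64, refusing values that do not fit SQLite's signed INT.
    fn to_db(ts: u64) -> Result<i64, ParseError> {
        if ts > i64::max_value() as u64 {
            return Err(ParseError);
        }
        Ok(ts as i64)
    }

    // Load: i64 -> u64, rejecting negatives the same way from_row() does.
    fn from_db(raw: i64) -> Result<u64, ParseError> {
        if raw < 0 {
            return Err(ParseError);
        }
        Ok(raw as u64)
    }

    fn main() {
        assert_eq!(from_db(to_db(1543267060).unwrap()).unwrap(), 1543267060u64);
        assert_eq!(from_db(-1), Err(ParseError));
    }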
@@ -285,6 +292,7 @@ const BURNDB_SETUP : &'static [&'static str]= &[
    CREATE TABLE snapshots(
        block_height INTEGER NOT NULL,
        burn_header_hash TEXT UNIQUE NOT NULL,
+       burn_header_timestamp INT NOT NULL,
        parent_burn_header_hash TEXT NOT NULL,
        consensus_hash TEXT NOT NULL,
        ops_hash TEXT NOT NULL,

@@ -399,11 +407,11 @@ fn burndb_get_ancestor_block_hash<'a>(tx: &mut BurnDBTx<'a>, block_height: u64,
 }

 impl BurnDB {
-   fn instantiate(conn: &mut Connection, index_path: &str, first_block_height: u64, first_burn_header_hash: &BurnchainHeaderHash) -> Result<(), db_error> {
+   fn instantiate(conn: &mut Connection, index_path: &str, first_block_height: u64, first_burn_header_hash: &BurnchainHeaderHash, first_burn_header_timestamp: u64) -> Result<(), db_error> {
        let tx = conn.transaction().map_err(db_error::SqliteError)?;

        // create first (sentinel) snapshot
-       let mut first_snapshot = BlockSnapshot::initial(first_block_height, first_burn_header_hash);
+       let mut first_snapshot = BlockSnapshot::initial(first_block_height, first_burn_header_hash, first_burn_header_timestamp);

        assert!(first_snapshot.parent_burn_header_hash != first_snapshot.burn_header_hash);
        assert_eq!(first_snapshot.parent_burn_header_hash.as_bytes(), TrieFileStorage::block_sentinel().as_bytes());

@@ -423,9 +431,9 @@ impl BurnDB {
        first_snapshot.index_root = index_root;

        burndbtx.tx.execute("INSERT INTO snapshots \
-                           (block_height, burn_header_hash, parent_burn_header_hash, consensus_hash, ops_hash, total_burn, sortition, sortition_hash, winning_block_txid, winning_stacks_block_hash, index_root, num_sortitions) \
-                           VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)",
-                           &[&(first_snapshot.block_height as i64) as &dyn ToSql, &first_snapshot.burn_header_hash, &first_snapshot.parent_burn_header_hash, &first_snapshot.consensus_hash, &first_snapshot.ops_hash, &"0".to_string(),
+                           (block_height, burn_header_hash, burn_header_timestamp, parent_burn_header_hash, consensus_hash, ops_hash, total_burn, sortition, sortition_hash, winning_block_txid, winning_stacks_block_hash, index_root, num_sortitions) \
+                           VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)",
+                           &[&(first_snapshot.block_height as i64) as &dyn ToSql, &first_snapshot.burn_header_hash, &(first_snapshot.burn_header_timestamp as i64), &first_snapshot.parent_burn_header_hash, &first_snapshot.consensus_hash, &first_snapshot.ops_hash, &"0".to_string(),
                            &first_snapshot.sortition as &dyn ToSql, &first_snapshot.sortition_hash, &first_snapshot.winning_block_txid, &first_snapshot.winning_stacks_block_hash, &first_snapshot.index_root,
                            &(first_snapshot.num_sortitions as i64) as &dyn ToSql])
            .map_err(db_error::SqliteError)?;
@@ -436,7 +444,7 @@ impl BurnDB {

    /// Open the burn database at the given path. Open read-only or read/write.
    /// If opened for read/write and it doesn't exist, instantiate it.
-   pub fn connect(path: &String, first_block_height: u64, first_burn_hash: &BurnchainHeaderHash, readwrite: bool) -> Result<BurnDB, db_error> {
+   pub fn connect(path: &String, first_block_height: u64, first_burn_hash: &BurnchainHeaderHash, first_burn_header_timestamp: u64, readwrite: bool) -> Result<BurnDB, db_error> {
        let mut create_flag = false;
        let open_flags = match fs::metadata(path) {
            Err(e) => {

@@ -470,7 +478,7 @@ impl BurnDB {

        if create_flag {
            // instantiate!
-           BurnDB::instantiate(&mut conn, &index_path, first_block_height, first_burn_hash)?;
+           BurnDB::instantiate(&mut conn, &index_path, first_block_height, first_burn_hash, first_burn_header_timestamp)?;
        }
        else {
            // validate -- must contain the given first block and first block hash

@@ -513,7 +521,7 @@ impl BurnDB {
        let db_path_dir = format!("/tmp/test-blockstack-burndb-{}", to_hex(&buf));
        let (db_path, index_path) = db_mkdirs(&db_path_dir)?;

-       BurnDB::instantiate(&mut conn, &index_path, first_block_height, first_burn_hash)?;
+       BurnDB::instantiate(&mut conn, &index_path, first_block_height, first_burn_hash, get_epoch_time_secs())?;

        let marf = BurnDB::open_index(&index_path)?;

@@ -580,9 +588,9 @@ impl BurnDB {
        let total_burn_str = format!("{}", snapshot.total_burn);

        tx.execute("INSERT INTO snapshots \
-                  (block_height, burn_header_hash, parent_burn_header_hash, consensus_hash, ops_hash, total_burn, sortition, sortition_hash, winning_block_txid, winning_stacks_block_hash, index_root, num_sortitions) \
-                  VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)",
-                  &[&(snapshot.block_height as i64) as &dyn ToSql, &snapshot.burn_header_hash, &snapshot.parent_burn_header_hash, &snapshot.consensus_hash, &snapshot.ops_hash, &total_burn_str,
+                  (block_height, burn_header_hash, burn_header_timestamp, parent_burn_header_hash, consensus_hash, ops_hash, total_burn, sortition, sortition_hash, winning_block_txid, winning_stacks_block_hash, index_root, num_sortitions) \
+                  VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)",
+                  &[&(snapshot.block_height as i64) as &dyn ToSql, &snapshot.burn_header_hash, &(snapshot.burn_header_timestamp as i64), &snapshot.parent_burn_header_hash, &snapshot.consensus_hash, &snapshot.ops_hash, &total_burn_str,
                   &snapshot.sortition as &dyn ToSql, &snapshot.sortition_hash, &snapshot.winning_block_txid, &snapshot.winning_stacks_block_hash, &snapshot.index_root,
                   &(snapshot.num_sortitions as i64) as &dyn ToSql])
            .map_err(db_error::SqliteError)?;

@@ -1477,6 +1485,7 @@ mod tests {
    use super::*;

    use util::db::Error as db_error;
+   use util::get_epoch_time_secs;

    use chainstate::burn::operations::{
        LeaderBlockCommitOp,
@@ -1949,6 +1958,7 @@ mod tests {
        for i in 0..255 {
            let snapshot_row = BlockSnapshot {
                block_height: i+1,
+               burn_header_timestamp: get_epoch_time_secs(),
                burn_header_hash: BurnchainHeaderHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,i as u8]).unwrap(),
                parent_burn_header_hash: BurnchainHeaderHash::from_bytes(&[(if i == 0 { 0x10 } else { 0 }) as u8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,(if i == 0 { 0xff } else { i - 1 }) as u8]).unwrap(),
                consensus_hash: ConsensusHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,(i+1) as u8]).unwrap(),

@@ -2015,6 +2025,7 @@ mod tests {
        for i in 0..256 {
            let snapshot_row = BlockSnapshot {
                block_height: i+1,
+               burn_header_timestamp: get_epoch_time_secs(),
                burn_header_hash: BurnchainHeaderHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,i as u8]).unwrap(),
                parent_burn_header_hash: BurnchainHeaderHash::from_bytes(&[(if i == 0 { 0x10 } else { 0 }) as u8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,(if i == 0 { 0xff } else { i - 1 }) as u8]).unwrap(),
                consensus_hash: ConsensusHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,i as u8]).unwrap(),

@@ -2162,6 +2173,7 @@ mod tests {

        let mut first_snapshot = BlockSnapshot {
            block_height: block_height - 2,
+           burn_header_timestamp: get_epoch_time_secs(),
            burn_header_hash: first_burn_hash.clone(),
            parent_burn_header_hash: BurnchainHeaderHash([0xff; 32]),
            consensus_hash: ConsensusHash::from_hex("0000000000000000000000000000000000000000").unwrap(),

@@ -2177,6 +2189,7 @@ mod tests {

        let mut snapshot_with_sortition = BlockSnapshot {
            block_height: block_height,
+           burn_header_timestamp: get_epoch_time_secs(),
            burn_header_hash: BurnchainHeaderHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2]).unwrap(),
            parent_burn_header_hash: BurnchainHeaderHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]).unwrap(),
            consensus_hash: ConsensusHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]).unwrap(),

@@ -2192,6 +2205,7 @@ mod tests {

        let snapshot_without_sortition = BlockSnapshot {
            block_height: block_height - 1,
+           burn_header_timestamp: get_epoch_time_secs(),
            burn_header_hash: BurnchainHeaderHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]).unwrap(),
            parent_burn_header_hash: BurnchainHeaderHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]).unwrap(),
            consensus_hash: ConsensusHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2]).unwrap(),
@@ -113,6 +113,7 @@ pub enum Opcodes {
 #[derive(Debug, Clone, PartialEq)]
 pub struct BlockSnapshot {
    pub block_height: u64,
+   pub burn_header_timestamp: u64,
    pub burn_header_hash: BurnchainHeaderHash,
    pub parent_burn_header_hash: BurnchainHeaderHash,
    pub consensus_hash: ConsensusHash,

@@ -323,7 +324,8 @@ mod tests {
    use util::db::Error as db_error;

    use rusqlite::Connection;

+   use util::get_epoch_time_secs;

    #[test]
    fn get_prev_consensus_hashes() {

@@ -337,6 +339,7 @@ mod tests {
        for i in 1..256 {
            let snapshot_row = BlockSnapshot {
                block_height: i,
+               burn_header_timestamp: get_epoch_time_secs(),
                burn_header_hash: BurnchainHeaderHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,i as u8]).unwrap(),
                parent_burn_header_hash: BurnchainHeaderHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,(if i == 0 { 0xff } else { i-1 }) as u8]).unwrap(),
                consensus_hash: ConsensusHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,i as u8]).unwrap(),

@@ -395,6 +395,7 @@ mod tests {
    use util::vrf::VRFPublicKey;
    use util::hash::hex_bytes;
    use util::log;
+   use util::get_epoch_time_secs;

    use chainstate::stacks::StacksAddress;
    use chainstate::stacks::StacksPublicKey;

@@ -483,7 +484,8 @@ mod tests {
                block_hash: op.burn_header_hash.clone(),
                parent_block_hash: op.burn_header_hash.clone(),
                num_txs: 1,
-               parent_index_root: TrieHash::from_empty_data()
+               parent_index_root: TrieHash::from_empty_data(),
+               timestamp: get_epoch_time_secs()
            }
        },
        None => {

@@ -492,7 +494,8 @@ mod tests {
                block_hash: BurnchainHeaderHash([0u8; 32]),
                parent_block_hash: BurnchainHeaderHash([0u8; 32]),
                num_txs: 0,
-               parent_index_root: TrieHash::from_empty_data()
+               parent_index_root: TrieHash::from_empty_data(),
+               timestamp: get_epoch_time_secs()
            }
        }
    };

@@ -636,6 +639,7 @@ mod tests {
        for i in 0..block_header_hashes.len() {
            let mut snapshot_row = BlockSnapshot {
                block_height: (i + 1 + first_block_height as usize) as u64,
+               burn_header_timestamp: get_epoch_time_secs(),
                burn_header_hash: block_header_hashes[i].clone(),
                parent_burn_header_hash: prev_snapshot.burn_header_hash.clone(),
                consensus_hash: ConsensusHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,(i+1) as u8]).unwrap(),

@@ -913,7 +917,8 @@ mod tests {
            block_hash: fixture.op.burn_header_hash.clone(),
            parent_block_hash: fixture.op.burn_header_hash.clone(),
            num_txs: 1,
-           parent_index_root: tip_index_root.clone()
+           parent_index_root: tip_index_root.clone(),
+           timestamp: get_epoch_time_secs()
        };
        assert_eq!(fixture.res, fixture.op.check(&burnchain, &header, &mut tx));
    }
@@ -237,6 +237,7 @@ mod tests {

    use util::hash::hex_bytes;
    use util::log;
+   use util::get_epoch_time_secs;

    use chainstate::burn::operations::{
        LeaderBlockCommitOp,

@@ -333,6 +334,7 @@ mod tests {
                parent_block_hash: op.burn_header_hash.clone(),
                num_txs: 1,
                parent_index_root: TrieHash::from_empty_data(),
+               timestamp: get_epoch_time_secs()
            }
        },
        None => {

@@ -342,6 +344,7 @@ mod tests {
                parent_block_hash: BurnchainHeaderHash([0u8; 32]),
                num_txs: 0,
                parent_index_root: TrieHash::from_empty_data(),
+               timestamp: get_epoch_time_secs()
            }
        }
    };

@@ -454,6 +457,7 @@ mod tests {
        for i in 0..10 {
            let mut snapshot_row = BlockSnapshot {
                block_height: i + 1 + first_block_height,
+               burn_header_timestamp: get_epoch_time_secs(),
                burn_header_hash: block_header_hashes[i as usize].clone(),
                parent_burn_header_hash: prev_snapshot.burn_header_hash.clone(),
                consensus_hash: ConsensusHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,(i+1) as u8]).unwrap(),

@@ -530,7 +534,8 @@ mod tests {
            block_hash: fixture.op.burn_header_hash.clone(),
            parent_block_hash: fixture.op.burn_header_hash.clone(),
            num_txs: 1,
-           parent_index_root: tip_root_index.clone()
+           parent_index_root: tip_root_index.clone(),
+           timestamp: get_epoch_time_secs()
        };
        assert_eq!(fixture.res, fixture.op.check(&burnchain, &header, &mut tx));
    }

@@ -266,6 +266,7 @@ mod tests {

    use util::hash::{hex_bytes, Hash160};
    use util::log;
+   use util::get_epoch_time_secs;

    struct OpFixture {
        txstr: String,

@@ -345,6 +346,7 @@ mod tests {
                parent_block_hash: op.burn_header_hash.clone(),
                num_txs: 1,
                parent_index_root: TrieHash::from_empty_data(),
+               timestamp: get_epoch_time_secs()
            }
        },
        None => {

@@ -354,6 +356,7 @@ mod tests {
                parent_block_hash: BurnchainHeaderHash([0u8; 32]),
                num_txs: 0,
                parent_index_root: TrieHash::from_empty_data(),
+               timestamp: get_epoch_time_secs()
            }
        }
    };

@@ -463,6 +466,7 @@ mod tests {
        for i in 0..10 {
            let mut snapshot_row = BlockSnapshot {
                block_height: i + 1 + first_block_height,
+               burn_header_timestamp: get_epoch_time_secs(),
                burn_header_hash: block_header_hashes[i as usize].clone(),
                parent_burn_header_hash: prev_snapshot.burn_header_hash.clone(),
                consensus_hash: ConsensusHash::from_bytes(&[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,(i + 1) as u8]).unwrap(),

@@ -547,7 +551,8 @@ mod tests {
            block_hash: fixture.op.burn_header_hash.clone(),
            parent_block_hash: fixture.op.burn_header_hash.clone(),
            num_txs: 1,
-           parent_index_root: tip_index_root.clone()
+           parent_index_root: tip_index_root.clone(),
+           timestamp: get_epoch_time_secs()
        };
        let mut tx = db.tx_begin().unwrap();
        assert_eq!(fixture.res, fixture.op.check(&burnchain, &header, &mut tx));
@@ -66,13 +66,14 @@ use util::log;

 impl BlockSnapshot {
    /// Create the sentinel block snapshot -- the first one
-   pub fn initial(first_block_height: u64, first_burn_header_hash: &BurnchainHeaderHash) -> BlockSnapshot {
+   pub fn initial(first_block_height: u64, first_burn_header_hash: &BurnchainHeaderHash, first_burn_header_timestamp: u64) -> BlockSnapshot {
        let mut parent_hash_bytes = [0u8; 32];
        parent_hash_bytes.copy_from_slice(TrieFileStorage::block_sentinel().as_bytes());

        BlockSnapshot {
            block_height: first_block_height,
            burn_header_hash: first_burn_header_hash.clone(),
+           burn_header_timestamp: first_burn_header_timestamp,
            parent_burn_header_hash: BurnchainHeaderHash(parent_hash_bytes),
            consensus_hash: ConsensusHash([0u8; 20]),
            ops_hash: OpsHash([0u8; 32]),

@@ -175,6 +176,7 @@ impl BlockSnapshot {
        Ok(BlockSnapshot {
            block_height: block_height,
            burn_header_hash: block_hash,
+           burn_header_timestamp: block_header.timestamp,
            parent_burn_header_hash: parent_block_hash,
            consensus_hash: ch,
            ops_hash: ops_hash,

@@ -269,6 +271,7 @@ impl BlockSnapshot {
        Ok(BlockSnapshot {
            block_height: block_height,
            burn_header_hash: block_hash,
+           burn_header_timestamp: block_header.timestamp,
            parent_burn_header_hash: parent_block_hash,
            consensus_hash: next_ch,
            ops_hash: next_ops_hash,

@@ -298,6 +301,7 @@ mod test {
    use util::vrf::VRFPrivateKey;

    use util::hash::hex_bytes;
+   use util::get_epoch_time_secs;

    use address::*;

@@ -326,7 +330,8 @@ mod test {
            block_hash: BurnchainHeaderHash([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0x01,0x24]),
            parent_block_hash: first_burn_hash.clone(),
            num_txs: 0,
-           parent_index_root: TrieHash::from_empty_data()
+           parent_index_root: TrieHash::from_empty_data(),
+           timestamp: get_epoch_time_secs()
        };

        let initial_snapshot = BurnDB::get_first_block_snapshot(db.conn()).unwrap();

@@ -1179,9 +1179,9 @@ mod test {
            microblock_pubkey_hash: Hash160([9u8; 20])
        };

-       let mut burn_chain_tip = BlockSnapshot::initial(122, &BurnchainHeaderHash([3u8; 32]));
-       let mut stacks_chain_tip = BlockSnapshot::initial(122, &BurnchainHeaderHash([3u8; 32]));
-       let sortition_chain_tip = BlockSnapshot::initial(122, &BurnchainHeaderHash([3u8; 32]));
+       let mut burn_chain_tip = BlockSnapshot::initial(122, &BurnchainHeaderHash([3u8; 32]), 0);
+       let mut stacks_chain_tip = BlockSnapshot::initial(122, &BurnchainHeaderHash([3u8; 32]), 1);
+       let sortition_chain_tip = BlockSnapshot::initial(122, &BurnchainHeaderHash([3u8; 32]), 2);

        let leader_key = LeaderKeyRegisterOp {
            consensus_hash: ConsensusHash::from_bytes(&hex_bytes("0000000000000000000000000000000000000000").unwrap()).unwrap(),
@@ -202,22 +202,24 @@ impl StacksChainState {
        assert!(block_reward.burnchain_commit_burn < i64::max_value() as u64);
        assert!(block_reward.burnchain_sortition_burn < i64::max_value() as u64);

+       let index_block_hash = StacksBlockHeader::make_index_block_hash(&block_reward.burn_header_hash, &block_reward.block_hash);
+
        let args: &[&dyn ToSql] = &[&block_reward.address.to_string(), &block_reward.block_hash, &block_reward.burn_header_hash, &block_reward.parent_block_hash, &block_reward.parent_burn_header_hash,
                                    &format!("{}", block_reward.coinbase), &format!("{}", block_reward.tx_fees_anchored), &format!("{}", block_reward.tx_fees_streamed), &format!("{}", block_reward.stx_burns),
                                    &(block_reward.burnchain_commit_burn as i64) as &dyn ToSql, &(block_reward.burnchain_sortition_burn as i64) as &dyn ToSql, &format!("{}", block_reward.fill), &(block_reward.stacks_block_height as i64) as &dyn ToSql,
-                                   &true as &dyn ToSql, &0i64 as &dyn ToSql];
+                                   &true as &dyn ToSql, &0i64 as &dyn ToSql, &index_block_hash as &dyn ToSql];

-       tx.execute("INSERT INTO payments (address,block_hash,burn_header_hash,parent_block_hash,parent_burn_header_hash,coinbase,tx_fees_anchored,tx_fees_streamed,stx_burns,burnchain_commit_burn,burnchain_sortition_burn,fill,stacks_block_height,miner,vtxindex) \
-                  VALUES (?1,?2,?3,?4,?5,?6,?7,?8,?9,?10,?11,?12,?13,?14,?15)", args)
+       tx.execute("INSERT INTO payments (address,block_hash,burn_header_hash,parent_block_hash,parent_burn_header_hash,coinbase,tx_fees_anchored,tx_fees_streamed,stx_burns,burnchain_commit_burn,burnchain_sortition_burn,fill,stacks_block_height,miner,vtxindex,index_block_hash) \
+                  VALUES (?1,?2,?3,?4,?5,?6,?7,?8,?9,?10,?11,?12,?13,?14,?15,?16)", args)
            .map_err(|e| Error::DBError(db_error::SqliteError(e)))?;

        for user_support in user_burns.iter() {
            let args: &[&dyn ToSql] = &[&user_support.address.to_string(), &block_reward.block_hash, &block_reward.burn_header_hash, &block_reward.parent_block_hash, &block_reward.parent_burn_header_hash,
                                        &format!("{}", block_reward.coinbase), &"0".to_string(), &"0".to_string(), &"0".to_string(),
                                        &(user_support.burn_amount as i64) as &dyn ToSql, &(block_reward.burnchain_sortition_burn as i64) as &dyn ToSql, &format!("{}", block_reward.fill), &(block_reward.stacks_block_height as i64) as &dyn ToSql,
-                                       &false as &dyn ToSql, &user_support.vtxindex as &dyn ToSql];
-           tx.execute("INSERT INTO payments (address,block_hash,burn_header_hash,parent_block_hash,parent_burn_header_hash,coinbase,tx_fees_anchored,tx_fees_streamed,stx_burns,burnchain_commit_burn,burnchain_sortition_burn,fill,stacks_block_height,miner,vtxindex) \
-                      VALUES (?1,?2,?3,?4,?5,?6,?7,?8,?9,?10,?11,?12,?13,?14,?15)",
+                                       &false as &dyn ToSql, &user_support.vtxindex as &dyn ToSql, &index_block_hash as &dyn ToSql];
+           tx.execute("INSERT INTO payments (address,block_hash,burn_header_hash,parent_block_hash,parent_burn_header_hash,coinbase,tx_fees_anchored,tx_fees_streamed,stx_burns,burnchain_commit_burn,burnchain_sortition_burn,fill,stacks_block_height,miner,vtxindex,index_block_hash) \
+                      VALUES (?1,?2,?3,?4,?5,?6,?7,?8,?9,?10,?11,?12,?13,?14,?15,?16)",
                       args)
               .map_err(|e| Error::DBError(db_error::SqliteError(e)))?;
        }
@@ -241,6 +243,26 @@ impl StacksChainState {
        Ok(rows)
    }

+   /// Get the miner info at a particular burn/stacks block
+   pub fn get_miner_info(conn: &DBConn, burn_block_hash: &BurnchainHeaderHash, stacks_block_hash: &BlockHeaderHash) -> Result<Option<MinerPaymentSchedule>, Error> {
+       let qry = "SELECT * FROM payments WHERE burn_header_hash = ?1 AND block_hash = ?2 AND miner = 1".to_string();
+       let args = [burn_block_hash as &dyn ToSql, stacks_block_hash as &dyn ToSql];
+       let mut rows = query_rows::<MinerPaymentSchedule, _>(conn, &qry, &args).map_err(Error::DBError)?;
+       let len = rows.len();
+       match len {
+           0 => {
+               test_debug!("No miner information for {}/{}", burn_block_hash, stacks_block_hash);
+               Ok(None)
+           },
+           1 => {
+               Ok(rows.pop())
+           },
+           _ => {
+               panic!("Multiple miners for {}/{}", burn_block_hash, stacks_block_hash);
+           }
+       }
+   }
+
    /// Calculate the total reward for a miner (or user burn support), given a sample of scheduled miner payments.
    /// The scheduled miner payments must be in order by block height (sample[0] is the oldest).
    /// The first tuple item is the miner's reward; the second tuple item is the list of
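get_miner_info treats the (burn block, stacks block, miner) key as effectively unique: zero rows is a normal miss, exactly one row is the answer, and anything more indicates corrupt state worth a hard failure. A self-contained sketch of that zero/one/many discipline over a plain Vec:

    fn expect_at_most_one<T>(mut rows: Vec<T>, what: &str) -> Option<T> {
        match rows.len() {
            0 => None,       // a miss is a normal, recoverable outcome
            1 => rows.pop(), // exactly one row: the expected case
            _ => panic!("multiple rows for {}", what), // invariant violation
        }
    }

    fn main() {
        assert!(expect_at_most_one::<u32>(vec![], "miner").is_none());
        assert_eq!(expect_at_most_one(vec![7u32], "miner"), Some(7));
    }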
@@ -460,7 +482,15 @@ mod test {
        }

        let mut tx = chainstate.headers_tx_begin().unwrap();
-       let tip = StacksChainState::advance_tip(&mut tx, &parent_header_info.anchored_header, &parent_header_info.burn_header_hash, &new_tip.anchored_header, &new_tip.burn_header_hash, new_tip.microblock_tail.clone(), &block_reward, &user_burns).unwrap();
+       let tip = StacksChainState::advance_tip(&mut tx,
+                                               &parent_header_info.anchored_header,
+                                               &parent_header_info.burn_header_hash,
+                                               &new_tip.anchored_header,
+                                               &new_tip.burn_header_hash,
+                                               new_tip.burn_header_timestamp,
+                                               new_tip.microblock_tail.clone(),
+                                               &block_reward,
+                                               &user_burns).unwrap();
        tx.commit().unwrap();
        tip
    }

@@ -47,7 +47,7 @@ use util::db::{
 };

 use util::strings::StacksString;
-
+use util::get_epoch_time_secs;
 use util::hash::to_hex;

 use chainstate::burn::db::burndb::*;
@@ -98,6 +98,7 @@ pub struct StagingMicroblock {
 #[derive(Debug, Clone, PartialEq)]
 pub struct StagingBlock {
    pub burn_header_hash: BurnchainHeaderHash,
+   pub burn_header_timestamp: u64,
    pub anchored_block_hash: BlockHeaderHash,
    pub parent_burn_header_hash: BurnchainHeaderHash,
    pub parent_anchored_block_hash: BlockHeaderHash,

@@ -162,6 +163,7 @@ impl FromRow<StagingBlock> for StagingBlock {
        let anchored_block_hash : BlockHeaderHash = BlockHeaderHash::from_column(row, "anchored_block_hash")?;
        let parent_anchored_block_hash : BlockHeaderHash = BlockHeaderHash::from_column(row, "parent_anchored_block_hash")?;
        let burn_header_hash : BurnchainHeaderHash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?;
+       let burn_header_timestamp_i64 : i64 = row.get("burn_header_timestamp");
        let parent_burn_header_hash: BurnchainHeaderHash = BurnchainHeaderHash::from_column(row, "parent_burn_header_hash")?;
        let parent_microblock_hash : BlockHeaderHash = BlockHeaderHash::from_column(row, "parent_microblock_hash")?;
        let parent_microblock_seq : u16 = row.get("parent_microblock_seq");

@@ -173,17 +175,29 @@ impl FromRow<StagingBlock> for StagingBlock {
        let sortition_burn_i64 : i64 = row.get("sortition_burn");
        let block_data : Vec<u8> = vec![];

+       if commit_burn_i64 < 0 {
+           return Err(db_error::ParseError);
+       }
+       if sortition_burn_i64 < 0 {
+           return Err(db_error::ParseError);
+       }
+       if burn_header_timestamp_i64 < 0 {
+           return Err(db_error::ParseError);
+       }

        let processed = if processed_i64 != 0 { true } else { false };
        let attacheable = if attacheable_i64 != 0 { true } else { false };
        let orphaned = if orphaned_i64 != 0 { true } else { false };

        let commit_burn = commit_burn_i64 as u64;
        let sortition_burn = sortition_burn_i64 as u64;
+       let burn_header_timestamp = burn_header_timestamp_i64 as u64;

        Ok(StagingBlock {
            anchored_block_hash,
            parent_anchored_block_hash,
            burn_header_hash,
+           burn_header_timestamp,
            parent_burn_header_hash,
            parent_microblock_hash,
            parent_microblock_seq,

@@ -253,6 +267,7 @@ const STACKS_BLOCK_INDEX_SQL : &'static [&'static str]= &[
    CREATE TABLE staging_blocks(anchored_block_hash TEXT NOT NULL,
                                parent_anchored_block_hash TEXT NOT NULL,
                                burn_header_hash TEXT NOT NULL,
+                               burn_header_timestamp INT NOT NULL,
                                parent_burn_header_hash TEXT NOT NULL,
                                parent_microblock_hash TEXT NOT NULL,
                                parent_microblock_seq INT NOT NULL,

@@ -853,9 +868,10 @@ impl StacksChainState {
    /// Store a preprocessed block, queuing it up for subsequent processing.
    /// The caller should at least verify that the block is attached to some fork in the burn
    /// chain.
-   fn store_staging_block<'a>(tx: &mut BlocksDBTx<'a>, burn_hash: &BurnchainHeaderHash, block: &StacksBlock, parent_burn_header_hash: &BurnchainHeaderHash, commit_burn: u64, sortition_burn: u64) -> Result<(), Error> {
+   fn store_staging_block<'a>(tx: &mut BlocksDBTx<'a>, burn_hash: &BurnchainHeaderHash, burn_header_timestamp: u64, block: &StacksBlock, parent_burn_header_hash: &BurnchainHeaderHash, commit_burn: u64, sortition_burn: u64) -> Result<(), Error> {
        assert!(commit_burn < i64::max_value() as u64);
        assert!(sortition_burn < i64::max_value() as u64);
+       assert!(burn_header_timestamp < i64::max_value() as u64);

        let block_hash = block.block_hash();
        let mut block_bytes = vec![];

@@ -878,10 +894,10 @@ impl StacksChainState {

        // store block metadata
        let sql = "INSERT OR REPLACE INTO staging_blocks \
-                  (anchored_block_hash, parent_anchored_block_hash, burn_header_hash, parent_burn_header_hash, parent_microblock_hash, parent_microblock_seq, microblock_pubkey_hash, attacheable, processed, orphaned, commit_burn, sortition_burn) \
-                  VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)";
+                  (anchored_block_hash, parent_anchored_block_hash, burn_header_hash, burn_header_timestamp, parent_burn_header_hash, parent_microblock_hash, parent_microblock_seq, microblock_pubkey_hash, attacheable, processed, orphaned, commit_burn, sortition_burn) \
+                  VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)";
        let args: &[&dyn ToSql] = &[
-           &block_hash, &block.header.parent_block, &burn_hash, &parent_burn_header_hash,
+           &block_hash, &block.header.parent_block, &burn_hash, &(burn_header_timestamp as i64), &parent_burn_header_hash,
            &block.header.parent_microblock, &block.header.parent_microblock_sequence,
            &block.header.microblock_pubkey_hash, &attacheable, &0, &0, &(commit_burn as i64), &(sortition_burn as i64)];
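Each of these INSERT statements grows in three places at once: the column list, the ?N placeholder list, and the bound-argument slice, and nothing but review keeps them in sync. A self-contained sketch of one way to derive the SQL from a single column list (a hypothetical helper, not the repo's approach):

    fn insert_sql(table: &str, columns: &[&str]) -> String {
        // Generate "?1, ?2, ..." so the placeholder count can never drift
        // from the column count.
        let placeholders: Vec<String> = (1..=columns.len())
            .map(|i| format!("?{}", i))
            .collect();
        format!("INSERT INTO {} ({}) VALUES ({})", table, columns.join(", "), placeholders.join(", "))
    }

    fn main() {
        let cols = ["anchored_block_hash", "burn_header_hash", "burn_header_timestamp"];
        assert_eq!(
            insert_sql("staging_blocks", &cols),
            "INSERT INTO staging_blocks (anchored_block_hash, burn_header_hash, burn_header_timestamp) VALUES (?1, ?2, ?3)"
        );
    }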
@@ -1495,7 +1511,7 @@ impl StacksChainState {
    ///
    /// TODO: consider how full the block is (i.e. how much computational budget it consumes) when
    /// deciding whether or not it can be processed.
-   pub fn preprocess_anchored_block<'a>(&mut self, burn_tx: &mut BurnDBTx<'a>, burn_header_hash: &BurnchainHeaderHash, block: &StacksBlock, parent_burn_header_hash: &BurnchainHeaderHash) -> Result<bool, Error> {
+   pub fn preprocess_anchored_block<'a>(&mut self, burn_tx: &mut BurnDBTx<'a>, burn_header_hash: &BurnchainHeaderHash, burn_header_timestamp: u64, block: &StacksBlock, parent_burn_header_hash: &BurnchainHeaderHash) -> Result<bool, Error> {
        // already in queue or already processed?
        if StacksChainState::has_stored_block(&self.blocks_path, burn_header_hash, &block.block_hash())? || StacksChainState::has_staging_block(&self.blocks_db, burn_header_hash, &block.block_hash())? {
            test_debug!("Block already stored and/or processed: {}/{}", burn_header_hash, &block.block_hash());

@@ -1519,7 +1535,7 @@ impl StacksChainState {
        let mut block_tx = self.blocks_tx_begin()?;

        // queue block up for processing
-       StacksChainState::store_staging_block(&mut block_tx, burn_header_hash, &block, parent_burn_header_hash, commit_burn, sortition_burn)?;
+       StacksChainState::store_staging_block(&mut block_tx, burn_header_hash, burn_header_timestamp, &block, parent_burn_header_hash, commit_burn, sortition_burn)?;

        // store users who burned for this block so they'll get rewarded if we process it
        StacksChainState::store_staging_block_user_burn_supports(&mut block_tx, burn_header_hash, &block.block_hash(), &user_burns)?;

@@ -1926,7 +1942,8 @@ impl StacksChainState {
    fn append_block<'a>(chainstate_tx: &mut ChainstateTx<'a>,
                        clarity_instance: &'a mut ClarityInstance,
                        parent_chain_tip: &StacksHeaderInfo,
                        chain_tip_burn_header_hash: &BurnchainHeaderHash,
+                       chain_tip_burn_header_timestamp: u64,
                        block: &StacksBlock,
                        microblocks: &Vec<StacksMicroblock>, // parent microblocks
                        burnchain_commit_burn: u64,

@@ -2053,8 +2070,9 @@ impl StacksChainState {
        let new_tip = StacksChainState::advance_tip(&mut chainstate_tx.headers_tx,
                                                    &parent_chain_tip.anchored_header,
                                                    &parent_chain_tip.burn_header_hash,
                                                    &block.header,
                                                    chain_tip_burn_header_hash,
+                                                   chain_tip_burn_header_timestamp,
                                                    microblock_tail_opt,
                                                    &scheduled_miner_reward,
                                                    user_burns)

@@ -2189,6 +2207,7 @@ impl StacksChainState {
                               clarity_instance,
                               &parent_block_header_info,
                               &next_staging_block.burn_header_hash,
+                              next_staging_block.burn_header_timestamp,
                               &block,
                               &next_microblocks,
                               next_staging_block.commit_burn,

@@ -2464,9 +2483,9 @@ pub mod test {
        assert!(StacksChainState::load_staging_block_data(&chainstate.blocks_db, burn_header, &block.block_hash()).unwrap().is_none());
    }

-   fn store_staging_block(chainstate: &mut StacksChainState, burn_header: &BurnchainHeaderHash, block: &StacksBlock, parent_burn_header: &BurnchainHeaderHash, commit_burn: u64, sortition_burn: u64) {
+   fn store_staging_block(chainstate: &mut StacksChainState, burn_header: &BurnchainHeaderHash, burn_header_timestamp: u64, block: &StacksBlock, parent_burn_header: &BurnchainHeaderHash, commit_burn: u64, sortition_burn: u64) {
        let mut tx = chainstate.blocks_tx_begin().unwrap();
-       StacksChainState::store_staging_block(&mut tx, burn_header, block, parent_burn_header, commit_burn, sortition_burn).unwrap();
+       StacksChainState::store_staging_block(&mut tx, burn_header, burn_header_timestamp, block, parent_burn_header, commit_burn, sortition_burn).unwrap();
        tx.commit().unwrap();
    }

@@ -2553,7 +2572,7 @@ pub mod test {

        assert!(StacksChainState::load_staging_block_data(&chainstate.blocks_db, &BurnchainHeaderHash([2u8; 32]), &block.block_hash()).unwrap().is_none());

-       store_staging_block(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), &block, &BurnchainHeaderHash([1u8; 32]), 1, 2);
+       store_staging_block(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), get_epoch_time_secs(), &block, &BurnchainHeaderHash([1u8; 32]), 1, 2);

        assert_block_staging_not_processed(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), &block);
        assert_block_not_stored(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), &block);

@@ -2577,7 +2596,7 @@ pub mod test {

        assert!(StacksChainState::load_staging_block_data(&chainstate.blocks_db, &BurnchainHeaderHash([2u8; 32]), &block.block_hash()).unwrap().is_none());

-       store_staging_block(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), &block, &BurnchainHeaderHash([1u8; 32]), 1, 2);
+       store_staging_block(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), get_epoch_time_secs(), &block, &BurnchainHeaderHash([1u8; 32]), 1, 2);

        assert_block_staging_not_processed(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), &block);
        assert_block_not_stored(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), &block);

@@ -2604,11 +2623,12 @@ pub mod test {
        assert!(fs::metadata(&path).is_err());
        assert!(!StacksChainState::has_stored_block(&chainstate.blocks_path, &BurnchainHeaderHash([2u8; 32]), &microblocks[0].block_hash()).unwrap());
        assert!(StacksChainState::load_microblock_stream(&chainstate.blocks_path, &BurnchainHeaderHash([2u8; 32]), &microblocks[0].block_hash()).is_err());

        StacksChainState::store_microblock_stream(&chainstate.blocks_path, &BurnchainHeaderHash([2u8; 32]), &microblocks).unwrap();

        assert!(fs::metadata(&path).is_ok());
        assert!(StacksChainState::has_stored_block(&chainstate.blocks_path, &BurnchainHeaderHash([2u8; 32]), &microblocks[0].block_hash()).unwrap());
        assert!(StacksChainState::load_microblock_stream(&chainstate.blocks_path, &BurnchainHeaderHash([2u8; 32]), &microblocks[0].block_hash()).unwrap().is_some());
        assert_eq!(StacksChainState::load_microblock_stream(&chainstate.blocks_path, &BurnchainHeaderHash([2u8; 32]), &microblocks[0].block_hash()).unwrap().unwrap(), microblocks);

@@ -2629,7 +2649,7 @@ pub mod test {
        assert!(StacksChainState::load_staging_microblock(&chainstate.blocks_db, &BurnchainHeaderHash([2u8; 32]), &block.block_hash(), &microblocks[0].block_hash()).unwrap().is_none());
        assert!(StacksChainState::load_staging_microblock_stream(&chainstate.blocks_db, &chainstate.blocks_path, &BurnchainHeaderHash([2u8; 32]), &block.block_hash(), u16::max_value()).unwrap().is_none());

-       store_staging_block(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), &block, &BurnchainHeaderHash([1u8; 32]), 1, 2);
+       store_staging_block(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), get_epoch_time_secs(), &block, &BurnchainHeaderHash([1u8; 32]), 1, 2);
        for mb in microblocks.iter() {
            store_staging_microblock(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), &block.block_hash(), mb);
        }

@@ -2681,7 +2701,7 @@ pub mod test {
        assert!(StacksChainState::load_staging_microblock(&chainstate.blocks_db, &BurnchainHeaderHash([2u8; 32]), &block.block_hash(), &microblocks[0].block_hash()).unwrap().is_none());
        assert!(StacksChainState::load_staging_microblock_stream(&chainstate.blocks_db, &chainstate.blocks_path, &BurnchainHeaderHash([2u8; 32]), &block.block_hash(), u16::max_value()).unwrap().is_none());

-       store_staging_block(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), &block, &BurnchainHeaderHash([1u8; 32]), 1, 2);
+       store_staging_block(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), get_epoch_time_secs(), &block, &BurnchainHeaderHash([1u8; 32]), 1, 2);
        for mb in microblocks.iter() {
            store_staging_microblock(&mut chainstate, &BurnchainHeaderHash([2u8; 32]), &block.block_hash(), mb);
        }

@@ -3009,7 +3029,7 @@ pub mod test {
        // store each block
        for ((block, burn_header), parent_burn_header) in blocks.iter().zip(&burn_headers).zip(&parent_burn_headers) {
            assert!(StacksChainState::load_staging_block_data(&chainstate.blocks_db, burn_header, &block.block_hash()).unwrap().is_none());
-           store_staging_block(&mut chainstate, burn_header, block, parent_burn_header, 1, 2);
+           store_staging_block(&mut chainstate, burn_header, get_epoch_time_secs(), block, parent_burn_header, 1, 2);
            assert_block_staging_not_processed(&mut chainstate, burn_header, block);
        }

@@ -3079,7 +3099,7 @@ pub mod test {
        // store each block, in reverse order!
        for ((block, burn_header), parent_burn_header) in blocks.iter().zip(&burn_headers).zip(&parent_burn_headers).rev() {
            assert!(StacksChainState::load_staging_block_data(&chainstate.blocks_db, burn_header, &block.block_hash()).unwrap().is_none());
-           store_staging_block(&mut chainstate, burn_header, block, parent_burn_header, 1, 2);
+           store_staging_block(&mut chainstate, burn_header, get_epoch_time_secs(), block, parent_burn_header, 1, 2);
            assert_block_staging_not_processed(&mut chainstate, burn_header, block);
        }

@@ -3157,7 +3177,7 @@ pub mod test {
        // store each block in reverse order, except for block_1
        for ((block, burn_header), parent_burn_header) in blocks[1..].iter().zip(&burn_headers[1..]).zip(&parent_burn_headers[1..]).rev() {
            assert!(StacksChainState::load_staging_block_data(&chainstate.blocks_db, burn_header, &block.block_hash()).unwrap().is_none());
-           store_staging_block(&mut chainstate, burn_header, block, parent_burn_header, 1, 2);
+           store_staging_block(&mut chainstate, burn_header, get_epoch_time_secs(), block, parent_burn_header, 1, 2);
            assert_block_staging_not_processed(&mut chainstate, burn_header, block);
        }

@@ -3171,7 +3191,7 @@ pub mod test {

        // store block 1
        assert!(StacksChainState::load_staging_block_data(&chainstate.blocks_db, &burn_headers[0], &block_1.block_hash()).unwrap().is_none());
-       store_staging_block(&mut chainstate, &burn_headers[0], &block_1, &parent_burn_headers[0], 1, 2);
+       store_staging_block(&mut chainstate, &burn_headers[0], get_epoch_time_secs(), &block_1, &parent_burn_headers[0], 1, 2);
        assert_block_staging_not_processed(&mut chainstate, &burn_headers[0], &block_1);

        // first block is attacheable

@@ -3253,7 +3273,7 @@ pub mod test {

        // store block 1 to staging
        assert!(StacksChainState::load_staging_block_data(&chainstate.blocks_db, &burn_headers[0], &blocks[0].block_hash()).unwrap().is_none());
-       store_staging_block(&mut chainstate, &burn_headers[0], &blocks[0], &parent_burn_headers[0], 1, 2);
+       store_staging_block(&mut chainstate, &burn_headers[0], get_epoch_time_secs(), &blocks[0], &parent_burn_headers[0], 1, 2);
        assert_block_staging_not_processed(&mut chainstate, &burn_headers[0], &blocks[0]);

        set_block_processed(&mut chainstate, &burn_headers[0], &blocks[0].block_hash(), true);

@@ -3265,7 +3285,7 @@ pub mod test {
            // this is what happens at the end of append_block()
            // store block to staging and process it
            assert!(StacksChainState::load_staging_block_data(&chainstate.blocks_db, &burn_headers[i], &blocks[i].block_hash()).unwrap().is_none());
-           store_staging_block(&mut chainstate, &burn_headers[i], &blocks[i], &parent_burn_headers[i], 1, 2);
+           store_staging_block(&mut chainstate, &burn_headers[i], get_epoch_time_secs(), &blocks[i], &parent_burn_headers[i], 1, 2);
            assert_block_staging_not_processed(&mut chainstate, &burn_headers[i], &blocks[i]);

            // set different parts of this stream as confirmed

@@ -3337,7 +3357,7 @@ pub mod test {
        // store blocks to staging
        for i in 0..blocks.len() {
            assert!(StacksChainState::load_staging_block_data(&chainstate.blocks_db, &burn_headers[i], &blocks[i].block_hash()).unwrap().is_none());
-           store_staging_block(&mut chainstate, &burn_headers[i], &blocks[i], &parent_burn_headers[i], 1, 2);
+           store_staging_block(&mut chainstate, &burn_headers[i], get_epoch_time_secs(), &blocks[i], &parent_burn_headers[i], 1, 2);
            assert_block_staging_not_processed(&mut chainstate, &burn_headers[i], &blocks[i]);
        }

@@ -3402,7 +3422,7 @@ pub mod test {

        // store block to staging
        assert!(StacksChainState::load_staging_block_data(&chainstate.blocks_db, &burn_header, &block.block_hash()).unwrap().is_none());
-       store_staging_block(&mut chainstate, &burn_header, &block, &parent_burn_header, 1, 2);
+       store_staging_block(&mut chainstate, &burn_header, get_epoch_time_secs(), &block, &parent_burn_header, 1, 2);
        assert_block_staging_not_processed(&mut chainstate, &burn_header, &block);

        // drop microblocks
@@ -112,11 +112,13 @@ impl StacksChainState {
/// Insert a block header that is paired with an already-existing block commit and snapshot
pub fn insert_stacks_block_header<'a>(tx: &mut StacksDBTx<'a>, tip_info: &StacksHeaderInfo) -> Result<(), Error> {
assert_eq!(tip_info.block_height, tip_info.anchored_header.total_work.work);
assert!(tip_info.burn_header_timestamp < i64::max_value() as u64);

let header = &tip_info.anchored_header;
let index_root = &tip_info.index_root;
let burn_header_hash = &tip_info.burn_header_hash;
let block_height = tip_info.block_height;
let burn_header_timestamp = tip_info.burn_header_timestamp;

let total_work_str = format!("{}", header.total_work.work);
let total_burn_str = format!("{}", header.total_work.burn);
@@ -127,11 +129,11 @@ impl StacksChainState {
let args: &[&dyn ToSql] = &[
&header.version, &total_burn_str, &total_work_str, &header.proof, &header.parent_block, &header.parent_microblock, &header.parent_microblock_sequence,
&header.tx_merkle_root, &header.state_index_root, &header.microblock_pubkey_hash,
&block_hash, &tip_info.index_block_hash(), &burn_header_hash, &(block_height as i64), &index_root];
&block_hash, &tip_info.index_block_hash(), &burn_header_hash, &(burn_header_timestamp as i64), &(block_height as i64), &index_root];

tx.execute("INSERT INTO block_headers \
(version, total_burn, total_work, proof, parent_block, parent_microblock, parent_microblock_sequence, tx_merkle_root, state_index_root, microblock_pubkey_hash, block_hash, index_block_hash, burn_header_hash, block_height, index_root) \
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)", args)
(version, total_burn, total_work, proof, parent_block, parent_microblock, parent_microblock_sequence, tx_merkle_root, state_index_root, microblock_pubkey_hash, block_hash, index_block_hash, burn_header_hash, burn_header_timestamp, block_height, index_root) \
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16)", args)
.map_err(|e| Error::DBError(db_error::SqliteError(e)))?;

Ok(())
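For context on why the hunk bounds-checks the timestamp against `i64::max_value()`: SQLite INTEGER columns are signed, so the `u64` must fit in an `i64` on the way in. A self-contained rusqlite sketch of that round trip, not the project's actual DB layer, with the table trimmed to the two relevant columns:

```rust
use rusqlite::{params, Connection};

// Demonstrates the store side of the change above: the u64 timestamp is
// range-checked, cast to i64, and written alongside the burn header hash.
fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute(
        "CREATE TABLE block_headers (burn_header_hash TEXT PRIMARY KEY,
                                     burn_header_timestamp INT NOT NULL)",
        params![],
    )?;

    let burn_header_timestamp: u64 = 1543267060;
    assert!(burn_header_timestamp < i64::max_value() as u64); // same guard as the diff

    conn.execute(
        "INSERT INTO block_headers (burn_header_hash, burn_header_timestamp) VALUES (?1, ?2)",
        params!["00aabb", burn_header_timestamp as i64],
    )?;

    let ts: i64 = conn.query_row(
        "SELECT burn_header_timestamp FROM block_headers WHERE burn_header_hash = ?1",
        params!["00aabb"],
        |row| row.get(0),
    )?;
    assert_eq!(ts as u64, burn_header_timestamp);
    Ok(())
}
```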
@@ -132,7 +132,8 @@ pub struct StacksHeaderInfo {
pub microblock_tail: Option<StacksMicroblockHeader>,
pub block_height: u64,
pub index_root: TrieHash,
pub burn_header_hash: BurnchainHeaderHash
pub burn_header_hash: BurnchainHeaderHash,
pub burn_header_timestamp: u64
}

#[derive(Debug, Clone, PartialEq)]
@@ -153,6 +154,7 @@ impl StacksHeaderInfo {
block_height: StacksBlockHeader::genesis().total_work.work,
index_root: TrieHash([0u8; 32]),
burn_header_hash: FIRST_BURNCHAIN_BLOCK_HASH.clone(),
burn_header_timestamp: FIRST_BURNCHAIN_BLOCK_TIMESTAMP
}
}
pub fn is_genesis(&self) -> bool {
@@ -182,11 +184,15 @@ impl FromRow<StacksHeaderInfo> for StacksHeaderInfo {
let block_height_i64 : i64 = row.get("block_height");
let index_root = TrieHash::from_column(row, "index_root")?;
let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?;
let burn_header_timestamp_i64 : i64 = row.get("burn_header_timestamp");
let stacks_header = StacksBlockHeader::from_row(row)?;

if block_height_i64 < 0 {
return Err(db_error::ParseError);
}
if burn_header_timestamp_i64 < 0 {
return Err(db_error::ParseError);
}

if block_height_i64 as u64 != stacks_header.total_work.work {
return Err(db_error::ParseError);
@@ -197,7 +203,8 @@ impl FromRow<StacksHeaderInfo> for StacksHeaderInfo {
microblock_tail: None,
block_height: block_height_i64 as u64,
index_root: index_root,
burn_header_hash: burn_header_hash
burn_header_hash: burn_header_hash,
burn_header_timestamp: burn_header_timestamp_i64 as u64
})
}
}
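The `FromRow` side mirrors the store side: the timestamp comes back as a signed `i64`, so a negative value means the row is corrupt and parsing fails rather than silently wrapping. The guard in isolation:

```rust
// Minimal sketch of the sign check added to FromRow above.
fn parse_burn_header_timestamp(raw: i64) -> Result<u64, &'static str> {
    if raw < 0 {
        return Err("ParseError: negative burn_header_timestamp");
    }
    Ok(raw as u64)
}

fn main() {
    assert_eq!(parse_burn_header_timestamp(1543272755), Ok(1543272755));
    assert!(parse_burn_header_timestamp(-1).is_err());
}
```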
@@ -321,6 +328,7 @@ const STACKS_CHAIN_STATE_SQL : &'static [&'static str]= &[
block_height INTEGER NOT NULL,
index_root TEXT NOT NULL, -- root hash of the internal, not-consensus-critical MARF that allows us to track chainstate fork metadata
burn_header_hash TEXT UNIQUE NOT NULL, -- all burn header hashes are guaranteed to be unique
burn_header_timestamp INT NOT NULL, -- timestamp from burnchain block header

PRIMARY KEY(burn_header_hash,block_hash)
);
@@ -349,6 +357,7 @@ const STACKS_CHAIN_STATE_SQL : &'static [&'static str]= &[

-- internal use
stacks_block_height INTEGER NOT NULL,
index_block_hash TEXT NOT NULL, -- NOTE: can't enforce UNIQUE here, because there will be multiple entries per block
vtxindex INT NOT NULL -- user burn support vtxindex
);
"#,
@@ -656,6 +665,7 @@ impl StacksChainState {
index_root: first_root_hash,
block_height: 0,
burn_header_hash: FIRST_BURNCHAIN_BLOCK_HASH.clone(),
burn_header_timestamp: FIRST_BURNCHAIN_BLOCK_TIMESTAMP
};

StacksChainState::insert_stacks_block_header(&mut headers_tx, &first_tip_info)?;
@@ -879,6 +889,7 @@ impl StacksChainState {
parent_burn_block: &BurnchainHeaderHash,
new_tip: &StacksBlockHeader,
new_burn_block: &BurnchainHeaderHash,
new_burn_block_timestamp: u64,
microblock_tail_opt: Option<StacksMicroblockHeader>,
block_reward: &MinerPaymentSchedule,
user_burns: &Vec<StagingUserBurnSupport>) -> Result<StacksHeaderInfo, Error>
@@ -890,14 +901,7 @@ impl StacksChainState {
new_tip.total_work.work);
}

let parent_hash =
if parent_tip.is_genesis() {
TrieFileStorage::block_sentinel()
}
else {
parent_tip.index_block_hash(parent_burn_block)
};

let parent_hash = StacksChainState::get_index_hash(parent_burn_block, parent_tip);
let indexed_keys = vec![
format!("chainstate::pubkey_hash::{}", new_tip.microblock_pubkey_hash)
];
@@ -919,7 +923,8 @@ impl StacksChainState {
microblock_tail: microblock_tail_opt,
index_root: root_hash,
block_height: new_tip.total_work.work,
burn_header_hash: new_burn_block.clone()
burn_header_hash: new_burn_block.clone(),
burn_header_timestamp: new_burn_block_timestamp
};

StacksChainState::insert_stacks_block_header(headers_tx, &new_tip_info)?;

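Besides threading `new_burn_block_timestamp` into the new tip, this hunk folds the inline genesis check into a `StacksChainState::get_index_hash` helper so every caller resolves the parent index hash the same way. A stub-typed sketch of that refactor; the sentinel value and field names here are hypothetical:

```rust
#[derive(Debug, PartialEq)]
struct IndexHash([u8; 32]);

struct Tip { is_genesis: bool, hash: [u8; 32] }

// Hypothetical stand-in for TrieFileStorage::block_sentinel().
fn block_sentinel() -> IndexHash { IndexHash([0xff; 32]) }

// The extracted helper: genesis parents get the sentinel, everyone else
// gets their real index block hash.
fn get_index_hash(parent_tip: &Tip) -> IndexHash {
    if parent_tip.is_genesis { block_sentinel() } else { IndexHash(parent_tip.hash) }
}

fn main() {
    let genesis = Tip { is_genesis: true, hash: [0u8; 32] };
    assert_eq!(get_index_hash(&genesis), IndexHash([0xff; 32]));
}
```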
@@ -75,13 +75,14 @@ impl StacksBlockBuilder {
}
}

pub fn first(miner_id: usize, genesis_burn_header_hash: &BurnchainHeaderHash, proof: &VRFProof, microblock_privkey: &StacksPrivateKey) -> StacksBlockBuilder {
pub fn first(miner_id: usize, genesis_burn_header_hash: &BurnchainHeaderHash, genesis_burn_header_timestamp: u64, proof: &VRFProof, microblock_privkey: &StacksPrivateKey) -> StacksBlockBuilder {
let genesis_chain_tip = StacksHeaderInfo {
anchored_header: StacksBlockHeader::genesis(),
microblock_tail: None,
block_height: 0,
index_root: TrieHash([0u8; 32]),
burn_header_hash: genesis_burn_header_hash.clone()
burn_header_hash: genesis_burn_header_hash.clone(),
burn_header_timestamp: genesis_burn_header_timestamp
};

let mut builder = StacksBlockBuilder::from_parent(miner_id, &genesis_chain_tip, &StacksWorkScore::initial(), proof, microblock_privkey);
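A call-shape sketch for the widened constructor, with stub types and the VRF proof and microblock key arguments dropped for brevity: the genesis burn header's timestamp now rides along with its hash into the genesis `StacksHeaderInfo`.

```rust
struct BurnchainHeaderHash([u8; 32]);
struct Builder { burn_ts: u64 }

// Simplified stand-in for StacksBlockBuilder::first.
fn first(_miner_id: usize, _genesis_hash: &BurnchainHeaderHash, genesis_ts: u64) -> Builder {
    Builder { burn_ts: genesis_ts } // seeds the genesis chain tip's timestamp
}

fn main() {
    let genesis = BurnchainHeaderHash([0u8; 32]);
    let builder = first(1, &genesis, 0); // 0 == FIRST_BURNCHAIN_BLOCK_TIMESTAMP
    assert_eq!(builder.burn_ts, 0);
}
```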
@@ -722,7 +723,7 @@ pub mod test {
let (builder, parent_block_snapshot_opt) = match parent_stacks_block {
None => {
// first stacks block
let builder = StacksBlockBuilder::first(miner.id, &burn_block.parent_snapshot.burn_header_hash, &proof, &miner.next_microblock_privkey());
let builder = StacksBlockBuilder::first(miner.id, &burn_block.parent_snapshot.burn_header_hash, burn_block.parent_snapshot.burn_header_timestamp, &proof, &miner.next_microblock_privkey());
(builder, None)
},
Some(parent_stacks_block) => {
@@ -791,7 +792,7 @@ pub mod test {

// "discover" this stacks block
test_debug!("\n\nPreprocess Stacks block {}/{}", &commit_snapshot.burn_header_hash, &block_hash);
let block_res = node.chainstate.preprocess_anchored_block(&mut tx, &commit_snapshot.burn_header_hash, &stacks_block, &parent_block_burn_header_hash).unwrap();
let block_res = node.chainstate.preprocess_anchored_block(&mut tx, &commit_snapshot.burn_header_hash, commit_snapshot.burn_header_timestamp, &stacks_block, &parent_block_burn_header_hash).unwrap();

// "discover" this stacks microblock stream
for mblock in stacks_microblocks.iter() {

@@ -24,13 +24,13 @@ use chainstate::burn::BlockHeaderHash;
use util::log;

// fork set identifier -- to be mixed with the consensus hash (encodes the version)
pub const SYSTEM_FORK_SET_VERSION : [u8; 4] = [22u8, 0u8, 0u8, 0u8];
pub const SYSTEM_FORK_SET_VERSION : [u8; 4] = [23u8, 0u8, 0u8, 0u8];

// p2p network version
pub const PEER_VERSION : u32 = 0x16000000; // 22.0.0.0
pub const PEER_VERSION : u32 = 0x17000000; // 23.0.0.0

// network identifiers
pub const NETWORK_ID_MAINNET : u32 = 0x16000000;
pub const NETWORK_ID_MAINNET : u32 = 0x17000000;
pub const NETWORK_ID_TESTNET : u32 = 0xff000000;

// default port
@@ -38,6 +38,8 @@ pub const NETWORK_P2P_PORT : u16 = 6265;

// first burnchain block hash
pub const FIRST_BURNCHAIN_BLOCK_HASH : BurnchainHeaderHash = BurnchainHeaderHash([0u8; 32]);
pub const FIRST_BURNCHAIN_BLOCK_TIMESTAMP : u64 = 0;

pub const FIRST_BURNCHAIN_BLOCK_HASH_TESTNET : BurnchainHeaderHash = BurnchainHeaderHash([1u8; 32]);
pub const FIRST_BURNCHAIN_BLOCK_HASH_REGTEST : BurnchainHeaderHash = BurnchainHeaderHash([2u8; 32]);

@@ -47,7 +49,7 @@ pub const EMPTY_MICROBLOCK_PARENT_HASH : BlockHeaderHash = BlockHeaderHash([0u8;
pub const BOOT_BLOCK_HASH : BlockHeaderHash = BlockHeaderHash([0xff; 32]);
pub const BURNCHAIN_BOOT_BLOCK_HASH : BurnchainHeaderHash = BurnchainHeaderHash([0xff; 32]);

pub const CHAINSTATE_VERSION: &'static str = "22.0.0.0";
pub const CHAINSTATE_VERSION: &'static str = "23.0.0.0";

/// Synchronize burn transactions from the Bitcoin blockchain
pub fn sync_burnchain_bitcoin(working_dir: &String, network_name: &String) -> Result<u64, burnchain_error> {

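The 22 to 23 bumps (fork set version, peer version, mainnet network id, chainstate version) all encode the same dotted version; per the inline comments above, the `u32` constants pack one version component per byte. A quick check of that encoding:

```rust
// 23.0.0.0 packs to 0x17000000, one byte per component, matching the
// "// 23.0.0.0" comment next to PEER_VERSION in the diff.
fn pack_version(a: u8, b: u8, c: u8, d: u8) -> u32 {
    ((a as u32) << 24) | ((b as u32) << 16) | ((c as u32) << 8) | (d as u32)
}

fn main() {
    assert_eq!(pack_version(23, 0, 0, 0), 0x17000000);
    assert_eq!(pack_version(22, 0, 0, 0), 0x16000000);
}
```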
@@ -94,8 +94,8 @@ impl FromStr for PeerHost {
Ok(socketaddr) => Ok(PeerHost::IP(PeerAddress::from_socketaddr(&socketaddr), socketaddr.port())),
Err(_) => {
// try as DNS-name:port
let mut host = None;
let mut port = None;
let host;
let port;
let parts : Vec<&str> = header.split(":").collect();
if parts.len() == 0 {
return Err(net_error::DeserializeError("Failed to parse PeerHost: no parts".to_string()));

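The `PeerHost` change swaps `let mut host = None` for an uninitialized binding, letting the compiler enforce that each variable is assigned exactly once on every path before use, instead of unwrapping an `Option` later. A standalone illustration of the idiom; the parsing logic is simplified and not the actual function body:

```rust
fn main() {
    let input = "example.com:6265";

    // Deferred initialization: declared without a value, so any read
    // before a guaranteed assignment is a compile error.
    let host: String;
    let port: u16;

    let parts: Vec<&str> = input.split(':').collect();
    if parts.len() == 2 {
        host = parts[0].to_string();
        port = parts[1].parse().expect("bad port");
    } else {
        panic!("expected host:port");
    }

    println!("{}:{}", host, port);
}
```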
@@ -1376,7 +1376,7 @@ mod test {
block_hash: BurnchainHeaderHash(block_hash_bytes),
parent_block_hash: BurnchainHeaderHash(prev_block_hash_bytes),
txs: vec![],
// timestamp: get_epoch_time_secs()
timestamp: get_epoch_time_secs()
})
}

@@ -1835,38 +1835,78 @@ mod test {
// next, make peer 1 discover peer 2's neighbors and peer 2's in/out degree.
// Do two full walks
let mut i = 0;
let mut walk_1_count = 0;
let mut walk_2_count = 0;
while walk_1_count < 20 && walk_2_count < 20 {
let _ = peer_1.step();
let _ = peer_2.step();
let mut did_connect = false;
while !did_connect {
let mut walk_1_count = 0;
let mut walk_2_count = 0;
while walk_1_count < 20 && walk_2_count < 20 {
let _ = peer_1.step();
let _ = peer_2.step();

for j in 0..10 {
let _ = peer_2_neighbors[j].step();
for j in 0..10 {
let _ = peer_2_neighbors[j].step();
}

walk_1_count = peer_1.network.walk_total_step_count;
walk_2_count = peer_2.network.walk_total_step_count;

test_debug!("peer 1 took {} walk steps; peer 2 took {} walk steps", walk_1_count, walk_2_count);

match peer_1.network.walk {
Some(ref w) => {
assert_eq!(w.result.broken_connections.len(), 0);
assert_eq!(w.result.replaced_neighbors.len(), 0);
}
None => {}
};

match peer_2.network.walk {
Some(ref w) => {
assert_eq!(w.result.broken_connections.len(), 0);
assert_eq!(w.result.replaced_neighbors.len(), 0);
}
None => {}
};

i += 1;
}

// peer 1 must have handshaked with all of peer 2's neighbors if this test will pass
let peer_1_dbconn = peer_1.get_peerdb_conn();
let mut num_handshakes = 0;
for peer in &peer_2_neighbors {
let n = peer.to_neighbor();
let p_opt = PeerDB::get_peer(peer_1_dbconn, n.addr.network_id, &n.addr.addrbytes, n.addr.port).unwrap();
match p_opt {
None => {
test_debug!("no such peer: {:?}", &n.addr);
},
Some(p) => {
assert_eq!(p.public_key, n.public_key);
assert_eq!(p.expire_block, n.expire_block);
num_handshakes += 1;
}
}
}

if num_handshakes < 10 {
continue;
}

walk_1_count = peer_1.network.walk_total_step_count;
walk_2_count = peer_2.network.walk_total_step_count;

test_debug!("peer 1 took {} walk steps; peer 2 took {} walk steps", walk_1_count, walk_2_count);

match peer_1.network.walk {
Some(ref w) => {
assert_eq!(w.result.broken_connections.len(), 0);
assert_eq!(w.result.replaced_neighbors.len(), 0);
// peer 1 learned that peer 2 has an out-degree of 10 (10 neighbors) and an in-degree of 1 if this test will pass
let n2 = peer_2.to_neighbor();
let p2_opt = PeerDB::get_peer(peer_1_dbconn, n2.addr.network_id, &n2.addr.addrbytes, n2.addr.port).unwrap();
match p2_opt {
None => {
test_debug!("no peer 2");
},
Some(p2) => {
if p2.out_degree >= 11 && p2.in_degree >= 1 {
assert_eq!(p2.out_degree, 11);
did_connect = true;
}
}
None => {}
};

match peer_2.network.walk {
Some(ref w) => {
assert_eq!(w.result.broken_connections.len(), 0);
assert_eq!(w.result.replaced_neighbors.len(), 0);
}
None => {}
};

i += 1;
}
}

info!("Completed walk round {} step(s)", i);
@@ -1879,37 +1919,6 @@ mod test {
assert!(stats_1.last_recv_time > 0);
assert!(stats_1.bytes_rx > 0);
assert!(stats_1.bytes_tx > 0);

// peer 1 handshaked with all of peer 2's neighbors
let peer_1_dbconn = peer_1.get_peerdb_conn();
for peer in &peer_2_neighbors {
let n = peer.to_neighbor();
let p_opt = PeerDB::get_peer(peer_1_dbconn, n.addr.network_id, &n.addr.addrbytes, n.addr.port).unwrap();
match p_opt {
None => {
test_debug!("no such peer: {:?}", &n.addr);
assert!(false);
},
Some(p) => {
assert_eq!(p.public_key, n.public_key);
assert_eq!(p.expire_block, n.expire_block);
}
}
}

// peer 1 learned that peer 2 has an out-degree of 10 (10 neighbors) and an in-degree of 1
let n2 = peer_2.to_neighbor();
let p2_opt = PeerDB::get_peer(peer_1_dbconn, n2.addr.network_id, &n2.addr.addrbytes, n2.addr.port).unwrap();
match p2_opt {
None => {
test_debug!("no peer 2");
assert!(false);
},
Some(p2) => {
assert_eq!(p2.out_degree, 11);
assert_eq!(p2.in_degree, 1); // just peer 1
}
}
}

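The rewritten test no longer asserts hard after a fixed 20 walk steps; it wraps the whole walk in a retry loop that only exits once peer 1 has actually handshaked with all ten neighbors and observed the expected out-degree. A minimal sketch of that convergence-loop pattern, with a stubbed stand-in for "step the peers, then re-check the peer DB":

```rust
// Pretend one more handshake lands per round; the real test re-runs the
// neighbor walk and re-queries PeerDB instead.
fn step_and_count(rounds: &mut u32) -> usize {
    *rounds += 1;
    (*rounds as usize).min(10)
}

fn main() {
    let mut rounds = 0;
    let mut did_connect = false;
    while !did_connect {
        let num_handshakes = step_and_count(&mut rounds);
        if num_handshakes < 10 {
            continue; // same shape as `if num_handshakes < 10 { continue; }` above
        }
        did_connect = true;
    }
    println!("converged after {} rounds", rounds);
}
```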
#[test]
@@ -1946,39 +1955,94 @@ mod test {
// next, make peer 1 discover peer 2's neighbors and peer 2's in/out degree.
// Do two full walks
let mut i = 0;
let mut walk_1_count = 0;
let mut walk_2_count = 0;
while walk_1_count < 20 && walk_2_count < 20 {
let _ = peer_1.step();
let _ = peer_2.step();

for j in 0..10 {
let _ = peer_2_neighbors[j].step();
let mut did_handshakes = false;
while !did_handshakes {
let mut walk_1_count = 0;
let mut walk_2_count = 0;
while walk_1_count < 20 && walk_2_count < 20 {
let _ = peer_1.step();
let _ = peer_2.step();

for j in 0..10 {
let _ = peer_2_neighbors[j].step();
}

walk_1_count = peer_1.network.walk_total_step_count;
walk_2_count = peer_2.network.walk_total_step_count;

test_debug!("peer 1 took {} walk steps; peer 2 took {} walk steps", walk_1_count, walk_2_count);

match peer_1.network.walk {
Some(ref w) => {
assert_eq!(w.result.broken_connections.len(), 0);
assert_eq!(w.result.replaced_neighbors.len(), 0);
}
None => {}
};

match peer_2.network.walk {
Some(ref w) => {
assert_eq!(w.result.broken_connections.len(), 0);
assert_eq!(w.result.replaced_neighbors.len(), 0);
}
None => {}
};

i += 1;
}

peer_1.dump_frontier();
peer_2.dump_frontier();

// check if peer 1 handshaked with all of peer 2's _fresh_ neighbors
let peer_1_dbconn = peer_1.get_peerdb_conn();
let mut num_contacted = 0; // should be 5 when test finishes
for i in 0..5 {
let peer = &peer_2_neighbors[i];
let n = peer.to_neighbor();
let p_opt = PeerDB::get_peer(peer_1_dbconn, n.addr.network_id, &n.addr.addrbytes, n.addr.port).unwrap();
match p_opt {
None => {
test_debug!("no such peer: {:?}", &n.addr);
},
Some(p) => {
assert_eq!(p.public_key, n.public_key);
assert_eq!(p.expire_block, n.expire_block);
num_contacted += 1;
}
}

let stale_peer = &peer_2_neighbors[i+5];
let stale_n = stale_peer.to_neighbor();
let stale_peer_opt = PeerDB::get_peer(peer_1_dbconn, stale_n.addr.network_id, &stale_n.addr.addrbytes, stale_n.addr.port).unwrap();
match stale_peer_opt {
None => {},
Some(_) => {
test_debug!("stale peer contacted: {:?}", &stale_n.addr);
assert!(false);
}
}
}

if num_contacted < 5 {
continue;
}

walk_1_count = peer_1.network.walk_total_step_count;
walk_2_count = peer_2.network.walk_total_step_count;

test_debug!("peer 1 took {} walk steps; peer 2 took {} walk steps", walk_1_count, walk_2_count);

match peer_1.network.walk {
Some(ref w) => {
assert_eq!(w.result.broken_connections.len(), 0);
assert_eq!(w.result.replaced_neighbors.len(), 0);
// peer 1 learned that peer 2 has an out-degree of 6 (peer_1 + 5 fresh neighbors) and an in-degree of 1
let n2 = peer_2.to_neighbor();
let p2_opt = PeerDB::get_peer(peer_1_dbconn, n2.addr.network_id, &n2.addr.addrbytes, n2.addr.port).unwrap();
match p2_opt {
None => {
test_debug!("no peer 2");
},
Some(p2) => {
if p2.out_degree >= 6 && p2.in_degree >= 1 {
assert_eq!(p2.out_degree, 6);
did_handshakes = true;
}
}
None => {}
};

match peer_2.network.walk {
Some(ref w) => {
assert_eq!(w.result.broken_connections.len(), 0);
assert_eq!(w.result.replaced_neighbors.len(), 0);
}
None => {}
};

i += 1;
}
}
}

info!("Completed walk round {} step(s)", i);
@@ -1990,49 +2054,6 @@ mod test {
assert!(stats_1.last_recv_time > 0);
assert!(stats_1.bytes_rx > 0);
assert!(stats_1.bytes_tx > 0);

// peer 1 handshaked with all of peer 2's _fresh_ neighbors
let peer_1_dbconn = peer_1.get_peerdb_conn();
for i in 0..5 {
let peer = &peer_2_neighbors[i];
let n = peer.to_neighbor();
let p_opt = PeerDB::get_peer(peer_1_dbconn, n.addr.network_id, &n.addr.addrbytes, n.addr.port).unwrap();
match p_opt {
None => {
test_debug!("no such peer: {:?}", &n.addr);
assert!(false);
},
Some(p) => {
assert_eq!(p.public_key, n.public_key);
assert_eq!(p.expire_block, n.expire_block);
}
}

let stale_peer = &peer_2_neighbors[i+5];
let stale_n = stale_peer.to_neighbor();
let stale_peer_opt = PeerDB::get_peer(peer_1_dbconn, stale_n.addr.network_id, &stale_n.addr.addrbytes, stale_n.addr.port).unwrap();
match stale_peer_opt {
None => {},
Some(_) => {
test_debug!("stale peer contacted: {:?}", &stale_n.addr);
assert!(false);
}
}
}

// peer 1 learned that peer 2 has an out-degree of 5 (5 fresh neighbors) and an in-degree of 1
let n2 = peer_2.to_neighbor();
let p2_opt = PeerDB::get_peer(peer_1_dbconn, n2.addr.network_id, &n2.addr.addrbytes, n2.addr.port).unwrap();
match p2_opt {
None => {
test_debug!("no peer 2");
assert!(false);
},
Some(p2) => {
assert_eq!(p2.out_degree, 6);
assert_eq!(p2.in_degree, 1); // just peer 1
}
}
}

#[test]

@@ -1453,18 +1453,20 @@ mod test {

let mut h = p2p.new_handle();

use std::net::TcpListener;
let listener = TcpListener::bind("127.0.0.1:2100").unwrap();

// start fake endpoint, which will accept once and wait 5 seconds
let endpoint_thread = thread::spawn(move || {
use std::net::TcpListener;
let listener = TcpListener::bind("127.0.0.1:2100").unwrap();
let (sock, addr) = listener.accept().unwrap();
test_debug!("Accepted {:?}", &addr);
thread::sleep(time::Duration::from_millis(5000));
});

p2p.bind(&"127.0.0.1:2000".parse().unwrap()).unwrap();

// start dispatcher
let p2p_thread = thread::spawn(move || {
p2p.bind(&"127.0.0.1:2000".parse().unwrap()).unwrap();
for i in 0..3 {
test_debug!("dispatch batch {}", i);
let dispatch_count = p2p.dispatch_requests();

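This test fix moves the bind calls out of the spawned closures: the fake endpoint's listener is created before any thread runs, and `p2p.bind` happens on the main thread, so the connecting side can no longer race listener creation. The same ordering in a standalone program (port 0 is used here to grab any free port; the original pins 2100 and 2000):

```rust
use std::net::{TcpListener, TcpStream};
use std::thread;

fn main() {
    // Bind on the main thread first, before anything tries to connect.
    let listener = TcpListener::bind("127.0.0.1:0").unwrap();
    let addr = listener.local_addr().unwrap();

    // The accepting thread only inherits the already-bound listener.
    let endpoint_thread = thread::spawn(move || {
        let (_sock, peer) = listener.accept().unwrap();
        println!("accepted {}", peer);
    });

    // Safe: the listener provably exists by the time we connect.
    let _conn = TcpStream::connect(addr).unwrap();
    endpoint_thread.join().unwrap();
}
```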
@@ -11,6 +11,7 @@ use chainstate::burn::db::burndb::{BurnDB};
use chainstate::burn::{BlockSnapshot};
use chainstate::burn::operations::{BlockstackOperationType};
use util::hash::Sha256Sum;
use util::get_epoch_time_secs;

/// BurnchainSimulator is simulating a simplistic burnchain.
pub struct BurnchainSimulator {
@@ -62,7 +63,7 @@ impl BurnchainSimulator {
}

pub fn make_genesis_block(&mut self) -> BurnchainState {
let db = match BurnDB::connect(&self.config.burnchain_path, 0, &BurnchainHeaderHash([0u8; 32]), true) {
let db = match BurnDB::connect(&self.config.burnchain_path, 0, &BurnchainHeaderHash([0u8; 32]), get_epoch_time_secs(), true) {
Ok(db) => db,
Err(_) => panic!("Error while connecting to burnchain db")
};
@@ -146,7 +147,8 @@ impl BurnchainSimulator {
current_block.block_height + 1,
&BurnchainHeaderHash::from_bytes(next_hash.as_bytes()).unwrap(),
&current_block.burn_header_hash,
&vec![]));
&vec![],
get_epoch_time_secs()));
block.header(&current_block)
}
}

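The simulator now stamps every fabricated block with wall-clock time, passing the timestamp as the final argument of the block constructor, as the call site above shows. A stub-typed sketch of that call shape; the struct bodies here are stand-ins, not the real burnchain types:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

struct BurnchainHeaderHash([u8; 32]);
struct BitcoinTransaction;

struct BitcoinBlock { height: u64, timestamp: u64 }

impl BitcoinBlock {
    // Mirrors the widened constructor: timestamp rides in last.
    fn new(height: u64, _hash: &BurnchainHeaderHash, _parent: &BurnchainHeaderHash,
           _txs: &Vec<BitcoinTransaction>, timestamp: u64) -> BitcoinBlock {
        BitcoinBlock { height, timestamp }
    }
}

fn main() {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    let h = BurnchainHeaderHash([0u8; 32]);
    let block = BitcoinBlock::new(1, &h, &h, &vec![], now);
    println!("block {} stamped {}", block.height, block.timestamp);
}
```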
@@ -14,6 +14,7 @@ use chainstate::burn::{ConsensusHash, SortitionHash, BlockSnapshot, VRFSeed, Blo
use net::StacksMessageType;
use util::hash::Sha256Sum;
use util::vrf::{VRFProof, VRFPublicKey};
use util::get_epoch_time_secs;

pub const TESTNET_CHAIN_ID: u32 = 0x00000000;

@@ -283,8 +284,9 @@ impl Node {

// Preprocess the anchored block
self.chain_state.preprocess_anchored_block(
&mut tx,
&mut tx,
&burn_header_hash,
get_epoch_time_secs(),
&anchored_block,
&parent_burn_header_hash).unwrap();

@@ -42,7 +42,7 @@ impl <'a> LeaderTenure {
};

let block_builder = match last_sortitioned_block.block_height {
1 => StacksBlockBuilder::first(1, &parent_block.burn_header_hash, &vrf_proof, &microblock_secret_key),
1 => StacksBlockBuilder::first(1, &parent_block.burn_header_hash, parent_block.burn_header_timestamp, &vrf_proof, &microblock_secret_key),
_ => StacksBlockBuilder::from_parent(1, &parent_block, &ratio, &vrf_proof, &microblock_secret_key)
};

@@ -6,7 +6,7 @@ use vm::contracts::Contract;
use vm::errors::{Error, InterpreterError, RuntimeErrorType, CheckErrors, InterpreterResult as Result, IncomparableError};
use vm::types::{Value, OptionalData, TypeSignature, TupleTypeSignature, PrincipalData, StandardPrincipalData, QualifiedContractIdentifier, NONE};

use chainstate::stacks::db::StacksHeaderInfo;
use chainstate::stacks::db::{StacksHeaderInfo, MinerPaymentSchedule};
use chainstate::burn::{VRFSeed, BlockHeaderHash};
use burnchains::BurnchainHeaderHash;

@@ -62,6 +62,14 @@ fn get_stacks_header_info(conn: &DBConn, id_bhh: &BlockHeaderHash) -> Option<Sta
.expect("Unexpected SQL failure querying block header table")
}

fn get_miner_info(conn: &DBConn, id_bhh: &BlockHeaderHash) -> Option<MinerPaymentSchedule> {
conn.query_row("SELECT * FROM payments WHERE index_block_hash = ? AND miner = 1",
[id_bhh].iter(),
|x| MinerPaymentSchedule::from_row(x).expect("Bad payment info in database"))
.optional()
.expect("Unexpected SQL failure querying payment table")
}

impl HeadersDB for DBConn {
fn get_stacks_block_header_hash_for_block(&self, id_bhh: &BlockHeaderHash) -> Option<BlockHeaderHash> {
get_stacks_header_info(self, id_bhh)
@@ -73,8 +81,9 @@ impl HeadersDB for DBConn {
.map(|x| x.burn_header_hash)
}

fn get_burn_block_time_for_block(&self, _id_bhh: &BlockHeaderHash) -> Option<u64> {
panic!("Block time data not available in burn header db")
fn get_burn_block_time_for_block(&self, id_bhh: &BlockHeaderHash) -> Option<u64> {
get_stacks_header_info(self, id_bhh)
.map(|x| x.burn_header_timestamp)
}

fn get_vrf_seed_for_block(&self, id_bhh: &BlockHeaderHash) -> Option<VRFSeed> {
@@ -82,8 +91,9 @@ impl HeadersDB for DBConn {
.map(|x| VRFSeed::from_proof(&x.anchored_header.proof))
}

fn get_miner_address(&self, _id_bhh: &BlockHeaderHash) -> Option<StacksAddress> {
panic!("Miner address data not available in burn header db")
fn get_miner_address(&self, id_bhh: &BlockHeaderHash) -> Option<StacksAddress> {
get_miner_info(self, id_bhh)
.map(|x| x.address)
}
}

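Both `HeadersDB` methods turn a panic into an `Option`-returning lookup against the headers (and payments) tables. The shape of that change in miniature, with a `HashMap` standing in for the SQLite-backed store:

```rust
use std::collections::HashMap;

// Stand-in lookup: a known block id yields its recorded burn header
// timestamp; an unknown id yields None instead of panicking.
fn get_burn_block_time(headers: &HashMap<[u8; 32], u64>, id: &[u8; 32]) -> Option<u64> {
    headers.get(id).copied()
}

fn main() {
    let mut headers = HashMap::new();
    headers.insert([1u8; 32], 1543267060);
    assert_eq!(get_burn_block_time(&headers, &[1u8; 32]), Some(1543267060));
    assert_eq!(get_burn_block_time(&headers, &[2u8; 32]), None);
}
```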
@@ -72,7 +72,9 @@ const GET_INFO_CONTRACT: &'static str = "
((stacks-hash (buff 32))
(id-hash (buff 32))
(btc-hash (buff 32))
(vrf-seed (buff 32))))
(vrf-seed (buff 32))
(burn-block-time uint)
(stacks-miner principal)))
(define-private (test-1) (get-block-info? time u1))
(define-private (test-2) (get-block-info? time block-height))
(define-private (test-3) (get-block-info? time u100000))
@@ -80,6 +82,9 @@ const GET_INFO_CONTRACT: &'static str = "
(define-private (test-5) (get-block-info? header-hash (- block-height u1)))
(define-private (test-6) (get-block-info? burnchain-header-hash u1))
(define-private (test-7) (get-block-info? vrf-seed u1))
(define-private (test-8) (get-block-info? miner-address u1))
(define-private (test-9) (get-block-info? miner-address block-height))
(define-private (test-10) (get-block-info? miner-address u100000))

(define-private (get-block-id-hash (height uint)) (unwrap-panic
(get id-hash (map-get? block-data ((height height))))))
@@ -105,14 +110,22 @@ const GET_INFO_CONTRACT: &'static str = "
(print (get vrf-seed block-info)))
(is-eq (print (unwrap-panic (at-block block-to-check (get-block-info? burnchain-header-hash (- block-height u1)))))
(print (unwrap-panic (get-block-info? burnchain-header-hash (- height u1))))
(print (get btc-hash block-info))))))
(print (get btc-hash block-info)))
(is-eq (print (unwrap-panic (at-block block-to-check (get-block-info? time (- block-height u1)))))
(print (unwrap-panic (get-block-info? time (- height u1))))
(print (get burn-block-time block-info)))
(is-eq (print (unwrap-panic (at-block block-to-check (get-block-info? miner-address (- block-height u1)))))
(print (unwrap-panic (get-block-info? miner-address (- height u1))))
(print (get stacks-miner block-info))))))

(define-private (inner-update-info (height uint))
(let ((value (tuple
(stacks-hash (unwrap-panic (get-block-info? header-hash height)))
(id-hash (unwrap-panic (get-block-info? id-header-hash height)))
(btc-hash (unwrap-panic (get-block-info? burnchain-header-hash height)))
(vrf-seed (unwrap-panic (get-block-info? vrf-seed height))))))
(vrf-seed (unwrap-panic (get-block-info? vrf-seed height)))
(burn-block-time (unwrap-panic (get-block-info? time height)))
(stacks-miner (unwrap-panic (get-block-info? miner-address height))))))
(ok (map-set block-data ((height height)) value))))

(define-public (update-info)
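The new test-9/test-10 entries mirror test-2/test-3: per the assertions further down, `get-block-info?` returns `none` when queried at the current block height or at a height past the tip, and `some` only for past blocks. A toy Rust model of that optional behavior; the function below is a stand-in, not Clarity's implementation:

```rust
// Only strictly-past blocks are queryable; the current height and
// out-of-range heights both yield None, which is what test-2/3/9/10 assert.
fn block_info_time(chain: &[u64], height: usize, current: usize) -> Option<u64> {
    if height >= current {
        return None;
    }
    chain.get(height).copied()
}

fn main() {
    let burn_times = vec![1543267060, 1543272755];
    let current = burn_times.len();
    assert_eq!(block_info_time(&burn_times, 1, current), Some(1543272755));
    assert_eq!(block_info_time(&burn_times, current, current), None); // test-9 analogue
    assert_eq!(block_info_time(&burn_times, 100000, current), None);  // test-10 analogue
}
```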
@@ -176,11 +189,44 @@ fn integration_test_get_info() {
eprintln!("Current Block: {} Parent Block: {}", bhh, parent);
let parent_val = Value::buff_from(parent.as_bytes().to_vec()).unwrap();

// find header metadata
let mut headers = vec![];
for block in blocks.iter() {
let header = StacksChainState::get_anchored_block_header_info(&chain_state.headers_db, &block.0, &block.1).unwrap().unwrap();
headers.push(header);
}

let tip_header_info = headers.last().unwrap();

// find miner metadata
let mut miners = vec![];
for block in blocks.iter() {
let miner = StacksChainState::get_miner_info(&chain_state.headers_db, &block.0, &block.1).unwrap().unwrap();
miners.push(miner);
}

let tip_miner = miners.last().unwrap();

assert_eq!(
chain_state.clarity_eval_read_only(
bhh, &contract_identifier, "block-height"),
Value::UInt(2));

assert_eq!(
chain_state.clarity_eval_read_only(
bhh, &contract_identifier, "(test-1)"),
Value::some(Value::UInt(headers[0].burn_header_timestamp as u128)));

assert_eq!(
chain_state.clarity_eval_read_only(
bhh, &contract_identifier, "(test-2)"),
Value::none());

assert_eq!(
chain_state.clarity_eval_read_only(
bhh, &contract_identifier, "(test-3)"),
Value::none());

assert_eq!(
chain_state.clarity_eval_read_only(
bhh, &contract_identifier, "(test-4 u1)"),
@@ -209,6 +255,23 @@ fn integration_test_get_info() {
chain_state.clarity_eval_read_only(
bhh, &contract_identifier, "(test-7)"),
Value::some(Value::buff_from(last_vrf_seed).unwrap()));

// verify that we can get the block miner
assert_eq!(
chain_state.clarity_eval_read_only(
bhh, &contract_identifier, "(test-8)"),
Value::some(Value::Principal(miners[0].address.to_account_principal())));

assert_eq!(
chain_state.clarity_eval_read_only(
bhh, &contract_identifier, "(test-9)"),
Value::none());

assert_eq!(
chain_state.clarity_eval_read_only(
bhh, &contract_identifier, "(test-10)"),
Value::none());

},
3 => {
assert_eq!(Value::Bool(true), chain_state.clarity_eval_read_only(