From 74cc4169524696a4246f7a05ade0ff76b97f27f1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 00:11:32 -0400 Subject: [PATCH 01/20] chore: convert immunefi bug report into test vectors and update the indexer's find_bitcoin_reorg() method to check the original and reorg chain's total work in order to decide whether to move forward with reorg processing. --- src/burnchains/bitcoin/indexer.rs | 435 ++++++++++++++++++++++++++++-- 1 file changed, 416 insertions(+), 19 deletions(-) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index 1ffd8ed1b..07814ab5c 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -385,41 +385,111 @@ impl BitcoinIndexer { .and_then(|_r| Ok(spv_client.end_block_height.unwrap())) } + #[cfg(test)] + fn new_reorg_spv_client( + reorg_headers_path: &str, + start_block: u64, + end_block: Option, + network_id: BitcoinNetworkType, + ) -> Result { + SpvClient::new_without_migration( + &reorg_headers_path, + start_block, + end_block, + network_id, + true, + true, + ) + } + + #[cfg(not(test))] + fn new_reorg_spv_client( + reorg_headers_path: &str, + start_block: u64, + end_block: Option, + network_id: BitcoinNetworkType, + ) -> Result { + SpvClient::new( + &reorg_headers_path, + start_block, + end_block, + network_id, + true, + true, + ) + } + /// Create a SPV client for starting reorg processing fn setup_reorg_headers( &mut self, canonical_spv_client: &SpvClient, reorg_headers_path: &str, start_block: u64, + remove_old: bool, ) -> Result { - if PathBuf::from(&reorg_headers_path).exists() { - fs::remove_file(&reorg_headers_path).map_err(|e| { - error!("Failed to remove {}", reorg_headers_path); - btc_error::Io(e) - })?; + if remove_old { + if PathBuf::from(&reorg_headers_path).exists() { + fs::remove_file(&reorg_headers_path).map_err(|e| { + error!("Failed to remove {}", reorg_headers_path); + btc_error::Io(e) + })?; + } } // bootstrap reorg client - let mut reorg_spv_client = SpvClient::new( - &reorg_headers_path, + let mut reorg_spv_client = BitcoinIndexer::new_reorg_spv_client( + reorg_headers_path, start_block, Some(start_block + REORG_BATCH_SIZE), self.runtime.network_id, - true, - true, )?; + if start_block > 0 { - let start_header = canonical_spv_client - .read_block_header(start_block)? - .expect(&format!("BUG: missing block header for {}", start_block)); - reorg_spv_client.insert_block_headers_before(start_block - 1, vec![start_header])?; + if start_block > BLOCK_DIFFICULTY_CHUNK_SIZE { + if remove_old { + let interval_start_block = start_block / BLOCK_DIFFICULTY_CHUNK_SIZE - 2; + let base_block = interval_start_block * BLOCK_DIFFICULTY_CHUNK_SIZE; + let interval_headers = + canonical_spv_client.read_block_headers(base_block, start_block)?; + assert!( + interval_headers.len() == (start_block - base_block) as usize, + "BUG: missing headers for {}-{}", + base_block, + start_block + ); + + test_debug!( + "Copy headers {}-{}", + base_block, + base_block + interval_headers.len() as u64 + ); + reorg_spv_client + .insert_block_headers_before(base_block - 1, interval_headers)?; + + let last_interval = canonical_spv_client.find_highest_work_score_interval()?; + + // copy over the relevant difficulty intervals as well + for interval in interval_start_block..(last_interval + 1) { + test_debug!("Copy interval {} to {}", interval, &reorg_headers_path); + let work_score = canonical_spv_client + .find_interval_work(interval)? 
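+                            // the canonical client keeps a work score for every interval up to
+                            // last_interval, so a missing score here is a bug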
+ .expect(&format!("FATAL: no work score for interval {}", interval)); + reorg_spv_client.store_interval_work(interval, work_score)?; + } + } + } else { + // no full difficulty intervals yet + let interval_headers = canonical_spv_client.read_block_headers(1, start_block)?; + + reorg_spv_client.insert_block_headers_before(0, interval_headers)?; + } } Ok(reorg_spv_client) } /// Search for a bitcoin reorg. Return the offset into the canonical bitcoin headers where - /// the reorg starts. Returns the hight of the highest common ancestor, and its block hash. + /// the reorg starts. Returns the hight of the highest common ancestor. /// Note that under certain testnet settings, the bitcoin chain itself can shrink. pub fn find_bitcoin_reorg( &mut self, @@ -454,7 +524,7 @@ impl BitcoinIndexer { // bootstrap reorg client let mut start_block = canonical_end_block.saturating_sub(REORG_BATCH_SIZE); let mut reorg_spv_client = - self.setup_reorg_headers(&orig_spv_client, reorg_headers_path, start_block)?; + self.setup_reorg_headers(&orig_spv_client, reorg_headers_path, start_block, true)?; let mut discontiguous_header_error_count = 0; while !found_common_ancestor { @@ -493,6 +563,7 @@ impl BitcoinIndexer { &orig_spv_client, reorg_headers_path, start_block, + false, )?; continue; } @@ -600,10 +671,27 @@ impl BitcoinIndexer { // try again start_block = start_block.saturating_sub(REORG_BATCH_SIZE); reorg_spv_client = - self.setup_reorg_headers(&orig_spv_client, reorg_headers_path, start_block)?; + self.setup_reorg_headers(&orig_spv_client, reorg_headers_path, start_block, false)?; } - debug!("Bitcoin headers history is consistent up to {}", new_tip); + let reorg_total_work = reorg_spv_client.update_chain_work()?; + let orig_total_work = orig_spv_client.get_chain_work()?; + + debug!("Bitcoin headers history is consistent up to {}. Orig chainwork: {}, reorg chainwork: {}", new_tip, orig_total_work, reorg_total_work); + if orig_total_work < reorg_total_work { + let reorg_tip = reorg_spv_client.get_headers_height()?; + let hdr_reorg = reorg_spv_client + .read_block_header(reorg_tip - 1)? + .expect("FATAL: no tip hash for existing chain tip"); + info!( + "New canonical Bitcoin chain found! 
New tip is {}", + &hdr_reorg.header.bitcoin_hash() + ); + } else { + // ignore the reorg + test_debug!("Reorg chain does not overtake original Bitcoin chain"); + new_tip = orig_spv_client.get_headers_height()?; + } let hdr_reorg = reorg_spv_client.read_block_header(new_tip)?; let hdr_canonical = orig_spv_client.read_block_header(new_tip)?; @@ -1224,8 +1312,8 @@ mod test { peer_port: port, rpc_port: port + 1, // ignored rpc_ssl: false, - username: None, - password: None, + username: Some("blockstack".to_string()), + password: Some("blockstacksystem".to_string()), timeout: 30, spv_headers_path: "/tmp/test_indexer_sync_headers.sqlite".to_string(), first_block: 0, @@ -1241,4 +1329,313 @@ mod test { let last_block = indexer.sync_headers(0, None).unwrap(); eprintln!("sync'ed to block {}", last_block); } + + #[test] + fn test_spv_check_work_reorg_ignored() { + if !env::var("BLOCKSTACK_SPV_HEADERS_DB").is_ok() { + eprintln!("Skipping test_spv_check_work_reorg_ignored -- no BLOCKSTACK_SPV_HEADERS_DB envar set"); + return; + } + let db_path_source = env::var("BLOCKSTACK_SPV_HEADERS_DB").unwrap(); + let db_path = "/tmp/test_spv_check_work_reorg_ignored.dat".to_string(); + let reorg_db_path = "/tmp/test_spv_check_work_ignored.dat.reorg".to_string(); + + if fs::metadata(&db_path).is_ok() { + fs::remove_file(&db_path).unwrap(); + } + + if fs::metadata(&reorg_db_path).is_ok() { + fs::remove_file(&reorg_db_path).unwrap(); + } + + fs::copy(&db_path_source, &db_path).unwrap(); + + { + // set up SPV client so we don't have chain work at first + let mut spv_client = SpvClient::new_without_migration( + &db_path, + 0, + None, + BitcoinNetworkType::Mainnet, + true, + false, + ) + .unwrap(); + + assert!( + spv_client.get_headers_height().unwrap() >= 40322, + "This test needs headers up to 40320" + ); + spv_client.drop_headers(40320).unwrap(); + } + + let mut spv_client = + SpvClient::new(&db_path, 0, None, BitcoinNetworkType::Mainnet, true, false).unwrap(); + + assert_eq!(spv_client.get_headers_height().unwrap(), 40321); + let total_work_before = spv_client.update_chain_work().unwrap(); + assert_eq!(total_work_before, spv_client.get_chain_work().unwrap()); + + let total_work_before_idempotent = spv_client.update_chain_work().unwrap(); + assert_eq!(total_work_before, total_work_before_idempotent); + + // fake block headers for mainnet 40319-40320, which is on a difficulty adjustment boundary + let bad_headers = vec![ + LoneBlockHeader { + header: BlockHeader { + version: 1, + prev_blockhash: Sha256dHash::from_hex( + "000000000683a474ef810000fd22f0edde4cf33ae76ae506b220e57aeeafeaa4", + ) + .unwrap(), + merkle_root: Sha256dHash::from_hex( + "b4d736ca74838036ebd19b085c3eeb9ffec2307f6452347cdd8ddaa249686f39", + ) + .unwrap(), + time: 1716199659, + bits: 486575299, + nonce: 201337507, + }, + tx_count: VarInt(0), + }, + LoneBlockHeader { + header: BlockHeader { + version: 1, + prev_blockhash: Sha256dHash::from_hex( + "000000006f403731d720174cd6875e331ac079b438cf53aa685f9cd068fd4ca8", + ) + .unwrap(), + merkle_root: Sha256dHash::from_hex( + "a86b3c149f204d4cb47c67bf9bfeea2719df101dd6e6fc3f0e60d86efeba22a8", + ) + .unwrap(), + time: 1716161259, + bits: 486604799, + nonce: 144574511, + }, + tx_count: VarInt(0), + }, + ]; + + let mut indexer = BitcoinIndexer::new( + BitcoinIndexerConfig::test_default(db_path.to_string()), + BitcoinIndexerRuntime::new(BitcoinNetworkType::Mainnet), + ); + + let mut inserted_bad_header = false; + + let new_tip = indexer + .find_bitcoin_reorg( + &db_path, + &reorg_db_path, + |ref mut indexer, 
ref mut reorg_spv_client, start_block, end_block_opt| { + let end_block = + end_block_opt.unwrap_or(start_block + BLOCK_DIFFICULTY_CHUNK_SIZE); + + let mut ret = vec![]; + for block_height in start_block..end_block { + if block_height > 40320 { + break; + } + if block_height >= 40319 && block_height <= 40320 { + test_debug!("insert bad header {}", block_height); + ret.push(bad_headers[(block_height - 40319) as usize].clone()); + inserted_bad_header = true; + } else { + let orig_spv_client = SpvClient::new_without_migration( + &db_path, + 0, + None, + BitcoinNetworkType::Mainnet, + true, + false, + ) + .unwrap(); + let hdr = orig_spv_client.read_block_header(block_height)?.unwrap(); + ret.push(hdr); + } + } + + test_debug!( + "add headers after {} (bad header: {})", + start_block, + inserted_bad_header + ); + reorg_spv_client + .insert_block_headers_after(start_block - 1, ret) + .unwrap(); + Ok(()) + }, + ) + .unwrap(); + + assert!(inserted_bad_header); + + // reorg is ignored + assert_eq!(new_tip, 40321); + let hdr = spv_client.read_block_header(new_tip - 1).unwrap().unwrap(); + eprintln!("{}", &hdr.header.bitcoin_hash()); + let total_work_after = spv_client.update_chain_work().unwrap(); + assert_eq!(total_work_after, total_work_before); + } + + #[test] + fn test_spv_check_work_reorg_accepted() { + if !env::var("BLOCKSTACK_SPV_HEADERS_DB").is_ok() { + eprintln!("Skipping test_spv_check_work_reorg_accepted -- no BLOCKSTACK_SPV_HEADERS_DB envar set"); + return; + } + let db_path_source = env::var("BLOCKSTACK_SPV_HEADERS_DB").unwrap(); + let db_path = "/tmp/test_spv_check_work_reorg_accepted.dat".to_string(); + let reorg_db_path = "/tmp/test_spv_check_work_reorg_accepted.dat.reorg".to_string(); + + if fs::metadata(&db_path).is_ok() { + fs::remove_file(&db_path).unwrap(); + } + + if fs::metadata(&reorg_db_path).is_ok() { + fs::remove_file(&reorg_db_path).unwrap(); + } + + fs::copy(&db_path_source, &db_path).unwrap(); + + // set up SPV client so we don't have chain work at first + let mut spv_client = SpvClient::new_without_migration( + &db_path, + 0, + None, + BitcoinNetworkType::Mainnet, + true, + false, + ) + .unwrap(); + + assert!( + spv_client.get_headers_height().unwrap() >= 40322, + "This test needs headers up to 40320" + ); + spv_client.drop_headers(40320).unwrap(); + + assert_eq!(spv_client.get_headers_height().unwrap(), 40321); + + // fake block headers for mainnet 40319-40320, which is on a difficulty adjustment boundary + let bad_headers = vec![ + LoneBlockHeader { + header: BlockHeader { + version: 1, + prev_blockhash: Sha256dHash::from_hex( + "000000000683a474ef810000fd22f0edde4cf33ae76ae506b220e57aeeafeaa4", + ) + .unwrap(), + merkle_root: Sha256dHash::from_hex( + "b4d736ca74838036ebd19b085c3eeb9ffec2307f6452347cdd8ddaa249686f39", + ) + .unwrap(), + time: 1716199659, + bits: 486575299, + nonce: 201337507, + }, + tx_count: VarInt(0), + }, + LoneBlockHeader { + header: BlockHeader { + version: 1, + prev_blockhash: Sha256dHash::from_hex( + "000000006f403731d720174cd6875e331ac079b438cf53aa685f9cd068fd4ca8", + ) + .unwrap(), + merkle_root: Sha256dHash::from_hex( + "a86b3c149f204d4cb47c67bf9bfeea2719df101dd6e6fc3f0e60d86efeba22a8", + ) + .unwrap(), + time: 1716161259, + bits: 486604799, + nonce: 144574511, + }, + tx_count: VarInt(0), + }, + ]; + + // get the canonical chain's headers for this range + let good_headers = spv_client.read_block_headers(40319, 40321).unwrap(); + assert_eq!(good_headers.len(), 2); + assert_eq!( + good_headers[0].header.prev_blockhash, + 
bad_headers[0].header.prev_blockhash + ); + assert!(good_headers[0].header != bad_headers[0].header); + assert!(good_headers[1].header != bad_headers[1].header); + + // put these bad headers into the "main" chain + spv_client + .insert_block_headers_after(40318, bad_headers.clone()) + .unwrap(); + + // *now* calculate main chain work + SpvClient::test_db_migrate(spv_client.conn_mut()).unwrap(); + let total_work_before = spv_client.update_chain_work().unwrap(); + assert_eq!(total_work_before, spv_client.get_chain_work().unwrap()); + + let total_work_before_idempotent = spv_client.update_chain_work().unwrap(); + assert_eq!(total_work_before, total_work_before_idempotent); + + let mut indexer = BitcoinIndexer::new( + BitcoinIndexerConfig::test_default(db_path.to_string()), + BitcoinIndexerRuntime::new(BitcoinNetworkType::Mainnet), + ); + + let mut inserted_good_header = false; + + let new_tip = indexer + .find_bitcoin_reorg( + &db_path, + &reorg_db_path, + |ref mut indexer, ref mut reorg_spv_client, start_block, end_block_opt| { + let end_block = + end_block_opt.unwrap_or(start_block + BLOCK_DIFFICULTY_CHUNK_SIZE); + + let mut ret = vec![]; + for block_height in start_block..end_block { + if block_height > 40320 { + break; + } + if block_height >= 40319 && block_height <= 40320 { + test_debug!("insert good header {}", block_height); + ret.push(good_headers[(block_height - 40319) as usize].clone()); + inserted_good_header = true; + } else { + let orig_spv_client = SpvClient::new_without_migration( + &db_path, + 0, + None, + BitcoinNetworkType::Mainnet, + true, + false, + ) + .unwrap(); + let hdr = orig_spv_client.read_block_header(block_height)?.unwrap(); + ret.push(hdr); + } + } + + test_debug!( + "add headers after {} (good header: {})", + start_block, + inserted_good_header + ); + reorg_spv_client + .insert_block_headers_after(start_block - 1, ret) + .unwrap(); + Ok(()) + }, + ) + .unwrap(); + + assert!(inserted_good_header); + + // chain reorg detected! 
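+        // block 40318 is the last header shared by both chains (the divergent headers
+        // start at 40319), so it is reported as the highest common ancestor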
+ assert_eq!(new_tip, 40318); + let total_work_after = spv_client.update_chain_work().unwrap(); + assert_eq!(total_work_after, total_work_before); + } } From 04d2275aa05527b9f7f536280d04f4323113bd91 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 00:12:26 -0400 Subject: [PATCH 02/20] chore: add InvalidDifficulty variant --- src/burnchains/bitcoin/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/burnchains/bitcoin/mod.rs b/src/burnchains/bitcoin/mod.rs index 3ce099dc6..c9b7ab5d3 100644 --- a/src/burnchains/bitcoin/mod.rs +++ b/src/burnchains/bitcoin/mod.rs @@ -79,6 +79,8 @@ pub enum Error { MissingHeader, /// Invalid target InvalidPoW, + /// Bad difficulty + InvalidDifficulty, /// Wrong number of bytes for constructing an address InvalidByteSequence, /// Configuration error @@ -107,6 +109,7 @@ impl fmt::Display for Error { Error::NoncontiguousHeader => write!(f, "Non-contiguous header"), Error::MissingHeader => write!(f, "Missing header"), Error::InvalidPoW => write!(f, "Invalid proof of work"), + Error::InvalidDifficulty => write!(f, "Chain difficulty cannot decrease"), Error::InvalidByteSequence => write!(f, "Invalid sequence of bytes"), Error::ConfigError(ref e_str) => fmt::Display::fmt(e_str, f), Error::BlockchainHeight => write!(f, "Value is beyond the end of the blockchain"), @@ -133,6 +136,7 @@ impl error::Error for Error { Error::NoncontiguousHeader => None, Error::MissingHeader => None, Error::InvalidPoW => None, + Error::InvalidDifficulty => None, Error::InvalidByteSequence => None, Error::ConfigError(ref _e_str) => None, Error::BlockchainHeight => None, From 998b9b73d8db73e7cd584fdba6d48c0203b352e3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 00:12:39 -0400 Subject: [PATCH 03/20] feat: update the SPV client to track the total chain work over time. BUT, right now the work calculation for a difficulty adjustment interval is getting the wrong answers and I don't know why. 
--- src/burnchains/bitcoin/spv.rs | 452 +++++++++++++++++++++++++++++++--- 1 file changed, 421 insertions(+), 31 deletions(-) diff --git a/src/burnchains/bitcoin/spv.rs b/src/burnchains/bitcoin/spv.rs index 7631aae25..c86d8b8df 100644 --- a/src/burnchains/bitcoin/spv.rs +++ b/src/burnchains/bitcoin/spv.rs @@ -39,13 +39,14 @@ use crate::burnchains::bitcoin::Error as btc_error; use crate::burnchains::bitcoin::PeerMessage; use rusqlite::types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; +use rusqlite::OptionalExtension; use rusqlite::Row; use rusqlite::Transaction; use rusqlite::{Connection, OpenFlags, NO_PARAMS}; use crate::util_lib::db::{ - query_row, query_rows, sqlite_open, tx_begin_immediate, tx_busy_handler, u64_to_sql, DBConn, - DBTx, Error as db_error, FromColumn, FromRow, + query_int, query_row, query_rows, sqlite_open, tx_begin_immediate, tx_busy_handler, u64_to_sql, + DBConn, DBTx, Error as db_error, FromColumn, FromRow, }; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{hex_bytes, to_hex}; @@ -67,7 +68,7 @@ const BITCOIN_GENESIS_BLOCK_HASH_REGTEST: &'static str = pub const BLOCK_DIFFICULTY_CHUNK_SIZE: u64 = 2016; const BLOCK_DIFFICULTY_INTERVAL: u32 = 14 * 24 * 60 * 60; // two weeks, in seconds -pub const SPV_DB_VERSION: &'static str = "1"; +pub const SPV_DB_VERSION: &'static str = "2"; const SPV_INITIAL_SCHEMA: &[&'static str] = &[ r#" @@ -84,6 +85,13 @@ const SPV_INITIAL_SCHEMA: &[&'static str] = &[ "CREATE TABLE db_config(version TEXT NOT NULL);", ]; +const SPV_SCHEMA_2: &[&'static str] = &[r#" + CREATE TABLE chain_work( + interval INTEGER PRIMARY KEY, + work TEXT NOT NULL -- 32-byte (256-bit) integer + ); + "#]; + pub struct SpvClient { pub headers_path: String, pub start_block_height: u64, @@ -93,6 +101,9 @@ pub struct SpvClient { readwrite: bool, reverse_order: bool, headers_db: DBConn, + + // only writeable in #[cfg(test)] + ignore_work_checks: bool, } impl FromColumn for Sha256dHash { @@ -130,7 +141,7 @@ impl SpvClient { readwrite: bool, reverse_order: bool, ) -> Result { - let conn = SpvClient::db_open(headers_path, readwrite)?; + let conn = SpvClient::db_open(headers_path, readwrite, true)?; let mut client = SpvClient { headers_path: headers_path.to_owned(), start_block_height: start_block, @@ -140,19 +151,59 @@ impl SpvClient { readwrite: readwrite, reverse_order: reverse_order, headers_db: conn, + ignore_work_checks: false, }; if readwrite { - client.init_block_headers()?; + client.init_block_headers(true)?; } Ok(client) } + #[cfg(test)] + pub fn new_without_migration( + headers_path: &str, + start_block: u64, + end_block: Option, + network_id: BitcoinNetworkType, + readwrite: bool, + reverse_order: bool, + ) -> Result { + let conn = SpvClient::db_open(headers_path, readwrite, false)?; + let mut client = SpvClient { + headers_path: headers_path.to_owned(), + start_block_height: start_block, + end_block_height: end_block, + cur_block_height: start_block, + network_id: network_id, + readwrite: readwrite, + reverse_order: reverse_order, + headers_db: conn, + ignore_work_checks: true, + }; + + if readwrite { + client.init_block_headers(false)?; + } + + Ok(client) + } + + #[cfg(test)] + pub fn set_ignore_work_checks(&mut self, ignore: bool) { + self.ignore_work_checks = ignore; + } + pub fn conn(&self) -> &DBConn { &self.headers_db } + #[cfg(test)] + pub fn conn_mut(&mut self) -> &mut DBConn { + &mut self.headers_db + } + pub fn tx_begin<'a>(&'a mut self) -> Result, btc_error> { if !self.readwrite { return 
Err(db_error::ReadOnly.into()); @@ -169,6 +220,9 @@ impl SpvClient { for row_text in SPV_INITIAL_SCHEMA { tx.execute_batch(row_text).map_err(db_error::SqliteError)?; } + for row_text in SPV_SCHEMA_2 { + tx.execute_batch(row_text).map_err(db_error::SqliteError)?; + } tx.execute( "INSERT INTO db_config (version) VALUES (?1)", @@ -180,7 +234,57 @@ impl SpvClient { Ok(()) } - fn db_open(headers_path: &str, readwrite: bool) -> Result { + fn db_get_version(conn: &DBConn) -> Result { + let version_str = conn + .query_row("SELECT MAX(version) FROM db_config", NO_PARAMS, |row| { + let version: String = row.get_unwrap(0); + Ok(version) + }) + .optional() + .map_err(db_error::SqliteError)? + .unwrap_or("0".to_string()); + Ok(version_str) + } + + fn db_set_version(tx: &Transaction, version: &str) -> Result<(), btc_error> { + tx.execute("UPDATE db_config SET version = ?1", &[version]) + .map_err(db_error::SqliteError) + .map_err(|e| e.into()) + .and_then(|_| Ok(())) + } + + #[cfg(test)] + pub fn test_db_migrate(conn: &mut DBConn) -> Result<(), btc_error> { + SpvClient::db_migrate(conn) + } + + fn db_migrate(conn: &mut DBConn) -> Result<(), btc_error> { + let version = SpvClient::db_get_version(conn)?; + while version != SPV_DB_VERSION { + let version = SpvClient::db_get_version(conn)?; + match version.as_str() { + "1" => { + debug!("Migrate SPV DB from schema 1 to 2"); + let tx = tx_begin_immediate(conn)?; + for row_text in SPV_SCHEMA_2 { + tx.execute_batch(row_text).map_err(db_error::SqliteError)?; + } + + SpvClient::db_set_version(&tx, "2")?; + tx.commit().map_err(db_error::SqliteError)?; + } + SPV_DB_VERSION => { + break; + } + _ => { + panic!("Unrecognized SPV version {}", &version); + } + } + } + Ok(()) + } + + fn db_open(headers_path: &str, readwrite: bool, migrate: bool) -> Result { let mut create_flag = false; let open_flags = if fs::metadata(headers_path).is_err() { // need to create @@ -205,6 +309,9 @@ impl SpvClient { if create_flag { SpvClient::db_instantiate(&mut conn)?; } + if readwrite && migrate { + SpvClient::db_migrate(&mut conn)?; + } Ok(conn) } @@ -229,6 +336,234 @@ impl SpvClient { indexer.peer_communicate(self, true) } + /// Calculate the work of a single header given the first and last header in the interval + fn get_expected_work_in_range( + first_header: &LoneBlockHeader, + last_header: &LoneBlockHeader, + ) -> Uint256 { + let (_, target) = SpvClient::get_target_between_headers(&first_header, &last_header); + let work = + (Uint256::max() - target) / (target + Uint256::from_u64(1)) + Uint256::from_u64(1); + test_debug!("{}, {}", &work, &target); + work + } + + /// Calculate the total work over a full interval of headers. 
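+    /// A "full" interval holds exactly BLOCK_DIFFICULTY_CHUNK_SIZE (2016) headers, all mined at
+    /// the same difficulty, so the interval's total work is the per-header work times 2016.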
+    fn get_full_interval_work(interval_headers: &Vec<LoneBlockHeader>) -> Uint256 {
+        assert_eq!(interval_headers.len() as u64, BLOCK_DIFFICULTY_CHUNK_SIZE);
+        let first_header = interval_headers
+            .first()
+            .expect("FATAL: no first header in non-empty list of headers");
+        let last_header = interval_headers
+            .last()
+            .expect("FATAL: no last header in non-empty list of headers");
+        SpvClient::get_expected_work_in_range(first_header, last_header)
+            * Uint256::from_u64(BLOCK_DIFFICULTY_CHUNK_SIZE)
+    }
+
+    /// Calculate a partial interval's work, given the last full interval before it
+    fn get_partial_interval_work(
+        &self,
+        last_full_interval: u64,
+        partial_interval_len: usize,
+    ) -> Result<Option<Uint256>, btc_error> {
+        let last_interval_work = self.get_interval_header_work(last_full_interval)?;
+        if let Some(last_interval_work) = last_interval_work {
+            let work = last_interval_work * Uint256::from_u64(partial_interval_len as u64);
+            Ok(Some(work))
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Calculate the work done by a single header in `interval`, if we have the headers for that
+    /// interval
+    pub fn get_interval_header_work(&self, interval: u64) -> Result<Option<Uint256>, btc_error> {
+        let first_header =
+            match self.read_block_header((interval - 1) * BLOCK_DIFFICULTY_CHUNK_SIZE)? {
+                Some(res) => res,
+                None => {
+                    test_debug!(
+                        "No header at height {}",
+                        (interval - 1) * BLOCK_DIFFICULTY_CHUNK_SIZE
+                    );
+                    return Ok(None);
+                }
+            };
+
+        let last_header =
+            match self.read_block_header(interval * BLOCK_DIFFICULTY_CHUNK_SIZE - 1)? {
+                Some(res) => res,
+                None => {
+                    test_debug!(
+                        "No header at height {}",
+                        interval * BLOCK_DIFFICULTY_CHUNK_SIZE - 1
+                    );
+                    return Ok(None);
+                }
+            };
+
+        Ok(Some(SpvClient::get_expected_work_in_range(
+            &first_header,
+            &last_header,
+        )))
+    }
+
+    /// Find the highest interval for which we have a chain work score.
+    /// The interval corresponds to blocks (interval - 1) * 2016 ... interval * 2016
+    pub fn find_highest_work_score_interval(&self) -> Result<u64, btc_error> {
+        let max_interval_opt: Option<i64> = self
+            .conn()
+            .query_row(
+                "SELECT interval FROM chain_work ORDER BY interval DESC LIMIT 1",
+                NO_PARAMS,
+                |row| row.get(0),
+            )
+            .optional()
+            .map_err(db_error::SqliteError)?;
+
+        Ok(max_interval_opt.map(|x| x as u64).unwrap_or(0))
+    }
+
+    /// Find the total work score for an interval, if it has been calculated
+    pub fn find_interval_work(&self, interval: u64) -> Result<Option<Uint256>, btc_error> {
+        let work_hex: Option<String> = self
+            .conn()
+            .query_row(
+                "SELECT work FROM chain_work WHERE interval = ?1",
+                &[&u64_to_sql(interval)?],
+                |row| row.get(0),
+            )
+            .optional()
+            .map_err(db_error::SqliteError)?;
+        Ok(work_hex.map(|x| Uint256::from_hex_le(&x).expect("FATAL: work is not a uint256")))
+    }
+
+    /// Store an interval's running total work.
+    /// The interval must not yet have a work score, or the new work must be no less than the
+    /// work currently stored for it.
+    pub fn store_interval_work(&mut self, interval: u64, work: Uint256) -> Result<(), btc_error> {
+        if let Some(cur_work) = self.find_interval_work(interval)?
{ + if cur_work > work && !self.ignore_work_checks { + error!( + "Tried to store work {} to interval {}, which has work {} already", + work, interval, cur_work + ); + return Err(btc_error::InvalidDifficulty); + } + } + + let tx = self.tx_begin()?; + let args: &[&dyn ToSql] = &[&u64_to_sql(interval)?, &work.to_hex_le()]; + tx.execute( + "INSERT OR REPLACE INTO chain_work (interval,work) VALUES (?1,?2)", + args, + ) + .map_err(db_error::SqliteError)?; + + tx.commit().map_err(db_error::SqliteError)?; + Ok(()) + } + + /// Update the total chain work table up to a given interval (even if partial). + /// Returns the total work + pub fn update_chain_work(&mut self) -> Result { + let highest_interval = self.find_highest_work_score_interval()?; + let mut work_so_far = if highest_interval > 0 { + self.find_interval_work(highest_interval - 1)? + .expect("FATAL: no work score for highest known interval") + } else { + Uint256::from_u64(0) + }; + + let last_interval = self.get_headers_height()? / BLOCK_DIFFICULTY_CHUNK_SIZE + 1; + + debug!( + "Highest work-calculation interval is {} (height {}), work {}; update to {}", + highest_interval, + highest_interval * BLOCK_DIFFICULTY_CHUNK_SIZE, + work_so_far, + last_interval + ); + for interval in (highest_interval + 1)..(last_interval + 1) { + let mut partial = false; + let interval_headers = self.read_block_headers( + (interval - 1) * BLOCK_DIFFICULTY_CHUNK_SIZE, + interval * BLOCK_DIFFICULTY_CHUNK_SIZE, + )?; + let interval_work = if interval_headers.len() == BLOCK_DIFFICULTY_CHUNK_SIZE as usize { + // full interval + let work = SpvClient::get_full_interval_work(&interval_headers); + work_so_far = work_so_far + work; + self.store_interval_work(interval - 1, work_so_far)?; + work + } else { + // partial (and last) interval + let work = if interval > 2 { + let work = self + .get_partial_interval_work(interval - 2, interval_headers.len())? + .expect(&format!( + "FATAL: do not have work score for interval {}", + interval - 2 + )); + + work_so_far = work_so_far + work; + work + } else { + Uint256::from_u64(0) + }; + + partial = true; + work + }; + + debug!( + "Chain work in {} interval {} ({}-{}) is {}, total is {}", + if partial { "partial" } else { "full" }, + interval - 1, + (interval - 1) * BLOCK_DIFFICULTY_CHUNK_SIZE, + (interval - 1) * BLOCK_DIFFICULTY_CHUNK_SIZE + (interval_headers.len() as u64), + interval_work, + work_so_far + ); + if partial { + break; + } + } + + Ok(work_so_far) + } + + /// Get the total chain work. + /// You will have needed to call update_chain_work() prior to this after inserting new headers. + pub fn get_chain_work(&self) -> Result { + let highest_full_interval = self.find_highest_work_score_interval()?; + if highest_full_interval == 0 { + return Ok(Uint256::from_u64(0)); + } + + let highest_interval_work = self + .find_interval_work(highest_full_interval)? + .expect("FATAL: have interval but no work"); + + let partial_interval = highest_full_interval + 1; + let partial_interval_headers = self.read_block_headers( + partial_interval * BLOCK_DIFFICULTY_CHUNK_SIZE, + (partial_interval + 1) * BLOCK_DIFFICULTY_CHUNK_SIZE, + )?; + assert!(partial_interval_headers.len() < BLOCK_DIFFICULTY_CHUNK_SIZE as usize); + + let partial_interval_work = self + .get_partial_interval_work(highest_full_interval, partial_interval_headers.len())? 
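+            // the expect() below encodes the invariant that the headers for the highest
+            // full interval are present whenever its work score has been stored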
+            .expect(&format!(
+                "FATAL: no work score for interval {}",
+                highest_full_interval
+            ));
+
+        debug!("Chain work: highest work-calculated interval is {} with total work {} partial {} ({} headers)", &highest_full_interval, &highest_interval_work, &partial_interval_work, partial_interval_headers.len());
+        Ok(highest_interval_work + partial_interval_work)
+    }
+
     /// Validate a headers message we requested
     /// * must have at least one header
     /// * headers must be contiguous
@@ -292,6 +627,24 @@ impl SpvClient {
                 Some(res) => res.header,
             };
 
+            // each header's timestamp must exceed the median of the past 11 blocks
+            if block_height > 11 {
+                let past_11_headers =
+                    self.read_block_headers(block_height - 11, block_height)?;
+                let mut past_timestamps: Vec<u32> =
+                    past_11_headers.iter().map(|hdr| hdr.header.time).collect();
+                past_timestamps.sort();
+
+                if header_i.time < past_timestamps[5] {
+                    error!(
+                        "Block {} timestamp {} < {} (median of {:?})",
+                        block_height, header_i.time, past_timestamps[5], &past_timestamps
+                    );
+                    return Err(btc_error::InvalidPoW);
+                }
+            }
+
+            // header difficulty must not change in a difficulty interval
             let (bits, difficulty) =
                 match self.get_target(block_height, &header_i, &headers, i)? {
                     Some(x) => x,
@@ -307,7 +660,7 @@ impl SpvClient {
                 return Err(btc_error::InvalidPoW);
             }
             let header_hash = header_i.bitcoin_hash().into_le();
-            if difficulty <= header_hash {
+            if difficulty < header_hash {
                 error!(
                     "block {} hash {} has less work than difficulty {} in {}",
                     block_height,
@@ -429,8 +782,9 @@ impl SpvClient {
             .and_then(|_x| Ok(()))
     }
 
-    /// Initialize the block headers file with the genesis block hash
-    fn init_block_headers(&mut self) -> Result<(), btc_error> {
+    /// Initialize the block headers file with the genesis block hash.
+    /// Optionally skip migration for testing.
+    fn init_block_headers(&mut self, migrate: bool) -> Result<(), btc_error> {
         assert!(self.readwrite, "SPV header DB is open read-only");
         let (genesis_block, genesis_block_hash_str) = match self.network_id {
             BitcoinNetworkType::Mainnet => (
@@ -464,6 +818,10 @@ impl SpvClient {
         tx.commit().map_err(db_error::SqliteError)?;
 
         debug!("Initialized block headers at {}", self.headers_path);
+
+        if migrate {
+            self.update_chain_work()?;
+        }
         return Ok(());
     }
 
@@ -471,7 +829,7 @@ impl SpvClient {
     /// -- validate them
     /// -- store them
     /// Can error if there has been a reorg, or if the headers don't correspond to headers we asked
-    /// for.
+    /// for, or if the new chain has less total work than the old chain.
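+    /// (The total-work comparison below is what rejects reorgs onto a fork with less
+    /// cumulative difficulty.)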
     fn handle_headers(
         &mut self,
         insert_height: u64,
@@ -482,6 +840,7 @@ impl SpvClient {
         let num_headers = block_headers.len();
         let first_header_hash = block_headers[0].header.bitcoin_hash();
         let last_header_hash = block_headers[block_headers.len() - 1].header.bitcoin_hash();
+        let total_work_before = self.get_chain_work()?;
 
         if !self.reverse_order {
             // fetching headers in ascending order
@@ -530,6 +889,15 @@ impl SpvClient {
         }
 
         if num_headers > 0 {
+            let total_work_after = self.update_chain_work()?;
+            if total_work_after < total_work_before {
+                error!(
+                    "New headers represent less work than the old headers ({} < {})",
+                    total_work_after, total_work_before
+                );
+                return Err(btc_error::InvalidDifficulty);
+            }
+
             debug!(
                 "Handled {} Headers: {}-{}",
                 num_headers, first_header_hash, last_header_hash
@@ -707,8 +1075,44 @@ impl SpvClient {
         Ok(())
     }
 
+    /// Determine the (bits, target) between two headers
+    pub fn get_target_between_headers(
+        first_header: &LoneBlockHeader,
+        last_header: &LoneBlockHeader,
+    ) -> (u32, Uint256) {
+        let max_target = Uint256([
+            0x0000000000000000,
+            0x0000000000000000,
+            0x0000000000000000,
+            0x00000000ffff0000,
+        ]);
+
+        // find actual timespan as being clamped between +/- 4x of the target timespan
+        let mut actual_timespan = (last_header.header.time - first_header.header.time) as u64;
+        let target_timespan = BLOCK_DIFFICULTY_INTERVAL as u64;
+        if actual_timespan < (target_timespan / 4) {
+            actual_timespan = target_timespan / 4;
+        }
+        if actual_timespan > (target_timespan * 4) {
+            actual_timespan = target_timespan * 4;
+        }
+
+        let last_target = last_header.header.target();
+        let new_target =
+            (last_target * Uint256::from_u64(actual_timespan)) / Uint256::from_u64(target_timespan);
+        let target = cmp::min(new_target, max_target);
+
+        let bits = BlockHeader::compact_target_from_u256(&target);
+        let target = BlockHeader::compact_target_to_u256(bits);
+
+        (bits, target)
+    }
+
     /// Determine the target difficulty over a given difficulty adjustment interval
     /// the `interval` parameter is the difficulty interval -- a 2016-block interval.
+    /// * On mainnet, `headers_in_range` can be empty. If it's not empty, then the 0th element is
+    /// treated as the parent of `current_header`. On testnet, `headers_in_range` must be a range
+    /// of headers in the given `interval`.
    /// Returns (new bits, new target)
     pub fn get_target(
         &self,
@@ -758,7 +1162,7 @@ impl SpvClient {
         if current_header_height % BLOCK_DIFFICULTY_CHUNK_SIZE != 0
             && self.network_id == BitcoinNetworkType::Testnet
         {
-            // In Testnet mode, if the new block's timestamp is more than 2* 10 minutes
+            // In Testnet mode, if the new block's timestamp is more than 2 * 60 * 10 seconds (20 minutes) ahead of the parent's,
             // then allow mining of a min-difficulty block.
             if current_header.time > parent_header.time + 10 * 60 * 2 {
                 return Ok(Some((max_target_bits, max_target)));
@@ -775,34 +1179,20 @@ impl SpvClient {
         let first_header =
             match self.read_block_header((interval - 1) * BLOCK_DIFFICULTY_CHUNK_SIZE)? {
-                Some(res) => res.header,
+                Some(res) => res,
                 None => return Ok(None),
             };
 
         let last_header =
             match self.read_block_header(interval * BLOCK_DIFFICULTY_CHUNK_SIZE - 1)?
{ - Some(res) => res.header, + Some(res) => res, None => return Ok(None), }; - // find actual timespan as being clamped between +/- 4x of the target timespan - let mut actual_timespan = (last_header.time - first_header.time) as u64; - let target_timespan = BLOCK_DIFFICULTY_INTERVAL as u64; - if actual_timespan < (target_timespan / 4) { - actual_timespan = target_timespan / 4; - } - if actual_timespan > (target_timespan * 4) { - actual_timespan = target_timespan * 4; - } - - let last_target = last_header.target(); - let new_target = - last_target * Uint256::from_u64(actual_timespan) / Uint256::from_u64(target_timespan); - let target = cmp::min(new_target, max_target); - - let bits = BlockHeader::compact_target_from_u256(&target); - - Ok(Some((bits, target))) + Ok(Some(SpvClient::get_target_between_headers( + &first_header, + &last_header, + ))) } /// Ask for the next batch of headers (note that this will return the maximal size of headers) From 11f6d7ee16ff313180774b656def93d3384fe7a2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 00:13:11 -0400 Subject: [PATCH 04/20] chore: unused mut --- src/chainstate/burn/db/sortdb.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 09f3e0ebb..cdfafa945 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -7891,7 +7891,7 @@ pub mod tests { // drop descendancy information { - let mut db_tx = db.tx_begin().unwrap(); + let db_tx = db.tx_begin().unwrap(); db_tx .execute("DELETE FROM block_commit_parents", NO_PARAMS) .unwrap(); From 9425a39ff76a32996650fecc0f3afee066907124 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 00:13:24 -0400 Subject: [PATCH 05/20] feat: add static method to convert a bitcoin target to a compact target, which is what gets used in the difficulty comparison --- .../src/deps_common/bitcoin/blockdata/block.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/block.rs b/stacks-common/src/deps_common/bitcoin/blockdata/block.rs index 9ff4d9f73..40a103771 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/block.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/block.rs @@ -74,20 +74,17 @@ pub struct LoneBlockHeader { impl BlockHeader { /// Computes the target [0, T] that a blockhash must land in to be valid - pub fn target(&self) -> Uint256 { + pub fn compact_target_to_u256(bits: u32) -> Uint256 { // This is a floating-point "compact" encoding originally used by // OpenSSL, which satoshi put into consensus code, so we're stuck // with it. The exponent needs to have 3 subtracted from it, hence // this goofy decoding code: let (mant, expt) = { - let unshifted_expt = self.bits >> 24; + let unshifted_expt = bits >> 24; if unshifted_expt <= 3 { - ( - (self.bits & 0xFFFFFF) >> (8 * (3 - unshifted_expt as usize)), - 0, - ) + ((bits & 0xFFFFFF) >> (8 * (3 - unshifted_expt as usize)), 0) } else { - (self.bits & 0xFFFFFF, 8 * ((self.bits >> 24) - 3)) + (bits & 0xFFFFFF, 8 * ((bits >> 24) - 3)) } }; @@ -99,6 +96,11 @@ impl BlockHeader { } } + /// Computes the target [0, T] that a blockhash must land in to be valid + pub fn target(&self) -> Uint256 { + BlockHeader::compact_target_to_u256(self.bits) + } + /// Computes the target value in float format from Uint256 format. 
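+    /// ("Float format" here refers to Bitcoin's compact "bits" encoding of the target.)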
pub fn compact_target_from_u256(value: &Uint256) -> u32 { let mut size = (value.bits() + 7) / 8; From e8b2cecee96ff93d46e470b79a10849dfebdf840 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 00:13:45 -0400 Subject: [PATCH 06/20] chore: add uint256 codec to/from hex strings --- stacks-common/src/util/uint.rs | 36 ++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/stacks-common/src/util/uint.rs b/stacks-common/src/util/uint.rs index 466c8d3ab..bcf69dfe1 100644 --- a/stacks-common/src/util/uint.rs +++ b/stacks-common/src/util/uint.rs @@ -19,6 +19,7 @@ //! Implementation of a various large-but-fixed sized unsigned integer types. //! The functions here are designed to be fast. //! +use crate::util::hash::{hex_bytes, to_hex}; /// Borrowed with gratitude from Andrew Poelstra's rust-bitcoin library use std::fmt; @@ -141,6 +142,31 @@ macro_rules! construct_uint { } ret } + + /// from a little-endian hex string + /// padding is expected + pub fn from_hex_le(hex: &str) -> Option<$name> { + let bytes = hex_bytes(hex).ok()?; + if bytes.len() % 8 != 0 { + return None; + } + if bytes.len() / 8 != $n_words { + return None; + } + let mut ret = [0u64; $n_words]; + for i in 0..(bytes.len() / 8) { + let mut next_bytes = [0u8; 8]; + next_bytes.copy_from_slice(&bytes[8 * i..(8 * (i + 1))]); + let next = u64::from_le_bytes(next_bytes); + ret[i] = next; + } + Some($name(ret)) + } + + /// to a little-endian hex string + pub fn to_hex_le(&self) -> String { + to_hex(&self.to_u8_slice()) + } } impl ::std::ops::Add<$name> for $name { @@ -671,4 +697,14 @@ mod tests { Uint256([0, 0xDEADBEEFDEADBEEF, 0xDEADBEEFDEADBEEF, 0]) ); } + + #[test] + pub fn hex_codec() { + let init = Uint256::from_u64(0xDEADBEEFDEADBEEF); + // little-endian representation + let hex_init = "efbeaddeefbeadde000000000000000000000000000000000000000000000000"; + assert_eq!(Uint256::from_hex_le(&hex_init).unwrap(), init); + assert_eq!(&init.to_hex_le(), hex_init); + assert_eq!(Uint256::from_hex_le(&init.to_hex_le()).unwrap(), init); + } } From c6ac355f3475b021b1b0ca9484b326cd0c7de2f1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 13:55:54 -0400 Subject: [PATCH 07/20] feat: have find_bitcoin_reorg() actually merge the reorg headers and chainwork into the original SPV database, instead of re-downloading the same headers and hoping for the best. Also, update the mainnet unit test to compare chainwork against known chainwork from bitcoind --- src/burnchains/bitcoin/indexer.rs | 1612 ++++++++++++++++++++++++++++- 1 file changed, 1580 insertions(+), 32 deletions(-) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index 07814ab5c..c35d9616b 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . use rand::{thread_rng, Rng}; +use std::cmp; use std::fs; use std::net; use std::net::Shutdown; @@ -447,6 +448,10 @@ impl BitcoinIndexer { if start_block > 0 { if start_block > BLOCK_DIFFICULTY_CHUNK_SIZE { if remove_old { + // set up a .reorg db + // * needs the last difficulty interval of headers (note that the current + // interval is `start_block / BLOCK_DIFFICULTY_CHUNK_SIZE - 1). 
+ // * needs the last interval's chain work calculation let interval_start_block = start_block / BLOCK_DIFFICULTY_CHUNK_SIZE - 2; let base_block = interval_start_block * BLOCK_DIFFICULTY_CHUNK_SIZE; let interval_headers = @@ -479,8 +484,8 @@ impl BitcoinIndexer { } } else { // no full difficulty intervals yet - let interval_headers = canonical_spv_client.read_block_headers(1, start_block)?; - + let interval_headers = + canonical_spv_client.read_block_headers(1, start_block + 1)?; reorg_spv_client.insert_block_headers_before(0, interval_headers)?; } } @@ -492,10 +497,29 @@ impl BitcoinIndexer { /// the reorg starts. Returns the hight of the highest common ancestor. /// Note that under certain testnet settings, the bitcoin chain itself can shrink. pub fn find_bitcoin_reorg( + &mut self, + canonical_headers_path: &str, + reorg_headers_path: &str, + load_reorg_headers: F, + ) -> Result + where + F: FnMut(&mut BitcoinIndexer, &mut SpvClient, u64, Option) -> Result<(), btc_error>, + { + // always check chain work, except in testing + self.inner_find_bitcoin_reorg( + canonical_headers_path, + reorg_headers_path, + load_reorg_headers, + true, + ) + } + + fn inner_find_bitcoin_reorg( &mut self, canonical_headers_path: &str, reorg_headers_path: &str, mut load_reorg_headers: F, + check_chain_work: bool, ) -> Result where F: FnMut(&mut BitcoinIndexer, &mut SpvClient, u64, Option) -> Result<(), btc_error>, @@ -674,23 +698,62 @@ impl BitcoinIndexer { self.setup_reorg_headers(&orig_spv_client, reorg_headers_path, start_block, false)?; } - let reorg_total_work = reorg_spv_client.update_chain_work()?; - let orig_total_work = orig_spv_client.get_chain_work()?; + if check_chain_work { + let reorg_total_work = reorg_spv_client.update_chain_work()?; + let orig_total_work = orig_spv_client.get_chain_work()?; - debug!("Bitcoin headers history is consistent up to {}. Orig chainwork: {}, reorg chainwork: {}", new_tip, orig_total_work, reorg_total_work); - if orig_total_work < reorg_total_work { - let reorg_tip = reorg_spv_client.get_headers_height()?; - let hdr_reorg = reorg_spv_client - .read_block_header(reorg_tip - 1)? - .expect("FATAL: no tip hash for existing chain tip"); - info!( - "New canonical Bitcoin chain found! New tip is {}", - &hdr_reorg.header.bitcoin_hash() - ); - } else { - // ignore the reorg - test_debug!("Reorg chain does not overtake original Bitcoin chain"); - new_tip = orig_spv_client.get_headers_height()?; + debug!("Bitcoin headers history is consistent up to {}. Orig chainwork: {}, reorg chainwork: {}", new_tip, orig_total_work, reorg_total_work); + + if orig_total_work < reorg_total_work { + let reorg_tip = reorg_spv_client.get_headers_height()?; + let hdr_reorg = reorg_spv_client + .read_block_header(reorg_tip - 1)? + .expect("FATAL: no tip hash for existing chain tip"); + info!( + "New canonical Bitcoin chain found! 
New tip is {}", + &hdr_reorg.header.bitcoin_hash() + ); + + // merge the new headers and chain difficulty to the original headers + let mut orig_spv_client = SpvClient::new( + canonical_headers_path, + 0, + None, + self.runtime.network_id, + true, + false, + )?; + + // copy over new headers + if new_tip > 0 { + let new_headers = + reorg_spv_client.read_block_headers(new_tip, reorg_tip + 1)?; + orig_spv_client.drop_headers(new_tip)?; + orig_spv_client.insert_block_headers_after(new_tip - 1, new_headers)?; + } + + // copy over new chain work + let orig_highest_interval = orig_spv_client.find_highest_work_score_interval()?; + let reorg_highest_interval = reorg_spv_client.find_highest_work_score_interval()?; + for interval in cmp::min(orig_highest_interval, reorg_highest_interval) + ..(cmp::max(orig_highest_interval, reorg_highest_interval) + 1) + { + if let Some(work_score) = reorg_spv_client.find_interval_work(interval)? { + test_debug!( + "Copy work score for interval {} ({}) to original SPV client DB", + interval, + &work_score + ); + orig_spv_client + .store_interval_work(interval, work_score) + .expect("FATAL: failed to store better chain work"); + } + } + } else { + // ignore the reorg + test_debug!("Reorg chain does not overtake original Bitcoin chain"); + new_tip = orig_spv_client.get_headers_height()?; + } } let hdr_reorg = reorg_spv_client.read_block_header(new_tip)?; @@ -925,6 +988,7 @@ mod test { deserialize, serialize, BitcoinHash, }; use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; + use stacks_common::util::uint::Uint256; use std::env; @@ -940,6 +1004,9 @@ mod test { if fs::metadata(path_2).is_ok() { fs::remove_file(path_2).unwrap(); } + if fs::metadata(path_reorg).is_ok() { + fs::remove_file(path_reorg).unwrap(); + } // two header sets -- both of which build off of the genesis block let headers_1 = vec![ @@ -1062,6 +1129,9 @@ mod test { .insert_block_headers_after(0, headers_2.clone()) .unwrap(); + spv_client.update_chain_work().unwrap(); + spv_client_reorg.update_chain_work().unwrap(); + assert_eq!(spv_client.read_block_headers(0, 10).unwrap().len(), 4); assert_eq!(spv_client_reorg.read_block_headers(0, 10).unwrap().len(), 4); @@ -1072,7 +1142,7 @@ mod test { BitcoinIndexerRuntime::new(BitcoinNetworkType::Regtest), ); let common_ancestor_height = indexer - .find_bitcoin_reorg( + .inner_find_bitcoin_reorg( path_1, path_reorg, |ref mut indexer, ref mut spv_client, start_block, end_block_opt| { @@ -1081,18 +1151,20 @@ mod test { let hdrs = spv_client_reorg .read_block_headers(start_block, end_block) .unwrap(); + if start_block > 0 { + test_debug!("insert at {}: {:?}", start_block - 1, &hdrs); spv_client .insert_block_headers_before(start_block - 1, hdrs) .unwrap(); } else if hdrs.len() > 0 { - spv_client - .insert_block_headers_before(0, hdrs[1..].to_vec()) - .unwrap(); + test_debug!("insert at {}: {:?}", 0, &hdrs); + spv_client.test_write_block_headers(0, hdrs).unwrap(); } Ok(()) }, + false, ) .unwrap(); @@ -1113,7 +1185,7 @@ mod test { fs::remove_file(path_2).unwrap(); } - // two header sets -- both of which build off of the genesis block + // two header sets -- both of which build off of same first block let headers_1 = vec![ LoneBlockHeader { header: BlockHeader { @@ -1244,7 +1316,7 @@ mod test { BitcoinIndexerRuntime::new(BitcoinNetworkType::Regtest), ); let common_ancestor_height = indexer - .find_bitcoin_reorg( + .inner_find_bitcoin_reorg( path_1, path_reorg, |ref mut indexer, ref mut spv_client, start_block, end_block_opt| { @@ -1258,13 +1330,12 @@ mod 
test { .insert_block_headers_before(start_block - 1, hdrs) .unwrap(); } else if hdrs.len() > 0 { - spv_client - .insert_block_headers_before(0, hdrs[1..].to_vec()) - .unwrap(); + test_debug!("insert at {}: {:?}", 0, &hdrs); + spv_client.test_write_block_headers(0, hdrs).unwrap(); } - Ok(()) }, + false, ) .unwrap(); @@ -1307,6 +1378,7 @@ mod test { } }; + let db_path = "/tmp/test_indexer_sync_headers.sqlite"; let indexer_conf = BitcoinIndexerConfig { peer_host: host, peer_port: port, @@ -1315,7 +1387,7 @@ mod test { username: Some("blockstack".to_string()), password: Some("blockstacksystem".to_string()), timeout: 30, - spv_headers_path: "/tmp/test_indexer_sync_headers.sqlite".to_string(), + spv_headers_path: db_path.to_string(), first_block: 0, magic_bytes: MagicBytes([105, 100]), epochs: None, @@ -1328,6 +1400,1482 @@ mod test { let mut indexer = BitcoinIndexer::new(indexer_conf, BitcoinIndexerRuntime::new(mode)); let last_block = indexer.sync_headers(0, None).unwrap(); eprintln!("sync'ed to block {}", last_block); + + // compare against known-good chain work + let chain_work: Vec<(u64, &str)> = vec![ + ( + 0, + "000000000000000000000000000000000000000000000000000007e007e007e0", + ), + ( + 1, + "00000000000000000000000000000000000000000000000000000fc00fc00fc0", + ), + ( + 2, + "000000000000000000000000000000000000000000000000000017a017a017a0", + ), + ( + 3, + "00000000000000000000000000000000000000000000000000001f801f801f80", + ), + ( + 4, + "0000000000000000000000000000000000000000000000000000276027602760", + ), + ( + 5, + "00000000000000000000000000000000000000000000000000002f402f402f40", + ), + ( + 6, + "0000000000000000000000000000000000000000000000000000372037203720", + ), + ( + 7, + "00000000000000000000000000000000000000000000000000003f003f003f00", + ), + ( + 8, + "000000000000000000000000000000000000000000000000000046e046e046e0", + ), + ( + 9, + "00000000000000000000000000000000000000000000000000004ec04ec04ec0", + ), + ( + 10, + "000000000000000000000000000000000000000000000000000056a056a056a0", + ), + ( + 11, + "00000000000000000000000000000000000000000000000000005e805e805e80", + ), + ( + 12, + "0000000000000000000000000000000000000000000000000000666066606660", + ), + ( + 13, + "00000000000000000000000000000000000000000000000000006e406e406e40", + ), + ( + 14, + "0000000000000000000000000000000000000000000000000000762076207620", + ), + ( + 15, + "00000000000000000000000000000000000000000000000000007e007e007e00", + ), + ( + 16, + "00000000000000000000000000000000000000000000000000008751410913c0", + ), + ( + 17, + "000000000000000000000000000000000000000000000000000091984ca8a7c0", + ), + ( + 18, + "00000000000000000000000000000000000000000000000000009c2e4c600dc0", + ), + ( + 19, + "0000000000000000000000000000000000000000000000000000aa80bfeea100", + ), + ( + 20, + "0000000000000000000000000000000000000000000000000000be68bf6b8cc0", + ), + ( + 21, + "0000000000000000000000000000000000000000000000000000dc2fb8af3b80", + ), + ( + 22, + "0000000000000000000000000000000000000000000000000000ffde8588bce0", + ), + ( + 23, + "000000000000000000000000000000000000000000000000000123d207cd7780", + ), + ( + 24, + "000000000000000000000000000000000000000000000000000153be8a040220", + ), + ( + 25, + "000000000000000000000000000000000000000000000000000191537d8be600", + ), + ( + 26, + "0000000000000000000000000000000000000000000000000001eb9be75bf700", + ), + ( + 27, + "000000000000000000000000000000000000000000000000000250cc4092ede0", + ), + ( + 28, + 
"0000000000000000000000000000000000000000000000000002ae169cd3d9a0", + ), + ( + 29, + "000000000000000000000000000000000000000000000000000330f72fc5b200", + ), + ( + 30, + "0000000000000000000000000000000000000000000000000003b9d8cd2a7b60", + ), + ( + 31, + "000000000000000000000000000000000000000000000000000452a977bf36e0", + ), + ( + 32, + "00000000000000000000000000000000000000000000000000050bbcb9ab7b40", + ), + ( + 33, + "00000000000000000000000000000000000000000000000000067127f0749ce0", + ), + ( + 34, + "000000000000000000000000000000000000000000000000000c06d4cb992b40", + ), + ( + 35, + "00000000000000000000000000000000000000000000000000138a0a2a644e00", + ), + ( + 36, + "000000000000000000000000000000000000000000000000001e5f59ff0f0e00", + ), + ( + 37, + "000000000000000000000000000000000000000000000000002e1da12f45c380", + ), + ( + 38, + "00000000000000000000000000000000000000000000000000414ae078f5d1e0", + ), + ( + 39, + "000000000000000000000000000000000000000000000000005738ee4a11f0e0", + ), + ( + 40, + "000000000000000000000000000000000000000000000000007374f54c5c30a0", + ), + ( + 41, + "000000000000000000000000000000000000000000000000009c05a4af3fcdc0", + ), + ( + 42, + "00000000000000000000000000000000000000000000000000c669c7db3fed80", + ), + ( + 43, + "00000000000000000000000000000000000000000000000001088595f1a953e0", + ), + ( + 44, + "0000000000000000000000000000000000000000000000000167a1629fa7a960", + ), + ( + 45, + "00000000000000000000000000000000000000000000000001f32db747272760", + ), + ( + 46, + "00000000000000000000000000000000000000000000000002c66b5e31f1f5c0", + ), + ( + 47, + "00000000000000000000000000000000000000000000000003beec205689a020", + ), + ( + 48, + "0000000000000000000000000000000000000000000000000537d218c0d68ea0", + ), + ( + 49, + "00000000000000000000000000000000000000000000000006f5629da3560ee0", + ), + ( + 50, + "00000000000000000000000000000000000000000000000008eb0983e6ec8ee0", + ), + ( + 51, + "0000000000000000000000000000000000000000000000000b22382e2dcefd60", + ), + ( + 52, + "0000000000000000000000000000000000000000000000000dc75e541af84d60", + ), + ( + 53, + "00000000000000000000000000000000000000000000000010e71ec1cb23ca20", + ), + ( + 54, + "0000000000000000000000000000000000000000000000001548b4bf6b9d3100", + ), + ( + 55, + "0000000000000000000000000000000000000000000000001bf6c2e204f41b40", + ), + ( + 56, + "000000000000000000000000000000000000000000000000251e9cea79c2cce0", + ), + ( + 57, + "0000000000000000000000000000000000000000000000002d688542329dbac0", + ), + ( + 58, + "000000000000000000000000000000000000000000000000374da719dc958d00", + ), + ( + 59, + "0000000000000000000000000000000000000000000000004266777a08f8ce80", + ), + ( + 60, + "0000000000000000000000000000000000000000000000004f9428f4722a17c0", + ), + ( + 61, + "000000000000000000000000000000000000000000000000627ea20909250840", + ), + ( + 62, + "0000000000000000000000000000000000000000000000007fd41135d2b41520", + ), + ( + 63, + "000000000000000000000000000000000000000000000000b415d6336051fce0", + ), + ( + 64, + "000000000000000000000000000000000000000000000000f84049eaa2bdc920", + ), + ( + 65, + "00000000000000000000000000000000000000000000000161a153ee991e8a80", + ), + ( + 66, + "000000000000000000000000000000000000000000000002075c4ceea37a38c0", + ), + ( + 67, + "000000000000000000000000000000000000000000000002c32e7638f85db9e0", + ), + ( + 68, + "0000000000000000000000000000000000000000000000038e5e1ddb9420fbc0", + ), + ( + 69, + "00000000000000000000000000000000000000000000000471555420c8491da0", + 
), + ( + 70, + "0000000000000000000000000000000000000000000000054a50a331db8feba0", + ), + ( + 71, + "0000000000000000000000000000000000000000000000061ff0deddce4307e0", + ), + ( + 72, + "000000000000000000000000000000000000000000000006f2e198344ff63d80", + ), + ( + 73, + "000000000000000000000000000000000000000000000007bde137a39a5782a0", + ), + ( + 74, + "0000000000000000000000000000000000000000000000086e4e1f0dc8b7fa60", + ), + ( + 75, + "000000000000000000000000000000000000000000000008feeb3e567cff41c0", + ), + ( + 76, + "0000000000000000000000000000000000000000000000098e37156a82413240", + ), + ( + 77, + "00000000000000000000000000000000000000000000000a1147e2764b0a21a0", + ), + ( + 78, + "00000000000000000000000000000000000000000000000a9c1364231203dde0", + ), + ( + 79, + "00000000000000000000000000000000000000000000000b27755c4f71b45e40", + ), + ( + 80, + "00000000000000000000000000000000000000000000000bbdc167cddde49e60", + ), + ( + 81, + "00000000000000000000000000000000000000000000000c5ae5fdc96e314540", + ), + ( + 82, + "00000000000000000000000000000000000000000000000d00aef727dc4d2a40", + ), + ( + 83, + "00000000000000000000000000000000000000000000000da61108e5fd222a00", + ), + ( + 84, + "00000000000000000000000000000000000000000000000e59f35f37e5c50260", + ), + ( + 85, + "00000000000000000000000000000000000000000000000f0dfe2f5e261117c0", + ), + ( + 86, + "00000000000000000000000000000000000000000000000fd172877cda20ea20", + ), + ( + 87, + "0000000000000000000000000000000000000000000000108f0e99cc0c40f240", + ), + ( + 88, + "00000000000000000000000000000000000000000000001144561ebe70d48900", + ), + ( + 89, + "000000000000000000000000000000000000000000000012149b602f9d5e4e40", + ), + ( + 90, + "000000000000000000000000000000000000000000000012d3cc52b3a56e4f80", + ), + ( + 91, + "000000000000000000000000000000000000000000000013920a567e1baf5720", + ), + ( + 92, + "00000000000000000000000000000000000000000000001461834d9e685448a0", + ), + ( + 93, + "00000000000000000000000000000000000000000000001533f9e08c3a70f180", + ), + ( + 94, + "00000000000000000000000000000000000000000000001614402859652da9e0", + ), + ( + 95, + "00000000000000000000000000000000000000000000001708fc9de8a9016820", + ), + ( + 96, + "000000000000000000000000000000000000000000000018104072b037fd6840", + ), + ( + 97, + "0000000000000000000000000000000000000000000000193587f47f44b318c0", + ), + ( + 98, + "00000000000000000000000000000000000000000000001a7942c3db3c544e00", + ), + ( + 99, + "00000000000000000000000000000000000000000000001bd16dfe8636359a80", + ), + ( + 100, + "00000000000000000000000000000000000000000000001d407d055b91205080", + ), + ( + 101, + "00000000000000000000000000000000000000000000001eb1ac5c2ea8ef52e0", + ), + ( + 102, + "0000000000000000000000000000000000000000000000203ebd97d829576860", + ), + ( + 103, + "000000000000000000000000000000000000000000000021d38c3de21fde2be0", + ), + ( + 104, + "00000000000000000000000000000000000000000000002370c89b2e2b749be0", + ), + ( + 105, + "00000000000000000000000000000000000000000000002505c2c5d3ae324400", + ), + ( + 106, + "0000000000000000000000000000000000000000000000266bceea3b91dfc7a0", + ), + ( + 107, + "000000000000000000000000000000000000000000000027f24a2bb126d7cfc0", + ), + ( + 108, + "0000000000000000000000000000000000000000000000295708322ca3f160e0", + ), + ( + 109, + "00000000000000000000000000000000000000000000002ae0a0a7639d5382c0", + ), + ( + 110, + "00000000000000000000000000000000000000000000002c9759c2b432e2cbc0", + ), + ( + 111, + 
"00000000000000000000000000000000000000000000002ea4372f1351e945c0", + ), + ( + 112, + "000000000000000000000000000000000000000000000030eabb6aea1e3372a0", + ), + ( + 113, + "0000000000000000000000000000000000000000000000340f55af7e1992dda0", + ), + ( + 114, + "000000000000000000000000000000000000000000000037a95bf3e36b001820", + ), + ( + 115, + "00000000000000000000000000000000000000000000003bdfc0ef666a1293c0", + ), + ( + 116, + "0000000000000000000000000000000000000000000000409a91c0ac3435e780", + ), + ( + 117, + "000000000000000000000000000000000000000000000045dae2457ed37e1a60", + ), + ( + 118, + "00000000000000000000000000000000000000000000004b8f4bcf1f459655e0", + ), + ( + 119, + "000000000000000000000000000000000000000000000052e28b37bc272455e0", + ), + ( + 120, + "00000000000000000000000000000000000000000000005bf6711e872f9c9c40", + ), + ( + 121, + "000000000000000000000000000000000000000000000065fa32870e624f9bc0", + ), + ( + 122, + "000000000000000000000000000000000000000000000072420dd4e9bfc326c0", + ), + ( + 123, + "000000000000000000000000000000000000000000000080ee0a56a1701d7e40", + ), + ( + 124, + "0000000000000000000000000000000000000000000000927b55a53fe0b5f960", + ), + ( + 125, + "0000000000000000000000000000000000000000000000aa54f2dade69a01dc0", + ), + ( + 126, + "0000000000000000000000000000000000000000000000c931ca9362b0377b20", + ), + ( + 127, + "0000000000000000000000000000000000000000000000f200146c9f43cd6f60", + ), + ( + 128, + "000000000000000000000000000000000000000000000126de11075b399a25c0", + ), + ( + 129, + "00000000000000000000000000000000000000000000016cb8e540a683fba740", + ), + ( + 130, + "0000000000000000000000000000000000000000000001c591d6a7ae7afa8d20", + ), + ( + 131, + "0000000000000000000000000000000000000000000002433db5b93a1c218940", + ), + ( + 132, + "0000000000000000000000000000000000000000000002fabd96a3c1683667a0", + ), + ( + 133, + "0000000000000000000000000000000000000000000003ea915b5e66b2ba4640", + ), + ( + 134, + "000000000000000000000000000000000000000000000508a7b83ce27d6e0d80", + ), + ( + 135, + "000000000000000000000000000000000000000000000654b54aef7d013eec60", + ), + ( + 136, + "0000000000000000000000000000000000000000000007ff151710fa2c0766a0", + ), + ( + 137, + "000000000000000000000000000000000000000000000a29667c9507de4f5860", + ), + ( + 138, + "000000000000000000000000000000000000000000000cc33a042440e69953e0", + ), + ( + 139, + "00000000000000000000000000000000000000000000100b3a9024583bf28b80", + ), + ( + 140, + "00000000000000000000000000000000000000000000141101d9154085911fe0", + ), + ( + 141, + "0000000000000000000000000000000000000000000018df7a6211abc5ab0f00", + ), + ( + 142, + "000000000000000000000000000000000000000000001e9c7ae8df8f81f56640", + ), + ( + 143, + "00000000000000000000000000000000000000000000259b8e9646e7349c0c00", + ), + ( + 144, + "000000000000000000000000000000000000000000002d66952994737e0a63e0", + ), + ( + 145, + "000000000000000000000000000000000000000000003694c58d08d508cc8300", + ), + ( + 146, + "0000000000000000000000000000000000000000000041cd5532605cb88f6a60", + ), + ( + 147, + "000000000000000000000000000000000000000000004e992868fd1d93ec6400", + ), + ( + 148, + "000000000000000000000000000000000000000000005d44b796f30b5b47bae0", + ), + ( + 149, + "000000000000000000000000000000000000000000006d8074912a6737d3d380", + ), + ( + 150, + "0000000000000000000000000000000000000000000080ac4e0f3e76ba089b80", + ), + ( + 151, + "00000000000000000000000000000000000000000000963ac1bd3bc314c0d7a0", + ), + ( + 152, + 
"00000000000000000000000000000000000000000000aeea01f39ddc8c90f040", + ), + ( + 153, + "00000000000000000000000000000000000000000000cdc07cf49ac256735280", + ), + ( + 154, + "00000000000000000000000000000000000000000000ed8a0bf93786bc4ea1c0", + ), + ( + 155, + "000000000000000000000000000000000000000000010fe4d0ad93ec88d58a20", + ), + ( + 156, + "000000000000000000000000000000000000000000013411c99602e0779512c0", + ), + ( + 157, + "000000000000000000000000000000000000000000015fca5387f865e1609380", + ), + ( + 158, + "00000000000000000000000000000000000000000001921527684f8e18e0f120", + ), + ( + 159, + "00000000000000000000000000000000000000000001c8c70b3ef33636f10d20", + ), + ( + 160, + "000000000000000000000000000000000000000000020854e6788dc151fee520", + ), + ( + 161, + "000000000000000000000000000000000000000000024882d8a223b780bebf20", + ), + ( + 162, + "000000000000000000000000000000000000000000028a7e47ce725d7d426340", + ), + ( + 163, + "00000000000000000000000000000000000000000002d31bfe56e2b1739d6bc0", + ), + ( + 164, + "000000000000000000000000000000000000000000031d00935207d1ab495d20", + ), + ( + 165, + "00000000000000000000000000000000000000000003665bd4e1aba42c7dd8c0", + ), + ( + 166, + "00000000000000000000000000000000000000000003aeb503f622705470cc20", + ), + ( + 167, + "00000000000000000000000000000000000000000003f939a016b21b1b395760", + ), + ( + 168, + "0000000000000000000000000000000000000000000449d9a5f3dbacdbb93960", + ), + ( + 169, + "000000000000000000000000000000000000000000049586e07bd6f20810b960", + ), + ( + 170, + "00000000000000000000000000000000000000000004e709f889ae74fa318c40", + ), + ( + 171, + "000000000000000000000000000000000000000000053ca35329505af64851c0", + ), + ( + 172, + "00000000000000000000000000000000000000000005939985b1e73e86585920", + ), + ( + 173, + "00000000000000000000000000000000000000000005e9427295b0327510f160", + ), + ( + 174, + "0000000000000000000000000000000000000000000643ec461b119e93fa0120", + ), + ( + 175, + "000000000000000000000000000000000000000000069b385ff2430bd50d39c0", + ), + ( + 176, + "00000000000000000000000000000000000000000006f293e337e48534b58620", + ), + ( + 177, + "000000000000000000000000000000000000000000074c11d1095634524084a0", + ), + ( + 178, + "00000000000000000000000000000000000000000007a354129e16951771cac0", + ), + ( + 179, + "00000000000000000000000000000000000000000007fe715e2872e96c5294a0", + ), + ( + 180, + "0000000000000000000000000000000000000000000859065d467171f99cd620", + ), + ( + 181, + "00000000000000000000000000000000000000000008b6ad4a7c5e93761ed960", + ), + ( + 182, + "0000000000000000000000000000000000000000000916886665dd85cb9e37c0", + ), + ( + 183, + "00000000000000000000000000000000000000000009772960493504b307b5c0", + ), + ( + 184, + "00000000000000000000000000000000000000000009daa5194766250ba1e4e0", + ), + ( + 185, + "0000000000000000000000000000000000000000000a4314a99165a339d76940", + ), + ( + 186, + "0000000000000000000000000000000000000000000aafe04e07a0cc76908780", + ), + ( + 187, + "0000000000000000000000000000000000000000000b1f61a6c72823bc6f7cc0", + ), + ( + 188, + "0000000000000000000000000000000000000000000b8f0423557c7834c9c440", + ), + ( + 189, + "0000000000000000000000000000000000000000000c0129c4864d86d6937540", + ), + ( + 190, + "0000000000000000000000000000000000000000000c79e686c513ee1711d700", + ), + ( + 191, + "0000000000000000000000000000000000000000000cff3e24f98a31a9513bc0", + ), + ( + 192, + "0000000000000000000000000000000000000000000d90484e8d690d207cb3e0", + ), + ( + 193, + 
"0000000000000000000000000000000000000000000e3ba087263ab2bf5acbe0", + ), + ( + 194, + "0000000000000000000000000000000000000000000efa194f42d4866a387da0", + ), + ( + 195, + "0000000000000000000000000000000000000000000fc9f11bbc39959b21b000", + ), + ( + 196, + "00000000000000000000000000000000000000000010a60801ccdc23faa49280", + ), + ( + 197, + "00000000000000000000000000000000000000000011ae475d9025c6286edae0", + ), + ( + 198, + "00000000000000000000000000000000000000000012da0d5328636c44f7bb20", + ), + ( + 199, + "00000000000000000000000000000000000000000013fc8a1001c47b4dec7d80", + ), + ( + 200, + "000000000000000000000000000000000000000000152bfd3dacde2eb7fd1260", + ), + ( + 201, + "000000000000000000000000000000000000000000165dec4bf88a5938102cc0", + ), + ( + 202, + "00000000000000000000000000000000000000000017a58ac69e578aeff74d60", + ), + ( + 203, + "00000000000000000000000000000000000000000018ed2050238fb72a6adb60", + ), + ( + 204, + "0000000000000000000000000000000000000000001a514e4f44f2f7b58cce20", + ), + ( + 205, + "0000000000000000000000000000000000000000001bbec2257da9ba542e3dc0", + ), + ( + 206, + "0000000000000000000000000000000000000000001d264026c89fc3a561ff20", + ), + ( + 207, + "0000000000000000000000000000000000000000001ea64c2728a2c3bd62bf20", + ), + ( + 208, + "000000000000000000000000000000000000000000202d9445cb5993709c9940", + ), + ( + 209, + "00000000000000000000000000000000000000000021b50850f1b264bd8ee400", + ), + ( + 210, + "000000000000000000000000000000000000000000232737b9bae704d658e980", + ), + ( + 211, + "00000000000000000000000000000000000000000024b5ca6a95511e529b9a60", + ), + ( + 212, + "000000000000000000000000000000000000000000264a8fdb9737cec5270360", + ), + ( + 213, + "00000000000000000000000000000000000000000027e8a464ee3e6441e33ba0", + ), + ( + 214, + "00000000000000000000000000000000000000000029a2f2ee951b390851d020", + ), + ( + 215, + "0000000000000000000000000000000000000000002b7cf7e446f67a01521e40", + ), + ( + 216, + "0000000000000000000000000000000000000000002d4dfeb582d570cb6ec4c0", + ), + ( + 217, + "0000000000000000000000000000000000000000002f20dbd4bde0279e863f60", + ), + ( + 218, + "00000000000000000000000000000000000000000031258f6adfa6b4147044c0", + ), + ( + 219, + "00000000000000000000000000000000000000000033335d7927c4d1cc706340", + ), + ( + 220, + "000000000000000000000000000000000000000000356c0dc0666c9a25e31d60", + ), + ( + 221, + "00000000000000000000000000000000000000000037b28eb2ad32e4eb0725a0", + ), + ( + 222, + "0000000000000000000000000000000000000000003a1c496adb7a0fa510f440", + ), + ( + 223, + "0000000000000000000000000000000000000000003ceccfe9ad4acc1bac8580", + ), + ( + 224, + "0000000000000000000000000000000000000000003ff2e4225485aa755b79a0", + ), + ( + 225, + "000000000000000000000000000000000000000000431b177600a43a49c8ff20", + ), + ( + 226, + "0000000000000000000000000000000000000000004667f1b695192e96aa5e00", + ), + ( + 227, + "00000000000000000000000000000000000000000049d02ec230291e1ed89fe0", + ), + ( + 228, + "0000000000000000000000000000000000000000004d644cbc7c8dac48b042e0", + ), + ( + 229, + "000000000000000000000000000000000000000000511f3d1a5d2ee6dddf2c60", + ), + ( + 230, + "00000000000000000000000000000000000000000054dc50acc5ee22163a87e0", + ), + ( + 231, + "00000000000000000000000000000000000000000058df0f81b00e65e31d9fc0", + ), + ( + 232, + "0000000000000000000000000000000000000000005d23b986246a80e2a66160", + ), + ( + 233, + "0000000000000000000000000000000000000000006200474547413007eb54e0", + ), + ( + 234, + 
"0000000000000000000000000000000000000000006719397aeed92cea73c0c0", + ), + ( + 235, + "0000000000000000000000000000000000000000006c2c99cc24de404ac4f6c0", + ), + ( + 236, + "00000000000000000000000000000000000000000071efc0e32e8d53c3437520", + ), + ( + 237, + "000000000000000000000000000000000000000000781907b3b129168140d360", + ), + ( + 238, + "0000000000000000000000000000000000000000007eb5d786594edfb7192580", + ), + ( + 239, + "00000000000000000000000000000000000000000085125dd58b787822420060", + ), + ( + 240, + "0000000000000000000000000000000000000000008bae3f082d510ef55e75a0", + ), + ( + 241, + "00000000000000000000000000000000000000000093956885c724768b3e4220", + ), + ( + 242, + "0000000000000000000000000000000000000000009ba216e9c83e948399d3e0", + ), + ( + 243, + "000000000000000000000000000000000000000000a4347de9712a3d299897c0", + ), + ( + 244, + "000000000000000000000000000000000000000000ae9c5fcf35f61f498146e0", + ), + ( + 245, + "000000000000000000000000000000000000000000b86222fe3501a784060ac0", + ), + ( + 246, + "000000000000000000000000000000000000000000c207f50841bc71fbf34200", + ), + ( + 247, + "000000000000000000000000000000000000000000cd6cfa174358d251800c40", + ), + ( + 248, + "000000000000000000000000000000000000000000dad77213452f0444c351e0", + ), + ( + 249, + "000000000000000000000000000000000000000000e8ac5170255ea89b74f900", + ), + ( + 250, + "000000000000000000000000000000000000000000f8a13b2c589aeeb23ffba0", + ), + ( + 251, + "0000000000000000000000000000000000000000010b46275cd6a0d8d647dcc0", + ), + ( + 252, + "0000000000000000000000000000000000000000011fdd1173e9b175a204fbc0", + ), + ( + 253, + "0000000000000000000000000000000000000000013567509d0940b8bba28240", + ), + ( + 254, + "0000000000000000000000000000000000000000014cf8de771e406fcb574e00", + ), + ( + 255, + "00000000000000000000000000000000000000000165c5ae302bc30be69eb9a0", + ), + ( + 256, + "0000000000000000000000000000000000000000017eeb74084c207738949880", + ), + ( + 257, + "0000000000000000000000000000000000000000019a6b1b59c384990c32ece0", + ), + ( + 258, + "000000000000000000000000000000000000000001b739d4c259343246ef1ee0", + ), + ( + 259, + "000000000000000000000000000000000000000001d4e7eb5fde62f143663aa0", + ), + ( + 260, + "000000000000000000000000000000000000000001f3c1028afece7ae8982120", + ), + ( + 261, + "0000000000000000000000000000000000000000021724227cc1eca0a316fde0", + ), + ( + 262, + "0000000000000000000000000000000000000000023b8214bfc487e587047c20", + ), + ( + 263, + "00000000000000000000000000000000000000000261ecc1d79b256c651d81c0", + ), + ( + 264, + "0000000000000000000000000000000000000000028704359f9c7d6769226240", + ), + ( + 265, + "000000000000000000000000000000000000000002b1a0ea483b571304264320", + ), + ( + 266, + "000000000000000000000000000000000000000002df642ba14be8dd6aa4de80", + ), + ( + 267, + "0000000000000000000000000000000000000000030f9301272cdfb2ac437b80", + ), + ( + 268, + "00000000000000000000000000000000000000000341d93154f4bdb6a5c457a0", + ), + ( + 269, + "00000000000000000000000000000000000000000375140aa6d3469564e40d20", + ), + ( + 270, + "000000000000000000000000000000000000000003aa793e4456d51fee079d20", + ), + ( + 271, + "000000000000000000000000000000000000000003ddeb802da8e6b18d7f2440", + ), + ( + 272, + "00000000000000000000000000000000000000000411609ae24d1bf31e937fa0", + ), + ( + 273, + "0000000000000000000000000000000000000000044107e5ba926026f20196c0", + ), + ( + 274, + "0000000000000000000000000000000000000000046978f859a2d324a4f423a0", + ), + ( + 275, + 
"0000000000000000000000000000000000000000048e0bf34e00b79c6e0c9cc0", + ), + ( + 276, + "000000000000000000000000000000000000000004b64a09060ec73d90f77520", + ), + ( + 277, + "000000000000000000000000000000000000000004e06ebc5bfb6b016e590e80", + ), + ( + 278, + "0000000000000000000000000000000000000000050a145245ab90a8067ccd40", + ), + ( + 279, + "000000000000000000000000000000000000000005357e89442872e853f88fe0", + ), + ( + 280, + "00000000000000000000000000000000000000000560fbafcacfef7b2141bde0", + ), + ( + 281, + "0000000000000000000000000000000000000000058c736b7d94f11ac4af8820", + ), + ( + 282, + "000000000000000000000000000000000000000005ba243ec2581be932e72bc0", + ), + ( + 283, + "000000000000000000000000000000000000000005e7ee4c12541090941dbe60", + ), + ( + 284, + "000000000000000000000000000000000000000006156f04d90982240b8b39c0", + ), + ( + 285, + "000000000000000000000000000000000000000006456fe96f932a6a69ce1de0", + ), + ( + 286, + "0000000000000000000000000000000000000000067575520b861045f8089f80", + ), + ( + 287, + "000000000000000000000000000000000000000006aae3297a3f9d93d55e9ce0", + ), + ( + 288, + "000000000000000000000000000000000000000006dff4cf1a2437365611d5c0", + ), + ( + 289, + "00000000000000000000000000000000000000000718c9a7d0e51cfd930a0b20", + ), + ( + 290, + "00000000000000000000000000000000000000000759b56bb260925290180080", + ), + ( + 291, + "0000000000000000000000000000000000000000079a44d2dcddadcd50c16380", + ), + ( + 292, + "000000000000000000000000000000000000000007e1c9a6b59653827fadcba0", + ), + ( + 293, + "0000000000000000000000000000000000000000082ab9c86e527cfd7dffdc40", + ), + ( + 294, + "00000000000000000000000000000000000000000877e0fc3b665d3187c61ce0", + ), + ( + 295, + "000000000000000000000000000000000000000008cd0b371205d869e58815e0", + ), + ( + 296, + "000000000000000000000000000000000000000009286f3a6c1469a93569fda0", + ), + ( + 297, + "000000000000000000000000000000000000000009859a773846d18d99e33c40", + ), + ( + 298, + "000000000000000000000000000000000000000009e7aabe3dbb65d04b436960", + ), + ( + 299, + "00000000000000000000000000000000000000000a42c5c116143a6675fe4c40", + ), + ( + 300, + "00000000000000000000000000000000000000000a9fb114d65d94f00168f6a0", + ), + ( + 301, + "00000000000000000000000000000000000000000afbeba9e7b19fc8c09584c0", + ), + ( + 302, + "00000000000000000000000000000000000000000b58a9ce9935920487232a80", + ), + ( + 303, + "00000000000000000000000000000000000000000bbb7ed558b66d4d2e1c0d60", + ), + ( + 304, + "00000000000000000000000000000000000000000c255453c47c551c36aa3540", + ), + ( + 305, + "00000000000000000000000000000000000000000c941a7dca358fb9521e03e0", + ), + ( + 306, + "00000000000000000000000000000000000000000d037486edebab30d3c6c0e0", + ), + ( + 307, + "00000000000000000000000000000000000000000d7260db2663c608c7f7a7c0", + ), + ( + 308, + "00000000000000000000000000000000000000000de8efca09e80642843d4be0", + ), + ( + 309, + "00000000000000000000000000000000000000000e4c955dbc140174f247f260", + ), + ( + 310, + "00000000000000000000000000000000000000000eb5fabb18e954747a74b000", + ), + ( + 311, + "00000000000000000000000000000000000000000f284806995597f3cd0bfd80", + ), + ( + 312, + "00000000000000000000000000000000000000000f9ba14e6a962918bf2127c0", + ), + ( + 313, + "000000000000000000000000000000000000000010080df526f4ff21960f1a40", + ), + ( + 314, + "0000000000000000000000000000000000000000106a692d441a6aad1cace9e0", + ), + ( + 315, + "000000000000000000000000000000000000000010db77996e285750cd8b9c80", + ), + ( + 316, + 
"0000000000000000000000000000000000000000114c850e564cfaa41534e5a0", + ), + ( + 317, + "000000000000000000000000000000000000000011c8c20e26e90338310d8b40", + ), + ( + 318, + "000000000000000000000000000000000000000012416d3a1b42c5f9e33e93e0", + ), + ( + 319, + "000000000000000000000000000000000000000012bad0326db68dfbabe3a0a0", + ), + ( + 320, + "0000000000000000000000000000000000000000133891fe722cd8f8d46b71e0", + ), + ( + 321, + "000000000000000000000000000000000000000013b4cf153adbbd38ac356200", + ), + ( + 322, + "0000000000000000000000000000000000000000143f25d8093643758a467060", + ), + ( + 323, + "000000000000000000000000000000000000000014c95e395adc5f2947c38f60", + ), + ( + 324, + "0000000000000000000000000000000000000000155898b9afe71b5bc6234aa0", + ), + ( + 325, + "000000000000000000000000000000000000000015d0d64858237f52b67fbd80", + ), + ( + 326, + "0000000000000000000000000000000000000000164edf3c9afdff38aca62a40", + ), + ( + 327, + "000000000000000000000000000000000000000016d815351bf2448270b26bc0", + ), + ( + 328, + "0000000000000000000000000000000000000000175dce415adf182a317efee0", + ), + ( + 329, + "000000000000000000000000000000000000000017e305e5e5ebe1b42a2283c0", + ), + ( + 330, + "000000000000000000000000000000000000000018769f070c2824c962eee160", + ), + ( + 331, + "0000000000000000000000000000000000000000190bc46a36b8e7956e861a40", + ), + ( + 332, + "000000000000000000000000000000000000000019a549dd7f975730622fb1e0", + ), + ( + 333, + "00000000000000000000000000000000000000001a40e2926d569536de587200", + ), + ( + 334, + "00000000000000000000000000000000000000001ada8179bddb7efbe43bb160", + ), + ( + 335, + "00000000000000000000000000000000000000001b771d7dcf1fac50373e6440", + ), + ( + 336, + "00000000000000000000000000000000000000001c1cd59725d81e6e21d5cae0", + ), + ( + 337, + "00000000000000000000000000000000000000001cc5bcc99d2c4a90357ad360", + ), + ( + 338, + "00000000000000000000000000000000000000001d595888caa6d458e6efa260", + ), + ( + 339, + "00000000000000000000000000000000000000001e0cbd014668d1e4d9ba8e40", + ), + ( + 340, + "00000000000000000000000000000000000000001ea37d7a3f2552a2f909f620", + ), + ( + 341, + "00000000000000000000000000000000000000001f3241a19347d4dd02c83d00", + ), + ( + 342, + "00000000000000000000000000000000000000001f99213be9ee53bddf9ee5c0", + ), + ( + 343, + "00000000000000000000000000000000000000001ffb0ee21327a85c0b6cd6c0", + ), + ( + 344, + "00000000000000000000000000000000000000002062e31d9a89a510058f0680", + ), + ( + 345, + "000000000000000000000000000000000000000020d24e4a9d0743295882b380", + ), + ( + 346, + "0000000000000000000000000000000000000000215078acda07c153babfb140", + ), + ( + 347, + "000000000000000000000000000000000000000021d45e243085daf592b0bbe0", + ), + ( + 348, + "0000000000000000000000000000000000000000225c6fa2067f24b11235b6a0", + ), + ( + 349, + "000000000000000000000000000000000000000022eaeae8d7274e795d554f80", + ), + ( + 350, + "0000000000000000000000000000000000000000237ac17de8cd15067a6a0fc0", + ), + ( + 351, + "00000000000000000000000000000000000000002415e366c94c1e34c7f72b20", + ), + ( + 352, + "000000000000000000000000000000000000000024b84a0606d0a6eff7d24240", + ), + ( + 353, + "000000000000000000000000000000000000000025584400aa7a24ab60f95da0", + ), + ( + 354, + "000000000000000000000000000000000000000026058fbce96b8fd898fb4440", + ), + ( + 355, + "000000000000000000000000000000000000000026b368bd9b25fad76f8f80e0", + ), + ( + 356, + "00000000000000000000000000000000000000002761f842fb541ec705fbab80", + ), + ( + 357, + 
"00000000000000000000000000000000000000002820cc635abe2ef6bb03bfa0", + ), + ( + 358, + "000000000000000000000000000000000000000028dff750d76099ef8067b5e0", + ), + ( + 359, + "000000000000000000000000000000000000000029a847072a5004727d7bb6c0", + ), + ( + 360, + "00000000000000000000000000000000000000002a6d9a7894891e6c8d042a60", + ), + ( + 361, + "00000000000000000000000000000000000000002b323ae9b7f6eaaec69c08a0", + ), + ( + 362, + "00000000000000000000000000000000000000002bfefb71afd13545bc2444e0", + ), + ( + 363, + "00000000000000000000000000000000000000002cc925a3aa907374b5f0b6c0", + ), + ( + 364, + "00000000000000000000000000000000000000002d9e8bc0134624c9d3ce88c0", + ), + ( + 365, + "00000000000000000000000000000000000000002e7e60cf6c8d3d2643d9ed00", + ), + ]; + + let spv_client = + SpvClient::new(db_path, 0, None, BitcoinNetworkType::Mainnet, false, false).unwrap(); + for (interval, work_str) in chain_work.iter() { + let calculated_work = spv_client.find_interval_work(*interval).unwrap().unwrap(); + let expected_work = Uint256::from_hex_be(work_str).unwrap(); + assert_eq!(calculated_work, expected_work); + } } #[test] @@ -1473,8 +3021,6 @@ mod test { // reorg is ignored assert_eq!(new_tip, 40321); - let hdr = spv_client.read_block_header(new_tip - 1).unwrap().unwrap(); - eprintln!("{}", &hdr.header.bitcoin_hash()); let total_work_after = spv_client.update_chain_work().unwrap(); assert_eq!(total_work_after, total_work_before); } @@ -1635,7 +3181,9 @@ mod test { // chain reorg detected! assert_eq!(new_tip, 40318); + + // total work increased let total_work_after = spv_client.update_chain_work().unwrap(); - assert_eq!(total_work_after, total_work_before); + assert!(total_work_after > total_work_before); } } From fcbc6606083d6cebef3083d4cbb4322ee02ec674 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 13:56:55 -0400 Subject: [PATCH 08/20] chore: s/InvalidDifficulty/InvalidChainWork/g --- src/burnchains/bitcoin/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/burnchains/bitcoin/mod.rs b/src/burnchains/bitcoin/mod.rs index c9b7ab5d3..3822ea0de 100644 --- a/src/burnchains/bitcoin/mod.rs +++ b/src/burnchains/bitcoin/mod.rs @@ -77,10 +77,10 @@ pub enum Error { NoncontiguousHeader, /// Missing header MissingHeader, - /// Invalid target + /// Invalid header proof-of-work (i.e. 
due to a bad timestamp or a bad `bits` field) InvalidPoW, - /// Bad difficulty - InvalidDifficulty, + /// Chainwork would decrease by including a given header + InvalidChainWork, /// Wrong number of bytes for constructing an address InvalidByteSequence, /// Configuration error @@ -109,7 +109,7 @@ impl fmt::Display for Error { Error::NoncontiguousHeader => write!(f, "Non-contiguous header"), Error::MissingHeader => write!(f, "Missing header"), Error::InvalidPoW => write!(f, "Invalid proof of work"), - Error::InvalidDifficulty => write!(f, "Chain difficulty cannot decrease"), + Error::InvalidChainWork => write!(f, "Chain difficulty cannot decrease"), Error::InvalidByteSequence => write!(f, "Invalid sequence of bytes"), Error::ConfigError(ref e_str) => fmt::Display::fmt(e_str, f), Error::BlockchainHeight => write!(f, "Value is beyond the end of the blockchain"), @@ -136,7 +136,7 @@ impl error::Error for Error { Error::NoncontiguousHeader => None, Error::MissingHeader => None, Error::InvalidPoW => None, - Error::InvalidDifficulty => None, + Error::InvalidChainWork => None, Error::InvalidByteSequence => None, Error::ConfigError(ref _e_str) => None, Error::BlockchainHeight => None, From 4721f670789bbe35ae16b183c6d2ac52798f28b2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 13:57:14 -0400 Subject: [PATCH 09/20] fix: use the correct chainwork calculation by summing over individual headers --- src/burnchains/bitcoin/spv.rs | 220 ++++++++++------------------------ 1 file changed, 66 insertions(+), 154 deletions(-) diff --git a/src/burnchains/bitcoin/spv.rs b/src/burnchains/bitcoin/spv.rs index c86d8b8df..fa21c5559 100644 --- a/src/burnchains/bitcoin/spv.rs +++ b/src/burnchains/bitcoin/spv.rs @@ -85,6 +85,10 @@ const SPV_INITIAL_SCHEMA: &[&'static str] = &[ "CREATE TABLE db_config(version TEXT NOT NULL);", ]; +// store the running chain work totals for each difficulty interval. +// unlike the `headers` table, this table will never be deleted from, since we use it to determine +// whether or not newly-arrived headers represent a better chain than the best-known chain. The +// only way to _replace_ a row is to find a header difficulty interval with a _higher_ work score. 
const SPV_SCHEMA_2: &[&'static str] = &[r#" CREATE TABLE chain_work( interval INTEGER PRIMARY KEY, work TEXT NOT NULL ); "#]; @@ -101,9 +105,6 @@ pub struct SpvClient { readwrite: bool, reverse_order: bool, headers_db: DBConn, - - // only writeable in #[cfg(test)] - ignore_work_checks: bool, } impl FromColumn<Sha256dHash> for Sha256dHash { @@ -151,7 +152,6 @@ impl SpvClient { readwrite: readwrite, reverse_order: reverse_order, headers_db: conn, - ignore_work_checks: false, }; if readwrite { @@ -180,7 +180,6 @@ readwrite: readwrite, reverse_order: reverse_order, headers_db: conn, - ignore_work_checks: true, }; if readwrite { @@ -190,11 +189,6 @@ Ok(client) } - #[cfg(test)] - pub fn set_ignore_work_checks(&mut self, ignore: bool) { - self.ignore_work_checks = ignore; - } - pub fn conn(&self) -> &DBConn { &self.headers_db } @@ -336,77 +330,13 @@ indexer.peer_communicate(self, true) } - /// Calculate the work of a single header given the first and last header in the interval - fn get_expected_work_in_range( - first_header: &LoneBlockHeader, - last_header: &LoneBlockHeader, - ) -> Uint256 { - let (_, target) = SpvClient::get_target_between_headers(&first_header, &last_header); - let work = - (Uint256::max() - target) / (target + Uint256::from_u64(1)) + Uint256::from_u64(1); - test_debug!("{}, {}", &work, &target); - work - } - - /// Calculate the total work over a full interval of headers. - fn get_full_interval_work(interval_headers: &Vec<LoneBlockHeader>) -> Uint256 { - assert_eq!(interval_headers.len() as u64, BLOCK_DIFFICULTY_CHUNK_SIZE); - let first_header = interval_headers - .first() - .expect("FATAL: no first header in non-empty list of headers"); - let last_header = interval_headers - .last() - .expect("FATAL: no last header in non-empty list of headers"); - SpvClient::get_expected_work_in_range(first_header, last_header) - * Uint256::from_u64(BLOCK_DIFFICULTY_CHUNK_SIZE) - } - - /// Calculate a partial interval's work, given the last full interval before it - fn get_partial_interval_work( - &self, - last_full_interval: u64, - partial_interval_len: usize, - ) -> Result<Option<Uint256>, btc_error> { - let last_interval_work = self.get_interval_header_work(last_full_interval)?; - if let Some(last_interval_work) = last_interval_work { - let work = last_interval_work * Uint256::from_u64(partial_interval_len as u64); - Ok(Some(work)) - } else { - Ok(None) + /// Calculate the total work over a given interval of headers. + fn get_interval_work(interval_headers: &Vec<LoneBlockHeader>) -> Uint256 { + let mut work = Uint256::from_u64(0); + for hdr in interval_headers.iter() { + work = work + hdr.header.work(); } - } - - /// Calculate the work done by a single header in `interval`, if we have the headers for that - /// interval - pub fn get_interval_header_work(&self, interval: u64) -> Result<Option<Uint256>, btc_error> { - let first_header = - match self.read_block_header((interval - 1) * BLOCK_DIFFICULTY_CHUNK_SIZE)? { - Some(res) => res, - None => { - test_debug!( - "No header at height {}", - (interval - 1) * BLOCK_DIFFICULTY_CHUNK_SIZE - ); - return Ok(None); - } - }; - - let last_header = - match self.read_block_header(interval * BLOCK_DIFFICULTY_CHUNK_SIZE - 1)? { - Some(res) => res, - None => { - test_debug!( - "No header at height {}", - interval * BLOCK_DIFFICULTY_CHUNK_SIZE - 1 - ); - return Ok(None); - } - }; - - Ok(Some(SpvClient::get_expected_work_in_range( - &first_header, - &last_header, - ))) + work } /// Find the highest interval for which we have a chain work score.
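The replacement get_interval_work() above sums each header's own proof-of-work via `hdr.header.work()`, instead of sampling the interval's boundary headers and scaling by the interval length as the deleted code did, so an interval's score now depends on every header's `bits` field. For reference, a minimal sketch of the conventional per-header work formula, the same expression visible in the deleted get_expected_work_in_range() and the one BlockHeader::work() is expected to implement (a sketch only, assuming the Uint256 operators used elsewhere in this series):

    /// Expected number of hashes needed to meet `target`: roughly
    /// 2^256 / (target + 1), computed without overflowing 256 bits as
    /// (max - target) / (target + 1) + 1, since Uint256::max() == 2^256 - 1.
    fn header_work(target: Uint256) -> Uint256 {
        (Uint256::max() - target) / (target + Uint256::from_u64(1)) + Uint256::from_u64(1)
    }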
@@ -436,7 +366,7 @@ impl SpvClient { ) .optional() .map_err(db_error::SqliteError)?; - Ok(work_hex.map(|x| Uint256::from_hex_le(&x).expect("FATAL: work is not a uint256"))) + Ok(work_hex.map(|x| Uint256::from_hex_be(&x).expect("FATAL: work is not a uint256"))) } /// Store an interval's running total work. /// currently-stored interval. pub fn store_interval_work(&mut self, interval: u64, work: Uint256) -> Result<(), btc_error> { if let Some(cur_work) = self.find_interval_work(interval)? { - if cur_work > work && !self.ignore_work_checks { + if cur_work > work { error!( "Tried to store work {} to interval {}, which has work {} already", work, interval, cur_work ); - return Err(btc_error::InvalidDifficulty); + return Err(btc_error::InvalidChainWork); } } let tx = self.tx_begin()?; - let args: &[&dyn ToSql] = &[&u64_to_sql(interval)?, &work.to_hex_le()]; + let args: &[&dyn ToSql] = &[&u64_to_sql(interval)?, &work.to_hex_be()]; tx.execute( "INSERT OR REPLACE INTO chain_work (interval,work) VALUES (?1,?2)", args, @@ -466,7 +396,8 @@ impl SpvClient { } /// Update the total chain work table up to a given interval (even if partial). - /// Returns the total work + /// This method is idempotent. + /// Returns the total work. pub fn update_chain_work(&mut self) -> Result<Uint256, btc_error> { let highest_interval = self.find_highest_work_score_interval()?; let mut work_so_far = if highest_interval > 0 { @@ -478,7 +409,7 @@ impl SpvClient { let last_interval = self.get_headers_height()? / BLOCK_DIFFICULTY_CHUNK_SIZE + 1; - debug!( + test_debug!( "Highest work-calculation interval is {} (height {}), work {}; update to {}", highest_interval, highest_interval * BLOCK_DIFFICULTY_CHUNK_SIZE, @@ -491,33 +422,16 @@ impl SpvClient { (interval - 1) * BLOCK_DIFFICULTY_CHUNK_SIZE, interval * BLOCK_DIFFICULTY_CHUNK_SIZE, )?; - let interval_work = if interval_headers.len() == BLOCK_DIFFICULTY_CHUNK_SIZE as usize { - // full interval - let work = SpvClient::get_full_interval_work(&interval_headers); - work_so_far = work_so_far + work; + let interval_work = SpvClient::get_interval_work(&interval_headers); + work_so_far = work_so_far + interval_work; + + if interval_headers.len() == BLOCK_DIFFICULTY_CHUNK_SIZE as usize { self.store_interval_work(interval - 1, work_so_far)?; - work } else { - // partial (and last) interval - let work = if interval > 2 { - let work = self - .get_partial_interval_work(interval - 2, interval_headers.len())? - .expect(&format!( - "FATAL: do not have work score for interval {}", - interval - 2 - )); - - work_so_far = work_so_far + work; - work - } else { - Uint256::from_u64(0) - }; - partial = true; - work - }; + partial = true; + } - debug!( + test_debug!( "Chain work in {} interval {} ({}-{}) is {}, total is {}", if partial { "partial" } else { "full" }, interval - 1, @@ -538,27 +452,30 @@ impl SpvClient { /// You will have needed to call update_chain_work() prior to this after inserting new headers. pub fn get_chain_work(&self) -> Result<Uint256, btc_error> { let highest_full_interval = self.find_highest_work_score_interval()?; - if highest_full_interval == 0 { - return Ok(Uint256::from_u64(0)); - } + let highest_interval_work = if highest_full_interval == 0 { + Uint256::from_u64(0) + } else { + self.find_interval_work(highest_full_interval)? + .expect("FATAL: have interval but no work") + }; - let highest_interval_work = self - .find_interval_work(highest_full_interval)?
- .expect("FATAL: have interval but no work"); + let partial_interval = if highest_full_interval == 0 { + 0 + } else { + highest_full_interval + 1 + }; - let partial_interval = highest_full_interval + 1; let partial_interval_headers = self.read_block_headers( partial_interval * BLOCK_DIFFICULTY_CHUNK_SIZE, (partial_interval + 1) * BLOCK_DIFFICULTY_CHUNK_SIZE, )?; - assert!(partial_interval_headers.len() < BLOCK_DIFFICULTY_CHUNK_SIZE as usize); + assert!( + partial_interval_headers.len() < BLOCK_DIFFICULTY_CHUNK_SIZE as usize, + "interval {} is not partial", + partial_interval + ); - let partial_interval_work = self - .get_partial_interval_work(highest_full_interval, partial_interval_headers.len())? - .expect(&format!( - "FATAL: no work score for interval {}", - highest_full_interval - )); + let partial_interval_work = SpvClient::get_interval_work(&partial_interval_headers); debug!("Chain work: highest work-calculated interval is {} with total work {} partial {} ({} headers)", &highest_full_interval, &highest_interval_work, &partial_interval_work, partial_interval_headers.len()); Ok(highest_interval_work + partial_interval_work) @@ -635,9 +552,9 @@ impl SpvClient { past_11_headers.iter().map(|hdr| hdr.header.time).collect(); past_timestamps.sort(); - if header_i.time < past_timestamps[5] { + if header_i.time <= past_timestamps[5] { error!( - "Block {} timestamp {} < {} (median of {:?})", + "Block {} timestamp {} <= {} (median of {:?})", block_height, header_i.time, past_timestamps[5], &past_timestamps ); return Err(btc_error::InvalidPoW); @@ -840,10 +757,11 @@ impl SpvClient { let num_headers = block_headers.len(); let first_header_hash = block_headers[0].header.bitcoin_hash(); let last_header_hash = block_headers[block_headers.len() - 1].header.bitcoin_hash(); - let total_work_before = self.get_chain_work()?; + let total_work_before = self.update_chain_work()?; if !self.reverse_order { - // fetching headers in ascending order + // fetching headers in ascending order, so verify that the first item in + // `block_headers` connects to a parent in the DB (if it has one) self.insert_block_headers_after(insert_height, block_headers) .map_err(|e| { error!("Failed to insert block headers: {:?}", &e); @@ -864,7 +782,8 @@ impl SpvClient { e })?; } else { - // fetching headers in descending order + // fetching headers in descending order, so verify that the last item in + // `block_headers` connects to a child in the DB (if it has one) self.insert_block_headers_before(insert_height, block_headers) .map_err(|e| { error!("Failed to insert block headers: {:?}", &e); @@ -895,7 +814,7 @@ impl SpvClient { "New headers represent less work than the old headers ({} < {})", total_work_before, total_work_after ); - return Err(btc_error::InvalidDifficulty); + return Err(btc_error::InvalidChainWork); } debug!( @@ -932,6 +851,15 @@ impl SpvClient { Ok(()) } + #[cfg(test)] + pub fn test_write_block_headers( + &mut self, + height: u64, + headers: Vec, + ) -> Result<(), btc_error> { + self.write_block_headers(height, headers) + } + /// Insert block headers into the headers DB. /// Verify that the first header's parent exists and connects with this header chain, and verify that /// the headers are themselves contiguous. @@ -1033,23 +961,6 @@ impl SpvClient { } } - match self.read_block_header(start_height)? { - Some(parent_header) => { - // contiguous? 
- if block_headers[0].header.prev_blockhash != parent_header.header.bitcoin_hash() { - warn!("Received discontiguous headers at height {}: we have parent {:?} ({}), but were given {:?} ({})", - start_height, &parent_header.header, parent_header.header.bitcoin_hash(), &block_headers[0].header, &block_headers[0].header.bitcoin_hash()); - return Err(btc_error::NoncontiguousHeader); - } - } - None => { - debug!( - "No header for parent block {}, so will not validate continuity", - start_height - 1 - ); - } - } - // store them self.write_block_headers(start_height + 1, block_headers) } @@ -1650,15 +1561,16 @@ mod test { assert_eq!(spv_client.read_block_headers(0, 10).unwrap(), all_headers); - // should fail - if let Err(btc_error::NoncontiguousHeader) = - spv_client.insert_block_headers_before(2, headers.clone()) - { - } else { - assert!(false); - } + // should succeed, since we only check that the last header connects + // to its child, if the child is stored at all + spv_client + .insert_block_headers_before(1, headers.clone()) + .unwrap(); + spv_client + .insert_block_headers_before(2, headers.clone()) + .unwrap(); - // should fail + // should fail now, since there's a child to check if let Err(btc_error::NoncontiguousHeader) = spv_client.insert_block_headers_before(1, headers.clone()) { From 6c9221aa7af4fb7ac5b26192a143128b91d68430 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 13:57:39 -0400 Subject: [PATCH 10/20] fix: update sync_with_indexer() to assume that find_chain_reorg() does the bookkeeping on the SPV DB, so it doesn't have to --- src/burnchains/burnchain.rs | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/src/burnchains/burnchain.rs b/src/burnchains/burnchain.rs index 0b58d02d9..d29740f36 100644 --- a/src/burnchains/burnchain.rs +++ b/src/burnchains/burnchain.rs @@ -980,27 +980,24 @@ impl Burnchain { if sync_height + 1 < orig_header_height { // a reorg happened warn!( - "Dropping headers higher than {} due to burnchain reorg", + "Dropped headers higher than {} due to burnchain reorg", sync_height ); - indexer.drop_headers(sync_height)?; } // get latest headers. - debug!("Sync headers from {}", sync_height); + let highest_header = indexer.get_highest_header_height()?; - let end_block = indexer.sync_headers(sync_height, None)?; - let mut start_block = match sync_height { - 0 => 0, - _ => sync_height, - }; + debug!("Sync headers from {}", highest_header); + let end_block = indexer.sync_headers(highest_header, None)?; + let mut start_block = sync_height; if db_height < start_block { start_block = db_height; } debug!( "Sync'ed headers from {} to {}. DB at {}", - start_block, end_block, db_height + highest_header, end_block, db_height ); if start_block == db_height && db_height == end_block { // all caught up @@ -1218,22 +1215,22 @@ impl Burnchain { let db_height = burn_chain_tip.block_height; - // handle reorgs + // handle reorgs (which also updates our best-known chain work and headers DB) let (sync_height, did_reorg) = Burnchain::sync_reorg(indexer)?; if did_reorg { // a reorg happened warn!( - "Dropping headers higher than {} due to burnchain reorg", + "Dropped headers higher than {} due to burnchain reorg", sync_height ); - indexer.drop_headers(sync_height)?; } // get latest headers. 
debug!("Sync headers from {}", sync_height); - // fetch all headers, no matter what - let mut end_block = indexer.sync_headers(sync_height, None)?; + // fetch all new headers + let highest_header = indexer.get_highest_header_height()?; + let mut end_block = indexer.sync_headers(highest_header, None)?; if did_reorg && sync_height > 0 { // a reorg happened, and the last header fetched // is on a smaller fork than the one we just @@ -1258,7 +1255,7 @@ impl Burnchain { debug!( "Sync'ed headers from {} to {}. DB at {}", - sync_height, end_block, db_height + highest_header, end_block, db_height ); if let Some(target_block_height) = target_block_height_opt { From 7670d02acde965a01f43e0eea5f8e23177b11b09 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 13:58:12 -0400 Subject: [PATCH 11/20] chore: add big-endian code for uint256, which is easier to read --- stacks-common/src/util/uint.rs | 46 +++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/stacks-common/src/util/uint.rs b/stacks-common/src/util/uint.rs index bcf69dfe1..a448cc0e1 100644 --- a/stacks-common/src/util/uint.rs +++ b/stacks-common/src/util/uint.rs @@ -131,7 +131,7 @@ macro_rules! construct_uint { $name(ret) } - /// as byte array + /// as litte-endian byte array pub fn to_u8_slice(&self) -> [u8; $n_words * 8] { let mut ret = [0u8; $n_words * 8]; for i in 0..$n_words { @@ -143,6 +143,18 @@ macro_rules! construct_uint { ret } + /// as big-endian byte array + pub fn to_u8_slice_be(&self) -> [u8; $n_words * 8] { + let mut ret = [0u8; $n_words * 8]; + for i in 0..$n_words { + let bytes = self.0[i].to_le_bytes(); + for j in 0..bytes.len() { + ret[$n_words * 8 - 1 - (i * 8 + j)] = bytes[j]; + } + } + ret + } + /// from a little-endian hex string /// padding is expected pub fn from_hex_le(hex: &str) -> Option<$name> { @@ -167,6 +179,31 @@ macro_rules! 
construct_uint { pub fn to_hex_le(&self) -> String { to_hex(&self.to_u8_slice()) } + + /// from a big-endian hex string + /// padding is expected + pub fn from_hex_be(hex: &str) -> Option<$name> { + let bytes = hex_bytes(hex).ok()?; + if bytes.len() % 8 != 0 { + return None; + } + if bytes.len() / 8 != $n_words { + return None; + } + let mut ret = [0u64; $n_words]; + for i in 0..(bytes.len() / 8) { + let mut next_bytes = [0u8; 8]; + next_bytes.copy_from_slice(&bytes[8 * i..(8 * (i + 1))]); + let next = u64::from_be_bytes(next_bytes); + ret[(bytes.len() / 8) - 1 - i] = next; + } + Some($name(ret)) + } + + /// to a big-endian hex string + pub fn to_hex_be(&self) -> String { + to_hex(&self.to_u8_slice_be()) + } } impl ::std::ops::Add<$name> for $name { @@ -701,10 +738,17 @@ mod tests { #[test] pub fn hex_codec() { let init = Uint256::from_u64(0xDEADBEEFDEADBEEF); + // little-endian representation let hex_init = "efbeaddeefbeadde000000000000000000000000000000000000000000000000"; assert_eq!(Uint256::from_hex_le(&hex_init).unwrap(), init); assert_eq!(&init.to_hex_le(), hex_init); assert_eq!(Uint256::from_hex_le(&init.to_hex_le()).unwrap(), init); + + // big-endian representation + let hex_init = "000000000000000000000000000000000000000000000000deadbeefdeadbeef"; + assert_eq!(Uint256::from_hex_be(&hex_init).unwrap(), init); + assert_eq!(&init.to_hex_be(), hex_init); + assert_eq!(Uint256::from_hex_be(&init.to_hex_be()).unwrap(), init); } } From 437381e339854956aeb0d6fec4374ce32bb1f7cb Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 15:43:56 -0400 Subject: [PATCH 12/20] fix: test that the SPV chain tip is within 2 hours of now --- src/burnchains/bitcoin/indexer.rs | 136 +++++++++++++++++++++++++++++- 1 file changed, 134 insertions(+), 2 deletions(-) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index c35d9616b..2eab6170e 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -46,6 +46,7 @@ use stacks_common::deps_common::bitcoin::blockdata::block::LoneBlockHeader; use stacks_common::deps_common::bitcoin::network::message::NetworkMessage; use stacks_common::deps_common::bitcoin::network::serialize::BitcoinHash; use stacks_common::deps_common::bitcoin::network::serialize::Error as btc_serialization_err; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::log; use crate::core::{ @@ -762,6 +763,35 @@ impl BitcoinIndexer { Ok(new_tip) } + + /// Verify that the last block header we have is within 2 hours of now. + /// Return burnchain_error::TrySyncAgain if not, and delete the offending header + pub fn check_chain_tip_timestamp(&mut self) -> Result<(), burnchain_error> { + // if there was no target block height, then verify that the highest header fetched is within + // 2 hours of now. Remove headers that don't meet this criterion. + let highest_header_height = self.get_highest_header_height()?; + if highest_header_height == 0 { + return Err(burnchain_error::TrySyncAgain); + } + + let highest_header = self + .read_headers(highest_header_height, highest_header_height + 1)? 
+ .pop() + .expect("FATAL: no header at highest known height"); + let now = get_epoch_time_secs(); + if now - 2 * 60 * 60 <= (highest_header.block_header.header.time as u64) + && (highest_header.block_header.header.time as u64) <= now + 2 * 60 * 60 + { + // we're good + return Ok(()); + } + warn!( + "Header at height {} is not within 2 hours of now (is at {})", + highest_header_height, highest_header.block_header.header.time + ); + self.drop_headers(highest_header_height.saturating_sub(1))?; + return Err(burnchain_error::TrySyncAgain); + } } impl Drop for BitcoinIndexer { @@ -942,11 +972,18 @@ impl BurnchainIndexer for BitcoinIndexer { return Ok(end_height.unwrap()); } - self.sync_last_headers(start_height, end_height) + let new_height = self + .sync_last_headers(start_height, end_height) .map_err(|e| match e { btc_error::TimedOut => burnchain_error::TrySyncAgain, x => burnchain_error::Bitcoin(x), - }) + })?; + + // make sure the headers are up-to-date if we have no target height + if end_height.is_none() { + self.check_chain_tip_timestamp()?; + } + Ok(new_height) } /// Drop headers after a given height -- i.e. to accommodate a reorg @@ -988,6 +1025,7 @@ mod test { deserialize, serialize, BitcoinHash, }; use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; + use stacks_common::util::get_epoch_time_secs; use stacks_common::util::uint::Uint256; use std::env; @@ -3186,4 +3224,98 @@ mod test { let total_work_after = spv_client.update_chain_work().unwrap(); assert!(total_work_after > total_work_before); } + + #[test] + fn test_check_header_timestamp() { + let db_path = "/tmp/test-indexer-check-header-timestamp.dat"; + + if fs::metadata(db_path).is_ok() { + fs::remove_file(db_path).unwrap(); + } + + let headers = vec![ + LoneBlockHeader { + header: BlockHeader { + bits: 545259519, + merkle_root: Sha256dHash::from_hex( + "20bee96458517fc5082a9720ce6207b5742f2b18e4e0a7e7373342725d80f88c", + ) + .unwrap(), + nonce: 2, + prev_blockhash: Sha256dHash::from_hex( + "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206", + ) + .unwrap(), + time: (get_epoch_time_secs() - 1) as u32, + version: 0x20000000, + }, + tx_count: VarInt(0), + }, + LoneBlockHeader { + header: BlockHeader { + bits: 545259519, + merkle_root: Sha256dHash::from_hex( + "39d1a6f1ee7a5903797f92ec89e4c58549013f38114186fc2eb6e5218cb2d0ac", + ) + .unwrap(), + nonce: 1, + prev_blockhash: Sha256dHash::from_hex( + "606d31daaaa5919f3720d8440dd99d31f2a4e4189c65879f19ae43268425e74b", + ) + .unwrap(), + time: (get_epoch_time_secs() - 1) as u32, + version: 0x20000000, + }, + tx_count: VarInt(0), + }, + LoneBlockHeader { + header: BlockHeader { + bits: 545259519, + merkle_root: Sha256dHash::from_hex( + "a7e04ed25f589938eb5627abb7b5913dd77b8955bcdf72d7f111d0a71e346e47", + ) + .unwrap(), + nonce: 4, + prev_blockhash: Sha256dHash::from_hex( + "2fa2f451ac27f0e5cd3760ba6cdf34ef46adb76a44d96bc0f3bf3e713dd955f0", + ) + .unwrap(), + time: 1587626882, + version: 0x20000000, + }, + tx_count: VarInt(0), + }, + ]; + + // set up SPV client so we don't have chain work at first + let mut spv_client = SpvClient::new_without_migration( + &db_path, + 0, + None, + BitcoinNetworkType::Regtest, + true, + false, + ) + .unwrap(); + + spv_client + .test_write_block_headers(0, headers.clone()) + .unwrap(); + assert_eq!(spv_client.get_highest_header_height().unwrap(), 2); + + let mut indexer = BitcoinIndexer::new( + BitcoinIndexerConfig::test_default(db_path.to_string()), + BitcoinIndexerRuntime::new(BitcoinNetworkType::Regtest), + ); + + if let
Err(burnchain_error::TrySyncAgain) = indexer.check_chain_tip_timestamp() { + } else { + panic!("stale tip not detected"); + } + + // peeled + assert_eq!(spv_client.get_highest_header_height().unwrap(), 1); + assert!(indexer.check_chain_tip_timestamp().is_ok()); + assert_eq!(spv_client.get_highest_header_height().unwrap(), 1); + } } From debf40c35cc27ce14715afe8db968a844125b005 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 15:44:34 -0400 Subject: [PATCH 13/20] refactor: better name --- src/burnchains/burnchain.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/burnchains/burnchain.rs b/src/burnchains/burnchain.rs index d29740f36..20c7500b5 100644 --- a/src/burnchains/burnchain.rs +++ b/src/burnchains/burnchain.rs @@ -1229,8 +1229,8 @@ impl Burnchain { debug!("Sync headers from {}", sync_height); // fetch all new headers - let highest_header = indexer.get_highest_header_height()?; - let mut end_block = indexer.sync_headers(highest_header, None)?; + let highest_header_height = indexer.get_highest_header_height()?; + let mut end_block = indexer.sync_headers(highest_header_height, None)?; if did_reorg && sync_height > 0 { // a reorg happened, and the last header fetched // is on a smaller fork than the one we just @@ -1255,7 +1255,7 @@ impl Burnchain { debug!( "Sync'ed headers from {} to {}. DB at {}", - highest_header, end_block, db_height + highest_header_height, end_block, db_height ); if let Some(target_block_height) = target_block_height_opt { From 1cc98b552f8d2ed00e4e6d36b2fa7271b927edaa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 26 May 2022 21:35:12 -0400 Subject: [PATCH 14/20] fix: prime the .reorg DB with the parent of the first header we expect to download (off-by-one error) --- src/burnchains/bitcoin/indexer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index 2eab6170e..b703a66e4 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -456,9 +456,9 @@ impl BitcoinIndexer { let interval_start_block = start_block / BLOCK_DIFFICULTY_CHUNK_SIZE - 2; let base_block = interval_start_block * BLOCK_DIFFICULTY_CHUNK_SIZE; let interval_headers = - canonical_spv_client.read_block_headers(base_block, start_block)?; + canonical_spv_client.read_block_headers(base_block, start_block + 1)?; assert!( - interval_headers.len() == (start_block - base_block) as usize, + interval_headers.len() >= (start_block - base_block) as usize, "BUG: missing headers for {}-{}", base_block, start_block From 9f510fd59432467aec2c2cef715c02d2231fb61d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 28 May 2022 22:21:03 -0400 Subject: [PATCH 15/20] fix: use .saturating_sub() when determining the header range to copy over to the .reorg DB, and use K/V logging. Also, add multi-word test vectors to hex codec for uint256 --- src/burnchains/bitcoin/indexer.rs | 4 ++-- stacks-common/src/util/uint.rs | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index b703a66e4..f6aef4b82 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -453,7 +453,7 @@ impl BitcoinIndexer { // * needs the last difficulty interval of headers (note that the current // interval is `start_block / BLOCK_DIFFICULTY_CHUNK_SIZE - 1). 
// * needs the last interval's chain work calculation - let interval_start_block = start_block / BLOCK_DIFFICULTY_CHUNK_SIZE - 2; + let interval_start_block = (start_block / BLOCK_DIFFICULTY_CHUNK_SIZE).saturating_sub(2); let base_block = interval_start_block * BLOCK_DIFFICULTY_CHUNK_SIZE; let interval_headers = canonical_spv_client.read_block_headers(base_block, start_block + 1)?; @@ -703,7 +703,7 @@ impl BitcoinIndexer { let reorg_total_work = reorg_spv_client.update_chain_work()?; let orig_total_work = orig_spv_client.get_chain_work()?; - debug!("Bitcoin headers history is consistent up to {}. Orig chainwork: {}, reorg chainwork: {}", new_tip, orig_total_work, reorg_total_work); + debug!("Bitcoin headers history is consistent up to {}", new_tip; "Orig chainwork" => %origin_total_work, "Reorg chainwork" => %reorg_total_work)i; if orig_total_work < reorg_total_work { let reorg_tip = reorg_spv_client.get_headers_height()?; diff --git a/stacks-common/src/util/uint.rs b/stacks-common/src/util/uint.rs index a448cc0e1..3c5c0d8e8 100644 --- a/stacks-common/src/util/uint.rs +++ b/stacks-common/src/util/uint.rs @@ -147,10 +147,9 @@ macro_rules! construct_uint { pub fn to_u8_slice_be(&self) -> [u8; $n_words * 8] { let mut ret = [0u8; $n_words * 8]; for i in 0..$n_words { - let bytes = self.0[i].to_le_bytes(); - for j in 0..bytes.len() { - ret[$n_words * 8 - 1 - (i * 8 + j)] = bytes[j]; - } + let word_end = $n_words * 8 - (i * 8); + let word_start = word_end - 8; + ret[word_start..word_end].copy_from_slice(&self.0[i].to_be_bytes()); } ret } @@ -737,16 +736,17 @@ mod tests { #[test] pub fn hex_codec() { - let init = Uint256::from_u64(0xDEADBEEFDEADBEEF); + let init = + Uint256::from_u64(0xDEADBEEFDEADBEEF) << 64 | Uint256::from_u64(0x0102030405060708); // little-endian representation - let hex_init = "efbeaddeefbeadde000000000000000000000000000000000000000000000000"; + let hex_init = "0807060504030201efbeaddeefbeadde00000000000000000000000000000000"; assert_eq!(Uint256::from_hex_le(&hex_init).unwrap(), init); assert_eq!(&init.to_hex_le(), hex_init); assert_eq!(Uint256::from_hex_le(&init.to_hex_le()).unwrap(), init); // big-endian representation - let hex_init = "000000000000000000000000000000000000000000000000deadbeefdeadbeef"; + let hex_init = "00000000000000000000000000000000deadbeefdeadbeef0102030405060708"; assert_eq!(Uint256::from_hex_be(&hex_init).unwrap(), init); assert_eq!(&init.to_hex_be(), hex_init); assert_eq!(Uint256::from_hex_be(&init.to_hex_be()).unwrap(), init); From e1319a9e81c5e390cd212789ffe3615ae0ada6a7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 28 May 2022 22:45:19 -0400 Subject: [PATCH 16/20] fix: compile-time error --- src/burnchains/bitcoin/indexer.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index f6aef4b82..a0b8a0f25 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -703,7 +703,9 @@ impl BitcoinIndexer { let reorg_total_work = reorg_spv_client.update_chain_work()?; let orig_total_work = orig_spv_client.get_chain_work()?; - debug!("Bitcoin headers history is consistent up to {}", new_tip; "Orig chainwork" => %origin_total_work, "Reorg chainwork" => %reorg_total_work)i; + debug!("Bitcoin headers history is consistent up to {}", new_tip; + "Orig chainwork" => %orig_total_work, + "Reorg chainwork" => %reorg_total_work); if orig_total_work < reorg_total_work { let reorg_tip = reorg_spv_client.get_headers_height()?; From 
5b326f9d35e3fbbdde8649704268115b5f3ce85c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 28 May 2022 22:45:44 -0400 Subject: [PATCH 17/20] chore: cargo fmt --- src/burnchains/bitcoin/indexer.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/burnchains/bitcoin/indexer.rs b/src/burnchains/bitcoin/indexer.rs index a0b8a0f25..402ef1436 100644 --- a/src/burnchains/bitcoin/indexer.rs +++ b/src/burnchains/bitcoin/indexer.rs @@ -453,7 +453,8 @@ impl BitcoinIndexer { // * needs the last difficulty interval of headers (note that the current // interval is `start_block / BLOCK_DIFFICULTY_CHUNK_SIZE - 1). // * needs the last interval's chain work calculation - let interval_start_block = (start_block / BLOCK_DIFFICULTY_CHUNK_SIZE).saturating_sub(2); + let interval_start_block = + (start_block / BLOCK_DIFFICULTY_CHUNK_SIZE).saturating_sub(2); let base_block = interval_start_block * BLOCK_DIFFICULTY_CHUNK_SIZE; let interval_headers = canonical_spv_client.read_block_headers(base_block, start_block + 1)?; From 762e007cafda91c1b05539e08d51947c79263885 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 31 May 2022 12:08:22 -0400 Subject: [PATCH 18/20] chore: add changelog for SPV chain work fix --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ad7801a17..4c849273e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,14 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [2.05.0.2.1] + +### Fixed +- Fixed a security bug in the SPV client whereby the chain work was not being + considered at all when determining the canonical Bitcoin fork. The SPV client +now only accepts a new Bitcoin fork if it has a higher chain work than any other +previously-seen chain (#3152). + ## [2.05.0.2.0] ### IMPORTANT! 
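One property worth noting about the big-endian encoding adopted in patches 11 and 15: with big-endian hex, numeric order agrees with plain string order, so the work values stored in the chain_work table can be compared and eyeballed without decoding, which is what makes the format "easier to read". A small hedged illustration, using only the Uint256 API shown above:

    let small = Uint256::from_u64(0x10);
    let big = Uint256::from_u64(0x200);
    assert!(small < big);
    // big-endian hex preserves numeric order under lexicographic comparison...
    assert!(small.to_hex_be() < big.to_hex_be());
    // ...while little-endian hex leads with the low-order bytes and does not
    assert!(small.to_hex_le() > big.to_hex_le());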
READ THIS FIRST From db3c7a233e8d7c492ba24c7143b5ac065f58ea9b Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Sun, 12 Jun 2022 10:05:14 -0500 Subject: [PATCH 19/20] replace unwrap with error --- clarity/src/vm/analysis/errors.rs | 2 ++ clarity/src/vm/types/mod.rs | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/analysis/errors.rs b/clarity/src/vm/analysis/errors.rs index 4cd89c971..61a59583a 100644 --- a/clarity/src/vm/analysis/errors.rs +++ b/clarity/src/vm/analysis/errors.rs @@ -170,6 +170,7 @@ pub enum CheckErrors { // strings InvalidCharactersDetected, + InvalidUTF8Encoding, // secp256k1 signature InvalidSecp65k1Signature, @@ -405,6 +406,7 @@ impl DiagnosableError for CheckErrors { CheckErrors::TraitReferenceNotAllowed => format!("trait references can not be stored"), CheckErrors::ContractOfExpectsTrait => format!("trait reference expected"), CheckErrors::InvalidCharactersDetected => format!("invalid characters detected"), + CheckErrors::InvalidUTF8Encoding => format!("invalid UTF8 encoding"), CheckErrors::InvalidSecp65k1Signature => format!("invalid seckp256k1 signature"), CheckErrors::TypeAlreadyAnnotatedFailure | CheckErrors::CheckerImplementationFailure => { format!("internal error - please file an issue on github.com/blockstack/blockstack-core") diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index b8b48ff72..8ec216db9 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -786,8 +786,9 @@ impl Value { let matched = captures.name("value").unwrap(); let scalar_value = window[matched.start()..matched.end()].to_string(); let unicode_char = { - let u = u32::from_str_radix(&scalar_value, 16).unwrap(); - let c = char::from_u32(u).unwrap(); + let u = u32::from_str_radix(&scalar_value, 16) + .map_err(|_| CheckErrors::InvalidUTF8Encoding)?; + let c = char::from_u32(u).ok_or_else(|| CheckErrors::InvalidUTF8Encoding)?; let mut encoded_char: Vec = vec![0; c.len_utf8()]; c.encode_utf8(&mut encoded_char[..]); encoded_char From e2c69befa074f8625806c031127f2f912504a02d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 26 Jul 2022 11:18:28 -0400 Subject: [PATCH 20/20] fix: ::Skipped() events are no longer reported by the miner since there are so many of them --- testnet/stacks-node/src/tests/neon_integrations.rs | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 9acfb3917..7159b4173 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -3968,7 +3968,7 @@ fn mining_events_integration_test() { // check tx events in the first microblock // 1 success: 1 contract publish, 2 error (on chain transactions) let microblock_tx_events = &mined_microblock_events[0].tx_events; - assert_eq!(microblock_tx_events.len(), 3); + assert_eq!(microblock_tx_events.len(), 1); // contract publish match µblock_tx_events[0] { @@ -3993,15 +3993,6 @@ fn mining_events_integration_test() { } _ => panic!("unexpected event type"), } - for i in 1..3 { - // on chain only transactions will be skipped in a microblock - match µblock_tx_events[i] { - TransactionEvent::Skipped(TransactionSkippedEvent { error, .. }) => { - assert_eq!(error, "Invalid transaction anchor mode for streamed data"); - } - _ => panic!("unexpected event type"), - } - } // check mined block events let mined_block_events = test_observer::get_mined_blocks();