refactor: standardize block download pipeline across indexers (#463)

* zmq after chain tip

* some progress

* block pool advance

* runloop finished

* renames

* runs first time

* log levels

* start connecting runes

* compress block opt

* remove dead code

* remove dead code chainhook sdk

* remove ordhook dead code

* rollback install

* auto fmt

* clippy fixes

* fmt

* rollback chain tip

* api test

* api metrics fix

* rename

* standard logs

* ordinals start chain tip

* chain tips

* ci

* clippy

* fix tests

* remove dead code
Rafael Cárdenas authored 2025-03-13 08:26:46 -06:00, committed by GitHub
parent 5fb8b02a9e
commit 9e9eac81ea
88 changed files with 1659 additions and 2841 deletions

View File

@@ -1,5 +1,5 @@
[alias]
bitcoin-indexer-install = "install --path components/ordhook-cli --locked --force"
bitcoin-indexer-install = "install --path components/cli --locked --force"
bitcoin-indexer-fmt = "fmt -- --config group_imports=StdExternalCrate,imports_granularity=Crate"
bitcoin-indexer-clippy = "clippy --tests --all-features --all-targets -- -A clippy::too_many_arguments -A clippy::needless_return -A clippy::type_complexity -A clippy::ptr_arg"
bitcoin-indexer-clippy-cli = "clippy --tests --all-features --all-targets --message-format=short -- -A clippy::too_many_arguments -A clippy::needless_return -A clippy::type_complexity -A clippy::ptr_arg -D warnings"

View File

@@ -126,10 +126,10 @@ jobs:
fail-fast: false
matrix:
suite:
- cli
- chainhook-sdk
- bitcoind
- chainhook-postgres
- ordhook-core
- cli
- ordinals
- runes
defaults:
run:
@@ -220,10 +220,10 @@ jobs:
fail-fast: false
matrix:
suite:
- cli
- chainhook-sdk
- bitcoind
- chainhook-postgres
- ordhook-core
- cli
- ordinals
- runes
runs-on: ubuntu-latest
defaults:

Cargo.lock generated (94 lines changed)
View File

@@ -1,6 +1,6 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
version = 4
[[package]]
name = "addr2line"
@@ -451,6 +451,32 @@ dependencies = [
"serde_json",
]
[[package]]
name = "bitcoind"
version = "2.2.5"
dependencies = [
"assert-json-diff",
"base58",
"bitcoin 0.32.5",
"bitcoincore-rpc",
"bitcoincore-rpc-json",
"chainhook-types",
"config",
"crossbeam-channel",
"hex",
"hiro-system-kit",
"lazy_static",
"reqwest 0.12.12",
"rocket",
"serde",
"serde-hex",
"serde_derive",
"serde_json",
"test-case",
"tokio",
"zmq",
]
[[package]]
name = "bitflags"
version = "1.3.2"
@@ -563,32 +589,6 @@ dependencies = [
"tokio-postgres",
]
[[package]]
name = "chainhook-sdk"
version = "2.2.5"
dependencies = [
"assert-json-diff",
"base58",
"bitcoin 0.32.5",
"bitcoincore-rpc",
"bitcoincore-rpc-json",
"chainhook-types",
"config",
"crossbeam-channel",
"hex",
"hiro-system-kit",
"lazy_static",
"reqwest 0.12.12",
"rocket",
"serde",
"serde-hex",
"serde_derive",
"serde_json",
"test-case",
"tokio",
"zmq",
]
[[package]]
name = "chainhook-types"
version = "1.3.8"
@@ -755,7 +755,7 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6"
name = "cli"
version = "2.2.5"
dependencies = [
"chainhook-sdk",
"bitcoind",
"chainhook-types",
"clap 3.2.25",
"clap_generate",
@@ -764,7 +764,7 @@ dependencies = [
"hex",
"hiro-system-kit",
"num_cpus",
"ordhook",
"ordinals 2.2.5",
"reqwest 0.11.27",
"runes",
"serde",
@@ -2386,7 +2386,7 @@ version = "0.22.2"
dependencies = [
"anyhow",
"bitcoin 0.32.5",
"chainhook-sdk",
"bitcoind",
"ciborium",
"serde",
"serde_derive",
@@ -2394,15 +2394,28 @@ dependencies = [
]
[[package]]
name = "ordhook"
name = "ordinals"
version = "0.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "032a5b631f33636e2f07a6a153c7807fd5ddc3bfb30aec4c68534dc8ead0b3b2"
dependencies = [
"bitcoin 0.32.5",
"derive_more",
"serde",
"serde_with",
"thiserror 2.0.11",
]
[[package]]
name = "ordinals"
version = "2.2.5"
dependencies = [
"ansi_term",
"anyhow",
"atty",
"bitcoin 0.32.5",
"bitcoind",
"chainhook-postgres",
"chainhook-sdk",
"chainhook-types",
"config",
"crossbeam-channel",
@@ -2439,19 +2452,6 @@ dependencies = [
"tokio-postgres",
]
[[package]]
name = "ordinals"
version = "0.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "032a5b631f33636e2f07a6a153c7807fd5ddc3bfb30aec4c68534dc8ead0b3b2"
dependencies = [
"bitcoin 0.32.5",
"derive_more",
"serde",
"serde_with",
"thiserror 2.0.11",
]
[[package]]
name = "os_str_bytes"
version = "6.6.1"
@@ -3197,9 +3197,9 @@ name = "runes"
version = "2.2.5"
dependencies = [
"bitcoin 0.32.5",
"bitcoind",
"bytes",
"chainhook-postgres",
"chainhook-sdk",
"chainhook-types",
"clap 4.5.31",
"clap_generate",
@@ -3212,7 +3212,7 @@ dependencies = [
"lru 0.12.5",
"maplit",
"num-traits",
"ordinals",
"ordinals 0.0.15",
"rand 0.8.5",
"refinery",
"serde",

View File

@@ -1,11 +1,11 @@
[workspace]
members = [
"components/chainhook-sdk",
"components/bitcoind",
"components/chainhook-postgres",
"components/chainhook-types-rs",
"components/cli",
"components/config",
"components/ordhook-core",
"components/ordinals",
"components/ord",
"components/runes",
]

View File

@@ -19,7 +19,7 @@ export class ApiMetrics {
help: 'The most recent Bitcoin block height ingested by the API',
async collect() {
const height = await db.getChainTipBlockHeight();
this.set(height);
this.set(height ?? 0);
},
});
this.ordinals_api_max_inscription_number = new prom.Gauge({

View File

@@ -51,9 +51,9 @@ export class PgStore extends BasePgStore {
this.counts = new CountsPgStore(this);
}
async getChainTipBlockHeight(): Promise<number> {
async getChainTipBlockHeight(): Promise<number | undefined> {
const result = await this.sql<{ block_height: string }[]>`SELECT block_height FROM chain_tip`;
return parseInt(result[0].block_height);
if (result[0].block_height) return parseInt(result[0].block_height);
}
async getMaxInscriptionNumber(): Promise<number | undefined> {

View File

@@ -38,7 +38,6 @@ describe('Status', () => {
expect(json).toStrictEqual({
server_version: 'ordinals-api v0.0.1 (test:123456)',
status: 'ready',
block_height: 0,
});
const noVersionResponse = await fastify.inject({ method: 'GET', url: '/ordinals/' });
expect(response.statusCode).toEqual(noVersionResponse.statusCode);

View File

@@ -1,7 +1,7 @@
[package]
name = "chainhook-sdk"
name = "bitcoind"
version.workspace = true
description = "Stateless Transaction Indexing Engine for Stacks and Bitcoin"
description = "Stateless Transaction Indexing Engine for Bitcoin"
license = "GPL-3.0"
edition = "2021"

View File

@@ -1,8 +1,9 @@
use std::io::{Cursor, Read, Write};
use chainhook_sdk::indexer::bitcoin::BitcoinBlockFullBreakdown;
use chainhook_types::BitcoinBlockData;
use super::BitcoinBlockFullBreakdown;
#[derive(Debug)]
pub struct BlockBytesCursor<'a> {
pub bytes: &'a [u8],
@@ -365,13 +366,13 @@ impl Iterator for TransactionBytesCursorIterator<'_> {
#[cfg(test)]
mod tests {
use chainhook_sdk::{
indexer::bitcoin::{parse_downloaded_block, standardize_bitcoin_block},
utils::Context,
};
use chainhook_types::BitcoinNetwork;
use super::*;
use crate::{
indexer::bitcoin::{parse_downloaded_block, standardize_bitcoin_block},
utils::Context,
};
#[test]
fn test_block_cursor_roundtrip() {

View File

@@ -1,3 +1,6 @@
pub mod cursor;
pub mod pipeline;
use std::time::Duration;
use bitcoincore_rpc::{
@@ -232,7 +235,7 @@ pub async fn retrieve_block_hash(
Ok(block_hash)
}
// not used internally by chainhook; exported for ordhook
// not used internally by chainhook; exported for ordinals
pub async fn try_download_block_bytes_with_retry(
http_client: HttpClient,
block_height: u64,

View File

@@ -1,55 +1,42 @@
pub mod processors;
use std::{
collections::{HashMap, VecDeque},
thread::{sleep, JoinHandle},
thread::sleep,
time::Duration,
};
use chainhook_sdk::{
indexer::bitcoin::{
build_http_client, parse_downloaded_block, standardize_bitcoin_block,
try_download_block_bytes_with_retry,
},
utils::Context,
};
use chainhook_types::{BitcoinBlockData, BitcoinNetwork};
use chainhook_types::BitcoinNetwork;
use config::Config;
use crossbeam_channel::bounded;
use reqwest::Client;
use tokio::task::JoinSet;
use crate::{db::cursor::BlockBytesCursor, try_debug, try_info};
use crate::{
indexer::{
bitcoin::{
cursor::BlockBytesCursor, parse_downloaded_block, standardize_bitcoin_block,
try_download_block_bytes_with_retry,
},
BlockProcessor, BlockProcessorCommand, BlockProcessorEvent,
},
try_debug, try_info,
utils::Context,
};
pub enum PostProcessorCommand {
ProcessBlocks(Vec<(u64, Vec<u8>)>, Vec<BitcoinBlockData>),
Terminate,
}
pub enum PostProcessorEvent {
Terminated,
Expired,
}
pub struct PostProcessorController {
pub commands_tx: crossbeam_channel::Sender<PostProcessorCommand>,
pub events_rx: crossbeam_channel::Receiver<PostProcessorEvent>,
pub thread_handle: JoinHandle<()>,
}
/// Downloads blocks from bitcoind's RPC interface and pushes them to a `PostProcessorController` so they can be indexed or
/// ingested as needed.
pub async fn bitcoind_download_blocks(
/// Downloads historical blocks from bitcoind's RPC interface and pushes them to a [BlockProcessor] so they can be indexed
/// or ingested as needed.
pub async fn start_block_download_pipeline(
config: &Config,
http_client: &Client,
blocks: Vec<u64>,
start_sequencing_blocks_at_height: u64,
blocks_post_processor: &PostProcessorController,
compress_blocks: bool,
blocks_post_processor: &BlockProcessor,
speed: usize,
ctx: &Context,
) -> Result<(), String> {
let number_of_blocks_to_process = blocks.len() as u64;
let (block_compressed_tx, block_compressed_rx) = crossbeam_channel::bounded(speed);
let http_client = build_http_client();
let moved_config = config.bitcoind.clone();
let moved_ctx = ctx.clone();
@@ -105,23 +92,29 @@ pub async fn bitcoind_download_blocks(
rx_thread_pool.push(rx);
}
// Download and parse
for (thread_index, rx) in rx_thread_pool.into_iter().enumerate() {
let block_compressed_tx_moved = block_compressed_tx.clone();
let moved_ctx: Context = moved_ctx.clone();
let moved_bitcoin_network_inner = moved_bitcoin_network;
let handle = hiro_system_kit::thread_named("Block data compression")
.spawn(move || {
while let Ok(Some(block_bytes)) = rx.recv() {
let raw_block_data =
parse_downloaded_block(block_bytes).expect("unable to parse block");
let compressed_block = BlockBytesCursor::from_full_block(&raw_block_data)
.expect("unable to compress block");
let compressed_block = if compress_blocks {
Some(
BlockBytesCursor::from_full_block(&raw_block_data)
.expect("unable to compress block"),
)
} else {
None
};
let block_height = raw_block_data.height as u64;
let block_data = if block_height >= start_sequencing_blocks_at_height {
let block = standardize_bitcoin_block(
raw_block_data,
&BitcoinNetwork::from_network(moved_bitcoin_network_inner),
&BitcoinNetwork::from_network(moved_bitcoin_network),
&moved_ctx,
)
.expect("unable to deserialize block");
@@ -157,7 +150,8 @@ pub async fn bitcoind_download_blocks(
cloned_ctx,
"#{blocks_processed} blocks successfully sent to processor"
);
let _ = blocks_post_processor_commands_tx.send(PostProcessorCommand::Terminate);
let _ =
blocks_post_processor_commands_tx.send(BlockProcessorCommand::Terminate);
break;
}
@@ -191,9 +185,9 @@ pub async fn bitcoind_download_blocks(
let mut ooo_compacted_blocks = vec![];
for (block_height, block_opt, compacted_block) in new_blocks.into_iter() {
if let Some(block) = block_opt {
inbox.insert(block_height, (block, compacted_block.to_vec()));
} else {
ooo_compacted_blocks.push((block_height, compacted_block.to_vec()));
inbox.insert(block_height, (block, compacted_block));
} else if let Some(compacted_block) = compacted_block {
ooo_compacted_blocks.push((block_height, compacted_block));
}
}
@@ -201,7 +195,10 @@ pub async fn bitcoind_download_blocks(
if !ooo_compacted_blocks.is_empty() {
blocks_processed += ooo_compacted_blocks.len() as u64;
let _ = blocks_post_processor_commands_tx.send(
PostProcessorCommand::ProcessBlocks(ooo_compacted_blocks, vec![]),
BlockProcessorCommand::ProcessBlocks {
compacted_blocks: ooo_compacted_blocks,
blocks: vec![],
},
);
}
@@ -213,7 +210,9 @@ pub async fn bitcoind_download_blocks(
let mut compacted_blocks = vec![];
let mut blocks = vec![];
while let Some((block, compacted_block)) = inbox.remove(&inbox_cursor) {
compacted_blocks.push((inbox_cursor, compacted_block));
if let Some(compacted_block) = compacted_block {
compacted_blocks.push((inbox_cursor, compacted_block));
}
blocks.push(block);
inbox_cursor += 1;
}
@@ -222,7 +221,10 @@ pub async fn bitcoind_download_blocks(
if !blocks.is_empty() {
let _ = blocks_post_processor_commands_tx.send(
PostProcessorCommand::ProcessBlocks(compacted_blocks, blocks),
BlockProcessorCommand::ProcessBlocks {
compacted_blocks,
blocks,
},
);
}
@@ -284,7 +286,7 @@ pub async fn bitcoind_download_blocks(
loop {
if let Ok(signal) = blocks_post_processor.events_rx.recv() {
match signal {
PostProcessorEvent::Terminated | PostProcessorEvent::Expired => break,
BlockProcessorEvent::Terminated | BlockProcessorEvent::Expired => break,
}
}
}
@@ -292,7 +294,7 @@ pub async fn bitcoind_download_blocks(
let _ = block_compressed_tx.send(None);
let _ = storage_thread.join();
set.shutdown().await;
let _ = set.shutdown().await;
try_info!(
ctx,
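For orientation, a hedged usage sketch of calling the renamed entry point; the `download_range` wrapper and the height range are illustrative assumptions, and the `1000` bound mirrors the call site added in the indexer module later in this commit:

// Sketch only (not part of this commit). Assumes `processor` is a
// `BlockProcessor` already wired to a consumer thread, as
// `download_rpc_blocks` does later in this commit.
async fn download_range(
    config: &Config,
    http_client: &reqwest::Client,
    processor: &BlockProcessor,
    ctx: &Context,
) -> Result<(), String> {
    // Heights to fetch; a real caller derives these from its chain tip.
    let blocks: Vec<u64> = (800_000..=800_100).collect();
    start_block_download_pipeline(
        config,
        http_client,
        blocks,
        800_000, // start_sequencing_blocks_at_height: standardize from here on
        true,    // compress_blocks: also emit compacted block bytes
        processor,
        1000,    // speed: bound on in-flight blocks
        ctx,
    )
    .await
}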

View File

@@ -1,61 +1,11 @@
pub mod bitcoin;
pub mod fork_scratch_pad;
use std::collections::VecDeque;
use chainhook_types::{BlockHeader, BlockIdentifier, BlockchainEvent};
use config::BitcoindConfig;
use hiro_system_kit::slog;
use chainhook_types::BlockIdentifier;
use self::fork_scratch_pad::ForkScratchPad;
use crate::utils::{AbstractBlock, Context};
#[derive(Deserialize, Debug, Clone, Default)]
pub struct AssetClassCache {
pub symbol: String,
pub decimals: u8,
}
pub struct BitcoinChainContext {}
impl Default for BitcoinChainContext {
fn default() -> Self {
Self::new()
}
}
impl BitcoinChainContext {
pub fn new() -> BitcoinChainContext {
BitcoinChainContext {}
}
}
pub struct Indexer {
pub config: BitcoindConfig,
bitcoin_blocks_pool: ForkScratchPad,
pub bitcoin_context: BitcoinChainContext,
}
impl Indexer {
pub fn new(config: BitcoindConfig) -> Indexer {
let bitcoin_blocks_pool = ForkScratchPad::new();
let bitcoin_context = BitcoinChainContext::new();
Indexer {
config,
bitcoin_blocks_pool,
bitcoin_context,
}
}
pub fn handle_bitcoin_header(
&mut self,
header: BlockHeader,
ctx: &Context,
) -> Result<Option<BlockchainEvent>, String> {
self.bitcoin_blocks_pool.process_header(header, ctx)
}
}
use crate::{
try_debug, try_info, try_warn,
utils::{AbstractBlock, Context},
};
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ChainSegment {
@@ -75,8 +25,8 @@ pub enum ChainSegmentIncompatibility {
#[derive(Debug)]
pub struct ChainSegmentDivergence {
block_ids_to_apply: Vec<BlockIdentifier>,
block_ids_to_rollback: Vec<BlockIdentifier>,
pub block_ids_to_apply: Vec<BlockIdentifier>,
pub block_ids_to_rollback: Vec<BlockIdentifier>,
}
impl Default for ChainSegment {
@@ -91,7 +41,7 @@ impl ChainSegment {
ChainSegment { block_ids }
}
fn is_empty(&self) -> bool {
pub fn is_empty(&self) -> bool {
self.block_ids.is_empty()
}
@@ -123,9 +73,7 @@ impl ChainSegment {
Some(tip) => tip,
None => return Ok(()),
};
ctx.try_log(|logger| {
slog::info!(logger, "Comparing {} with {}", tip, block.get_identifier())
});
try_debug!(ctx, "Comparing {} with {}", tip, block.get_identifier());
if tip.index == block.get_parent_identifier().index {
match tip.hash == block.get_parent_identifier().hash {
true => return Ok(()),
@@ -196,7 +144,7 @@ impl ChainSegment {
}
}
fn try_identify_divergence(
pub fn try_identify_divergence(
&self,
other_segment: &ChainSegment,
allow_reset: bool,
@@ -219,10 +167,8 @@ impl ChainSegment {
}
block_ids_to_rollback.push(cursor_segment_1.clone());
}
ctx.try_log(|logger| {
slog::debug!(logger, "Blocks to rollback: {:?}", block_ids_to_rollback)
});
ctx.try_log(|logger| slog::debug!(logger, "Blocks to apply: {:?}", block_ids_to_apply));
try_debug!(ctx, "Blocks to rollback: {:?}", block_ids_to_rollback);
try_debug!(ctx, "Blocks to apply: {:?}", block_ids_to_apply);
block_ids_to_apply.reverse();
match common_root.take() {
Some(_common_root) => Ok(ChainSegmentDivergence {
@@ -237,30 +183,26 @@ impl ChainSegment {
}
}
fn try_append_block(
pub fn try_append_block(
&mut self,
block: &dyn AbstractBlock,
ctx: &Context,
) -> (bool, Option<ChainSegment>) {
let mut block_appended = false;
let mut fork = None;
ctx.try_log(|logger| {
slog::info!(
logger,
"Trying to append {} to {}",
block.get_identifier(),
self
)
});
try_debug!(
ctx,
"Trying to append {} to {}",
block.get_identifier(),
self
);
match self.can_append_block(block, ctx) {
Ok(()) => {
self.append_block_identifier(block.get_identifier());
block_appended = true;
}
Err(incompatibility) => {
ctx.try_log(|logger| {
slog::warn!(logger, "Will have to fork: {:?}", incompatibility)
});
try_warn!(ctx, "Will have to fork: {:?}", incompatibility);
match incompatibility {
ChainSegmentIncompatibility::BlockCollision => {
let mut new_fork = self.clone();
@@ -269,7 +211,7 @@ impl ChainSegment {
block.get_parent_identifier(),
);
if parent_found {
ctx.try_log(|logger| slog::info!(logger, "Success"));
try_info!(ctx, "Success");
new_fork.append_block_identifier(block.get_identifier());
fork = Some(new_fork);
block_appended = true;
@@ -305,6 +247,3 @@ impl std::fmt::Display for ChainSegment {
)
}
}
#[cfg(test)]
pub mod tests;

View File

@@ -6,11 +6,8 @@ use chainhook_types::{
};
use hiro_system_kit::slog;
use crate::{
indexer::{ChainSegment, ChainSegmentIncompatibility},
try_error, try_info, try_warn,
utils::Context,
};
use super::chain_segment::{ChainSegment, ChainSegmentIncompatibility};
use crate::{try_debug, try_error, try_info, try_warn, utils::Context};
pub struct ForkScratchPad {
canonical_fork_id: usize,
@@ -38,6 +35,14 @@ impl ForkScratchPad {
}
}
pub fn canonical_chain_tip(&self) -> Option<&BlockIdentifier> {
self.forks
.get(&self.canonical_fork_id)
.unwrap()
.block_ids
.front()
}
pub fn can_process_header(&self, header: &BlockHeader) -> bool {
if self.headers_store.is_empty() {
return true;
@@ -52,7 +57,7 @@ impl ForkScratchPad {
header: BlockHeader,
ctx: &Context,
) -> Result<Option<BlockchainEvent>, String> {
try_info!(
try_debug!(
ctx,
"ForkScratchPad: Start processing {}",
header.block_identifier
@@ -70,7 +75,7 @@ impl ForkScratchPad {
}
for (i, fork) in self.forks.iter() {
try_info!(ctx, "ForkScratchPad: Active fork {}: {}", i, fork);
try_debug!(ctx, "ForkScratchPad: Active fork {}: {}", i, fork);
}
// Retrieve previous canonical fork
let previous_canonical_fork_id = self.canonical_fork_id;
@@ -164,13 +169,13 @@ impl ForkScratchPad {
let mut canonical_fork_id = 0;
let mut highest_height = 0;
for (fork_id, fork) in self.forks.iter() {
try_info!(ctx, "ForkScratchPad: Active fork: {} - {}", fork_id, fork);
try_debug!(ctx, "ForkScratchPad: Active fork: {} - {}", fork_id, fork);
if fork.get_length() >= highest_height {
highest_height = fork.get_length();
canonical_fork_id = *fork_id;
}
}
try_info!(
try_debug!(
ctx,
"ForkScratchPad: Active fork selected as canonical: {}",
canonical_fork_id
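To illustrate the pool's role, a hedged sketch of feeding headers into a `ForkScratchPad` and reacting to the resulting `BlockchainEvent`s; the `ingest_headers` wrapper is an illustrative assumption, while the event variants and field names match the usage in `advance_block_pool` elsewhere in this commit:

use chainhook_types::{BlockHeader, BlockchainEvent};

// Sketch: drive the fork pool with a batch of headers.
fn ingest_headers(headers: Vec<BlockHeader>, ctx: &Context) -> Result<(), String> {
    let mut pool = ForkScratchPad::new();
    for header in headers {
        if !pool.can_process_header(&header) {
            // Parent not yet known: a real caller fetches and replays the
            // parent first, as `advance_block_pool` does in this commit.
            continue;
        }
        match pool.process_header(header, ctx)? {
            Some(BlockchainEvent::BlockchainUpdatedWithHeaders(e)) => {
                // Apply `e.new_headers` in order.
                let _ = e;
            }
            Some(BlockchainEvent::BlockchainUpdatedWithReorg(e)) => {
                // Roll back `e.headers_to_rollback`, then apply `e.headers_to_apply`.
                let _ = e;
            }
            None => return Err("Unable to append header".into()),
        }
    }
    Ok(())
}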

View File

@@ -0,0 +1,499 @@
pub mod bitcoin;
pub mod chain_segment;
pub mod fork_scratch_pad;
use std::{
collections::{HashMap, VecDeque},
sync::{Arc, Mutex},
thread::{sleep, JoinHandle},
time::Duration,
};
use bitcoin::{
build_http_client, download_and_parse_block_with_retry,
pipeline::start_block_download_pipeline, standardize_bitcoin_block,
};
use chainhook_types::{BitcoinBlockData, BitcoinNetwork, BlockIdentifier, BlockchainEvent};
use config::Config;
use crossbeam_channel::{Receiver, Sender, TryRecvError};
use reqwest::Client;
use self::fork_scratch_pad::ForkScratchPad;
use crate::{
observer::zmq::start_zeromq_pipeline,
try_debug, try_info,
utils::{
bitcoind::{bitcoind_get_chain_tip, bitcoind_wait_for_chain_tip},
future_block_on, AbstractBlock, BlockHeights, Context,
},
};
pub enum BlockProcessorCommand {
ProcessBlocks {
compacted_blocks: Vec<(u64, Vec<u8>)>,
blocks: Vec<BitcoinBlockData>,
},
Terminate,
}
pub enum BlockProcessorEvent {
Terminated,
Expired,
}
/// Object that will receive any blocks as they come from bitcoind. These messages do not track any canonical chain alterations.
pub struct BlockProcessor {
pub commands_tx: crossbeam_channel::Sender<BlockProcessorCommand>,
pub events_rx: crossbeam_channel::Receiver<BlockProcessorEvent>,
pub thread_handle: JoinHandle<()>,
}
pub enum IndexerCommand {
StoreCompactedBlocks(Vec<(u64, Vec<u8>)>),
IndexBlocks {
apply_blocks: Vec<BitcoinBlockData>,
rollback_block_ids: Vec<BlockIdentifier>,
},
}
/// Object that will receive standardized blocks ready to be indexed or rolled back. Blocks can come from historical downloads or
/// recent block streams.
pub struct Indexer {
/// Sender for emitting indexer commands.
pub commands_tx: crossbeam_channel::Sender<IndexerCommand>,
/// Current index chain tip at launch time.
pub chain_tip: Option<BlockIdentifier>,
pub thread_handle: JoinHandle<()>,
}
/// Advances our block pool with a newly received standardized block
async fn advance_block_pool(
block: BitcoinBlockData,
block_pool: &Arc<Mutex<ForkScratchPad>>,
block_store: &Arc<Mutex<HashMap<BlockIdentifier, BitcoinBlockData>>>,
http_client: &Client,
indexer_commands_tx: &Sender<IndexerCommand>,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
let network = BitcoinNetwork::from_network(config.bitcoind.network);
let mut block_ids = VecDeque::new();
block_ids.push_front(block.block_identifier.clone());
let block_pool_ref = block_pool.clone();
let block_store_ref = block_store.clone();
// Keep incoming block before sending.
{
let mut block_store_guard = block_store_ref.lock().unwrap();
block_store_guard.insert(block.block_identifier.clone(), block);
}
while let Some(block_id) = block_ids.pop_front() {
let (header, canonical) = {
let mut pool_guard = block_pool_ref.lock().unwrap();
let mut block_store_guard = block_store_ref.lock().unwrap();
let block = block_store_guard.get(&block_id).unwrap();
let header = block.get_header();
if pool_guard.can_process_header(&header) {
match pool_guard.process_header(header.clone(), ctx)? {
Some(event) => match event {
BlockchainEvent::BlockchainUpdatedWithHeaders(event) => {
let mut apply_blocks = vec![];
for header in event.new_headers.iter() {
apply_blocks.push(
block_store_guard.remove(&header.block_identifier).unwrap(),
);
}
indexer_commands_tx
.send(IndexerCommand::IndexBlocks {
apply_blocks,
rollback_block_ids: vec![],
})
.map_err(|e| e.to_string())?;
(header, true)
}
BlockchainEvent::BlockchainUpdatedWithReorg(event) => {
let mut apply_blocks = vec![];
for header in event.headers_to_apply.iter() {
apply_blocks.push(
block_store_guard.remove(&header.block_identifier).unwrap(),
);
}
let rollback_block_ids: Vec<BlockIdentifier> = event
.headers_to_rollback
.iter()
.map(|h| h.block_identifier.clone())
.collect();
indexer_commands_tx
.send(IndexerCommand::IndexBlocks {
apply_blocks,
rollback_block_ids,
})
.map_err(|e| e.to_string())?;
(header, true)
}
},
None => return Err("Unable to append block".into()),
}
} else {
try_info!(
ctx,
"Received non-canonical block {}",
header.block_identifier
);
(header, false)
}
};
if !canonical {
let parent_block = {
// Handle a behaviour specific to ZMQ usage in bitcoind.
// Considering a simple re-org:
// A (1) - B1 (2) - C1 (3)
// \ B2 (4) - C2 (5) - D2 (6)
// When D2 is being discovered (making A -> B2 -> C2 -> D2 the new canonical fork)
// it looks like ZMQ is only publishing D2.
// Without additional operation, we end up with a block that we can't append.
let parent_block_hash = header
.parent_block_identifier
.get_hash_bytes_str()
.to_string();
// try_info!(
// ctx,
// "zmq: Re-org detected, retrieving parent block {parent_block_hash}"
// );
let parent_block = download_and_parse_block_with_retry(
http_client,
&parent_block_hash,
&config.bitcoind,
ctx,
)
.await?;
standardize_bitcoin_block(parent_block, &network, ctx).map_err(|(e, _)| e)?
};
// Keep parent block and repeat the cycle
{
let mut block_store_guard = block_store_ref.lock().unwrap();
block_store_guard
.insert(parent_block.block_identifier.clone(), parent_block.clone());
}
block_ids.push_front(block_id);
block_ids.push_front(parent_block.block_identifier.clone());
}
}
Ok(())
}
/// Initialize our block pool with the current index's last seen block, so we can detect any re-orgs or gaps that may come our
/// way with the next blocks.
async fn initialize_block_pool(
block_pool: &Arc<Mutex<ForkScratchPad>>,
index_chain_tip: &BlockIdentifier,
http_client: &Client,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
let last_block = download_and_parse_block_with_retry(
http_client,
index_chain_tip.get_hash_bytes_str(),
&config.bitcoind,
ctx,
)
.await?;
let block_pool_ref = block_pool.clone();
let mut pool = block_pool_ref.lock().unwrap();
match pool.process_header(last_block.get_block_header(), ctx) {
Ok(_) => {
try_debug!(
ctx,
"Primed fork processor with last seen block hash {index_chain_tip}"
);
}
Err(e) => return Err(format!("Unable to load last seen block: {e}")),
}
Ok(())
}
/// Runloop designed to receive Bitcoin blocks through a [BlockProcessor] and send them to a [ForkScratchPad] so it can advance
/// the canonical chain.
async fn block_ingestion_runloop(
indexer_commands_tx: &Sender<IndexerCommand>,
index_chain_tip: &Option<BlockIdentifier>,
block_commands_rx: &Receiver<BlockProcessorCommand>,
block_events_tx: &Sender<BlockProcessorEvent>,
block_pool: &Arc<Mutex<ForkScratchPad>>,
block_store: &Arc<Mutex<HashMap<BlockIdentifier, BitcoinBlockData>>>,
http_client: &Client,
sequence_start_block_height: u64,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
// Before starting the loop, check if the index already has progress. If so, prime the block pool with the current tip.
if let Some(index_chain_tip) = index_chain_tip {
if index_chain_tip.index >= sequence_start_block_height {
initialize_block_pool(block_pool, index_chain_tip, http_client, config, ctx).await?;
}
}
let mut empty_cycles = 0;
loop {
let (compacted_blocks, blocks) = match block_commands_rx.try_recv() {
Ok(BlockProcessorCommand::ProcessBlocks {
compacted_blocks,
blocks,
}) => {
empty_cycles = 0;
(compacted_blocks, blocks)
}
Ok(BlockProcessorCommand::Terminate) => {
let _ = block_events_tx.send(BlockProcessorEvent::Terminated);
return Ok(());
}
Err(e) => match e {
TryRecvError::Empty => {
empty_cycles += 1;
if empty_cycles == 180 {
try_info!(ctx, "Block processor reached expiration");
let _ = block_events_tx.send(BlockProcessorEvent::Expired);
return Ok(());
}
sleep(Duration::from_secs(1));
continue;
}
_ => {
return Ok(());
}
},
};
if !compacted_blocks.is_empty() {
indexer_commands_tx
.send(IndexerCommand::StoreCompactedBlocks(compacted_blocks))
.map_err(|e| e.to_string())?;
}
for block in blocks.into_iter() {
advance_block_pool(
block,
block_pool,
block_store,
http_client,
indexer_commands_tx,
config,
ctx,
)
.await?;
}
}
}
/// Starts a bitcoind RPC block download pipeline that will send us all historical bitcoin blocks in a parallel fashion. We will
/// then stream these blocks into our block pool so they can be fed into the configured [Indexer]. This will eventually bring the
/// index chain tip to `target_block_height`.
async fn download_rpc_blocks(
indexer: &Indexer,
block_pool: &Arc<Mutex<ForkScratchPad>>,
block_store: &Arc<Mutex<HashMap<BlockIdentifier, BitcoinBlockData>>>,
http_client: &Client,
target_block_height: u64,
sequence_start_block_height: u64,
compress_blocks: bool,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
let (commands_tx, commands_rx) = crossbeam_channel::bounded::<BlockProcessorCommand>(2);
let (events_tx, events_rx) = crossbeam_channel::unbounded::<BlockProcessorEvent>();
let ctx_moved = ctx.clone();
let config_moved = config.clone();
let block_pool_moved = block_pool.clone();
let block_store_moved = block_store.clone();
let http_client_moved = http_client.clone();
let indexer_commands_tx_moved = indexer.commands_tx.clone();
let index_chain_tip_moved = indexer.chain_tip.clone();
let handle: JoinHandle<()> = hiro_system_kit::thread_named("block_download_processor")
.spawn(move || {
future_block_on(&ctx_moved.clone(), async move {
block_ingestion_runloop(
&indexer_commands_tx_moved,
&index_chain_tip_moved,
&commands_rx,
&events_tx,
&block_pool_moved,
&block_store_moved,
&http_client_moved,
sequence_start_block_height,
&config_moved,
&ctx_moved,
)
.await
});
})
.expect("unable to spawn thread");
let processor = BlockProcessor {
commands_tx,
events_rx,
thread_handle: handle,
};
let blocks = {
let block_pool_ref = block_pool.clone();
let pool = block_pool_ref.lock().unwrap();
let chain_tip = pool.canonical_chain_tip().or(indexer.chain_tip.as_ref());
let start_block = chain_tip.map_or(0, |ct| ct.index + 1);
BlockHeights::BlockRange(start_block, target_block_height)
.get_sorted_entries()
.map_err(|_e| "Block start / end block spec invalid".to_string())?
};
try_debug!(
ctx,
"Downloading blocks from #{} to #{}",
blocks.front().unwrap(),
blocks.back().unwrap()
);
start_block_download_pipeline(
config,
http_client,
blocks.into(),
sequence_start_block_height,
compress_blocks,
&processor,
1000,
ctx,
)
.await
}
/// Streams all upcoming blocks from bitcoind through its ZeroMQ interface and pipes them onto the [Indexer] once processed
/// through our block pool. This process will run indefinitely and will make sure our index keeps advancing as new Bitcoin blocks
/// get mined.
async fn stream_zmq_blocks(
indexer: &Indexer,
block_pool: &Arc<Mutex<ForkScratchPad>>,
block_store: &Arc<Mutex<HashMap<BlockIdentifier, BitcoinBlockData>>>,
http_client: &Client,
sequence_start_block_height: u64,
compress_blocks: bool,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
let (commands_tx, commands_rx) = crossbeam_channel::bounded::<BlockProcessorCommand>(2);
let (events_tx, events_rx) = crossbeam_channel::unbounded::<BlockProcessorEvent>();
let ctx_moved = ctx.clone();
let config_moved = config.clone();
let block_pool_moved = block_pool.clone();
let block_store_moved = block_store.clone();
let http_client_moved = http_client.clone();
let indexer_commands_tx_moved = indexer.commands_tx.clone();
let index_chain_tip_moved = indexer.chain_tip.clone();
let handle: JoinHandle<()> = hiro_system_kit::thread_named("block_stream_processor")
.spawn(move || {
future_block_on(&ctx_moved.clone(), async move {
block_ingestion_runloop(
&indexer_commands_tx_moved,
&index_chain_tip_moved,
&commands_rx,
&events_tx,
&block_pool_moved,
&block_store_moved,
&http_client_moved,
sequence_start_block_height,
&config_moved,
&ctx_moved,
)
.await
});
})
.expect("unable to spawn thread");
let processor = BlockProcessor {
commands_tx,
events_rx,
thread_handle: handle,
};
start_zeromq_pipeline(
&processor,
sequence_start_block_height,
compress_blocks,
config,
ctx,
)
.await
}
/// Starts a Bitcoin block indexer pipeline.
pub async fn start_bitcoin_indexer(
indexer: &Indexer,
sequence_start_block_height: u64,
stream_blocks_at_chain_tip: bool,
compress_blocks: bool,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
let mut bitcoind_chain_tip = bitcoind_wait_for_chain_tip(&config.bitcoind, ctx);
let http_client = build_http_client();
// Block pool that will track the canonical chain and detect any reorgs that may happen.
let block_pool_arc = Arc::new(Mutex::new(ForkScratchPad::new()));
let block_pool = block_pool_arc.clone();
// Block cache that will keep block data in memory while it is prepared to be sent to indexers.
let block_store_arc = Arc::new(Mutex::new(HashMap::new()));
if let Some(index_chain_tip) = &indexer.chain_tip {
try_info!(ctx, "Index chain tip is at {}", index_chain_tip);
} else {
try_info!(ctx, "Index is empty");
}
// Sync index until chain tip is reached.
loop {
{
let pool = block_pool.lock().unwrap();
let chain_tip = pool.canonical_chain_tip().or(indexer.chain_tip.as_ref());
if let Some(chain_tip) = chain_tip {
if bitcoind_chain_tip == *chain_tip {
try_info!(
ctx,
"Index has reached bitcoind chain tip at {bitcoind_chain_tip}"
);
break;
}
}
}
download_rpc_blocks(
indexer,
&block_pool_arc,
&block_store_arc,
&http_client,
bitcoind_chain_tip.index,
sequence_start_block_height,
compress_blocks,
config,
ctx,
)
.await?;
// Bitcoind may have advanced while we were indexing, check its chain tip again.
bitcoind_chain_tip = bitcoind_get_chain_tip(&config.bitcoind, ctx);
}
// Stream new incoming blocks.
if stream_blocks_at_chain_tip {
stream_zmq_blocks(
indexer,
&block_pool_arc,
&block_store_arc,
&http_client,
sequence_start_block_height,
compress_blocks,
config,
ctx,
)
.await?;
}
Ok(())
}
#[cfg(test)]
pub mod tests;
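As a usage sketch, this is roughly how a protocol indexer might construct the new [Indexer] handle and hand it to `start_bitcoin_indexer`; the channel capacity, thread name, and command handling bodies are illustrative assumptions, not part of this commit:

// Sketch only. Persistence is elided; the consumer thread just drains
// commands. Field names match the `Indexer` struct added above.
fn spawn_indexer_thread() -> Indexer {
    let (commands_tx, commands_rx) = crossbeam_channel::bounded::<IndexerCommand>(10);
    let thread_handle = hiro_system_kit::thread_named("indexer")
        .spawn(move || {
            while let Ok(command) = commands_rx.recv() {
                match command {
                    IndexerCommand::StoreCompactedBlocks(blocks) => {
                        // Persist (height, raw bytes) pairs.
                        let _ = blocks;
                    }
                    IndexerCommand::IndexBlocks { apply_blocks, rollback_block_ids } => {
                        // Roll back stale blocks, then index the new ones.
                        let _ = (apply_blocks, rollback_block_ids);
                    }
                }
            }
        })
        .expect("unable to spawn thread");
    Indexer {
        commands_tx,
        chain_tip: None, // would be read from the protocol's database
        thread_handle,
    }
}

The resulting handle would then be passed along as `start_bitcoin_indexer(&indexer, sequence_start_block_height, stream_blocks_at_chain_tip, compress_blocks, &config, &ctx)`.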

View File

@@ -0,0 +1 @@
pub mod zmq;

View File

@@ -0,0 +1,118 @@
use chainhook_types::BitcoinNetwork;
use config::Config;
use zmq::Socket;
use crate::{
indexer::{
bitcoin::{
build_http_client, cursor::BlockBytesCursor, download_and_parse_block_with_retry,
standardize_bitcoin_block,
},
BlockProcessor, BlockProcessorCommand,
},
try_info, try_warn,
utils::Context,
};
fn new_zmq_socket() -> Socket {
let context = zmq::Context::new();
let socket = context.socket(zmq::SUB).unwrap();
assert!(socket.set_subscribe(b"hashblock").is_ok());
assert!(socket.set_rcvhwm(0).is_ok());
// We override the OS default behavior:
assert!(socket.set_tcp_keepalive(1).is_ok());
// The keepalive routine will wait for 5 minutes
assert!(socket.set_tcp_keepalive_idle(300).is_ok());
// And then resend it every 60 seconds
assert!(socket.set_tcp_keepalive_intvl(60).is_ok());
// 120 times
assert!(socket.set_tcp_keepalive_cnt(120).is_ok());
socket
}
pub async fn start_zeromq_pipeline(
blocks_post_processor: &BlockProcessor,
start_sequencing_blocks_at_height: u64,
compress_blocks: bool,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
let http_client = build_http_client();
let bitcoind_zmq_url = config.bitcoind.zmq_url.clone();
let network = BitcoinNetwork::from_network(config.bitcoind.network);
try_info!(
ctx,
"zmq: Waiting for ZMQ connection acknowledgment from bitcoind"
);
let mut socket = new_zmq_socket();
assert!(socket.connect(&bitcoind_zmq_url).is_ok());
try_info!(
ctx,
"zmq: Connected, waiting for ZMQ messages from bitcoind"
);
loop {
let msg = match socket.recv_multipart(0) {
Ok(msg) => msg,
Err(e) => {
try_warn!(ctx, "zmq: Unable to receive ZMQ message: {e}");
socket = new_zmq_socket();
assert!(socket.connect(&bitcoind_zmq_url).is_ok());
continue;
}
};
let (topic, data, _sequence) = (&msg[0], &msg[1], &msg[2]);
if !topic.eq(b"hashblock") {
try_warn!(
ctx,
"zmq: {} Topic not supported",
String::from_utf8(topic.clone()).unwrap()
);
continue;
}
let block_hash = hex::encode(data);
try_info!(ctx, "zmq: Bitcoin block hash announced {block_hash}");
let raw_block_data = match download_and_parse_block_with_retry(
&http_client,
&block_hash,
&config.bitcoind,
ctx,
)
.await
{
Ok(block) => block,
Err(e) => {
try_warn!(ctx, "zmq: Unable to download block: {e}");
continue;
}
};
let block_height = raw_block_data.height as u64;
let compacted_blocks = if compress_blocks {
vec![(
block_height,
BlockBytesCursor::from_full_block(&raw_block_data)
.expect("unable to compress block"),
)]
} else {
vec![]
};
let blocks = if block_height >= start_sequencing_blocks_at_height {
let block = standardize_bitcoin_block(raw_block_data, &network, ctx)
.expect("unable to deserialize block");
vec![block]
} else {
vec![]
};
blocks_post_processor
.commands_tx
.send(BlockProcessorCommand::ProcessBlocks {
compacted_blocks,
blocks,
})
.map_err(|e| e.to_string())?;
}
}
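For completeness, a hedged sketch of the consumer side of the [BlockProcessor] contract that both this ZeroMQ pipeline and the RPC download pipeline feed; the runloop body is an illustrative placeholder:

// Sketch of a custom processor runloop; persistence is elided.
fn processor_runloop(
    commands_rx: crossbeam_channel::Receiver<BlockProcessorCommand>,
    events_tx: crossbeam_channel::Sender<BlockProcessorEvent>,
) {
    while let Ok(command) = commands_rx.recv() {
        match command {
            BlockProcessorCommand::ProcessBlocks { compacted_blocks, blocks } => {
                // `compacted_blocks` is empty when compression is disabled.
                for (_height, _bytes) in compacted_blocks { /* store raw bytes */ }
                for _block in blocks { /* index standardized block */ }
            }
            BlockProcessorCommand::Terminate => {
                let _ = events_tx.send(BlockProcessorEvent::Terminated);
                break;
            }
        }
    }
}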

View File

@@ -1,8 +1,8 @@
use std::{thread::sleep, time::Duration};
use bitcoincore_rpc::{Auth, Client, RpcApi};
use chainhook_types::BlockIdentifier;
use config::BitcoindConfig;
use hiro_system_kit::slog;
use crate::{try_error, try_info, utils::Context};
@@ -21,13 +21,16 @@ fn bitcoind_get_client(config: &BitcoindConfig, ctx: &Context) -> Client {
}
}
/// Retrieves the block height from bitcoind.
pub fn bitcoind_get_block_height(config: &BitcoindConfig, ctx: &Context) -> u64 {
/// Retrieves the chain tip from bitcoind.
pub fn bitcoind_get_chain_tip(config: &BitcoindConfig, ctx: &Context) -> BlockIdentifier {
let bitcoin_rpc = bitcoind_get_client(config, ctx);
loop {
match bitcoin_rpc.get_blockchain_info() {
Ok(result) => {
return result.blocks;
return BlockIdentifier {
index: result.blocks,
hash: format!("0x{}", result.best_block_hash),
};
}
Err(e) => {
try_error!(
@@ -42,7 +45,7 @@ pub fn bitcoind_get_block_height(config: &BitcoindConfig, ctx: &Context) -> u64
}
/// Checks if bitcoind is still synchronizing blocks and waits until it's finished if that is the case.
pub fn bitcoind_wait_for_chain_tip(config: &BitcoindConfig, ctx: &Context) {
pub fn bitcoind_wait_for_chain_tip(config: &BitcoindConfig, ctx: &Context) -> BlockIdentifier {
let bitcoin_rpc = bitcoind_get_client(config, ctx);
let mut confirmations = 0;
loop {
@@ -54,7 +57,10 @@ pub fn bitcoind_wait_for_chain_tip(config: &BitcoindConfig, ctx: &Context) {
// peers.
if confirmations == 10 {
try_info!(ctx, "bitcoind: Chain tip reached");
return;
return BlockIdentifier {
index: result.blocks,
hash: format!("0x{}", result.best_block_hash),
};
}
try_info!(ctx, "bitcoind: Verifying chain tip");
} else {
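A small hedged sketch of the changed helpers in use; the `wait_and_report` wrapper is illustrative, while both return types follow the signatures above:

// Sketch: both helpers now return the tip as a BlockIdentifier
// instead of a bare height.
fn wait_and_report(config: &config::BitcoindConfig, ctx: &Context) {
    // Blocks until bitcoind reports a stable tip, then returns it.
    let tip = bitcoind_wait_for_chain_tip(config, ctx);
    try_info!(ctx, "bitcoind tip: #{} ({})", tip.index, tip.hash);
    // Re-check later without the stability wait (retries only on RPC errors).
    let latest = bitcoind_get_chain_tip(config, ctx);
    if latest.index > tip.index {
        try_info!(ctx, "bitcoind advanced to #{}", latest.index);
    }
}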

View File

@@ -8,9 +8,11 @@ use std::{
};
use chainhook_types::{BitcoinBlockData, BlockHeader, BlockIdentifier};
use hiro_system_kit::slog::{self, Logger};
use hiro_system_kit::{slog, Logger};
use reqwest::RequestBuilder;
use crate::try_crit;
#[derive(Clone)]
pub struct Context {
pub logger: Option<Logger>,
@@ -223,6 +225,29 @@ impl BlockHeights {
}
}
pub fn future_block_on<F>(ctx: &Context, future: F)
where
F: std::future::Future<Output = Result<(), String>> + Send + 'static,
{
let (handle, _rt) = match tokio::runtime::Handle::try_current() {
Ok(h) => (h, None),
Err(_) => {
let rt = tokio::runtime::Runtime::new().unwrap();
(rt.handle().clone(), Some(rt))
}
};
let thread = std::thread::current();
let thread_name = thread.name().unwrap_or("unknown");
let result = handle.block_on(future);
match result {
Ok(value) => value,
Err(e) => {
try_crit!(ctx, "[{thread_name}]: {e}");
std::process::exit(1);
}
}
}
#[test]
fn test_block_heights_range_construct() {
let range = BlockHeights::BlockRange(0, 10);
@@ -328,39 +353,49 @@ pub fn write_file_content_at_path(file_path: &Path, content: &[u8]) -> Result<()
#[macro_export]
macro_rules! try_info {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| slog::info!(l, $tag, $($args)*));
$a.try_log(|l| hiro_system_kit::slog::info!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| slog::info!(l, $tag));
$a.try_log(|l| hiro_system_kit::slog::info!(l, $tag));
};
}
#[macro_export]
macro_rules! try_debug {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| slog::debug!(l, $tag, $($args)*));
$a.try_log(|l| hiro_system_kit::slog::debug!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| slog::debug!(l, $tag));
$a.try_log(|l| hiro_system_kit::slog::debug!(l, $tag));
};
}
#[macro_export]
macro_rules! try_warn {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| slog::warn!(l, $tag, $($args)*));
$a.try_log(|l| hiro_system_kit::slog::warn!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| slog::warn!(l, $tag));
$a.try_log(|l| hiro_system_kit::slog::warn!(l, $tag));
};
}
#[macro_export]
macro_rules! try_error {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| slog::error!(l, $tag, $($args)*));
$a.try_log(|l| hiro_system_kit::slog::error!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| slog::error!(l, $tag));
$a.try_log(|l| hiro_system_kit::slog::error!(l, $tag));
};
}
#[macro_export]
macro_rules! try_crit {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| hiro_system_kit::slog::crit!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| hiro_system_kit::slog::crit!(l, $tag));
};
}
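One practical effect of fully qualifying the `slog` path inside these macros: call sites no longer need their own `use hiro_system_kit::slog;`. A minimal sketch, using the `bitcoind::try_info` re-export that the CLI diff below also relies on:

use bitcoind::{try_info, utils::Context};

fn log_progress(ctx: &Context, height: u64) {
    // No local `slog` import required at the call site anymore.
    try_info!(ctx, "Indexed block #{height}");
}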

View File

@@ -1,602 +0,0 @@
mod zmq;
use std::{
collections::HashMap,
error::Error,
str,
sync::mpsc::{Receiver, Sender},
};
use chainhook_types::{
BitcoinBlockData, BitcoinChainEvent, BitcoinChainUpdatedWithBlocksData,
BitcoinChainUpdatedWithReorgData, BitcoinNetwork, BlockIdentifier, BlockchainEvent,
};
use config::BitcoindConfig;
use hiro_system_kit::{self, slog};
use rocket::{serde::Deserialize, Shutdown};
use crate::{
indexer::bitcoin::{
build_http_client, download_and_parse_block_with_retry, standardize_bitcoin_block,
BitcoinBlockFullBreakdown,
},
utils::Context,
};
#[derive(Deserialize)]
pub struct NewTransaction {
pub txid: String,
pub status: String,
pub raw_result: String,
pub raw_tx: String,
}
#[derive(Clone, Debug)]
pub enum Event {
BitcoinChainEvent(BitcoinChainEvent),
}
#[derive(Clone, Debug, PartialEq)]
pub enum ObserverCommand {
StandardizeBitcoinBlock(BitcoinBlockFullBreakdown),
CacheBitcoinBlock(BitcoinBlockData),
PropagateBitcoinChainEvent(BlockchainEvent),
Terminate,
}
#[derive(Clone, Debug, PartialEq)]
pub struct HookExpirationData {
pub hook_uuid: String,
pub block_height: u64,
}
#[derive(Clone, Debug, PartialEq)]
pub struct MempoolAdmissionData {
pub tx_data: String,
pub tx_description: String,
}
#[derive(Clone, Debug)]
pub enum ObserverEvent {
Error(String),
Fatal(String),
Info(String),
Terminate,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
/// JSONRPC Request
pub struct BitcoinRPCRequest {
/// The name of the RPC call
pub method: String,
/// Parameters to the RPC call
pub params: serde_json::Value,
/// Identifier for this Request, which should appear in the response
pub id: serde_json::Value,
/// jsonrpc field, MUST be "2.0"
pub jsonrpc: serde_json::Value,
}
#[derive(Debug, Clone)]
pub struct BitcoinBlockDataCached {
pub block: BitcoinBlockData,
pub processed_by_sidecar: bool,
}
type BlockMutationSender =
crossbeam_channel::Sender<(Vec<BitcoinBlockDataCached>, Vec<BlockIdentifier>)>;
type BlockMutationReceiver = crossbeam_channel::Receiver<Vec<BitcoinBlockDataCached>>;
type BlockEventHandlerSender = crossbeam_channel::Sender<HandleBlock>;
pub struct ObserverSidecar {
pub bitcoin_blocks_mutator: Option<(BlockMutationSender, BlockMutationReceiver)>,
pub bitcoin_chain_event_notifier: Option<BlockEventHandlerSender>,
}
impl ObserverSidecar {
fn perform_bitcoin_sidecar_mutations(
&self,
blocks: Vec<BitcoinBlockDataCached>,
blocks_ids_to_rollback: Vec<BlockIdentifier>,
ctx: &Context,
) -> Vec<BitcoinBlockDataCached> {
if let Some(ref block_mutator) = self.bitcoin_blocks_mutator {
ctx.try_log(|logger| slog::info!(logger, "Sending blocks to pre-processor",));
let _ = block_mutator
.0
.send((blocks.clone(), blocks_ids_to_rollback));
ctx.try_log(|logger| slog::info!(logger, "Waiting for blocks from pre-processor",));
match block_mutator.1.recv() {
Ok(updated_blocks) => {
ctx.try_log(|logger| slog::info!(logger, "Block received from pre-processor",));
updated_blocks
}
Err(e) => {
ctx.try_log(|logger| {
slog::error!(
logger,
"Unable to receive block from pre-processor {}",
e.to_string()
)
});
blocks
}
}
} else {
blocks
}
}
fn notify_chain_event(&self, chain_event: &BitcoinChainEvent, _ctx: &Context) {
if let Some(ref notifier) = self.bitcoin_chain_event_notifier {
match chain_event {
BitcoinChainEvent::ChainUpdatedWithBlocks(data) => {
for block in data.new_blocks.iter() {
let _ = notifier.send(HandleBlock::ApplyBlock(block.clone()));
}
}
BitcoinChainEvent::ChainUpdatedWithReorg(data) => {
for block in data.blocks_to_rollback.iter() {
let _ = notifier.send(HandleBlock::UndoBlock(block.clone()));
}
for block in data.blocks_to_apply.iter() {
let _ = notifier.send(HandleBlock::ApplyBlock(block.clone()));
}
}
}
}
}
}
/// A helper struct used to configure and call [start_event_observer], which spawns a thread to observer chain events.
///
/// ### Examples
/// ```
/// use chainhook_sdk::observer::EventObserverBuilder;
/// use chainhook_sdk::observer::ObserverCommand;
/// use chainhook_sdk::utils::Context;
/// use config::BitcoindConfig;
/// use std::error::Error;
/// use std::sync::mpsc::{Receiver, Sender};
///
/// fn start_event_observer(
/// config: BitcoindConfig,
/// observer_commands_tx: &Sender<ObserverCommand>,
/// observer_commands_rx: Receiver<ObserverCommand>,
/// ctx: &Context,
/// )-> Result<(), Box<dyn Error>> {
/// EventObserverBuilder::new(
/// config,
/// &observer_commands_tx,
/// observer_commands_rx,
/// &ctx
/// )
/// .start()
/// }
/// ```
pub struct EventObserverBuilder {
config: BitcoindConfig,
observer_commands_tx: Sender<ObserverCommand>,
observer_commands_rx: Receiver<ObserverCommand>,
ctx: Context,
observer_events_tx: Option<crossbeam_channel::Sender<ObserverEvent>>,
observer_sidecar: Option<ObserverSidecar>,
}
impl EventObserverBuilder {
pub fn new(
config: BitcoindConfig,
observer_commands_tx: &Sender<ObserverCommand>,
observer_commands_rx: Receiver<ObserverCommand>,
ctx: &Context,
) -> Self {
EventObserverBuilder {
config,
observer_commands_tx: observer_commands_tx.clone(),
observer_commands_rx,
ctx: ctx.clone(),
observer_events_tx: None,
observer_sidecar: None,
}
}
/// Sets the `observer_events_tx` Sender. Set this and listen on the corresponding
/// Receiver to be notified of every [ObserverEvent].
pub fn events_tx(
&mut self,
observer_events_tx: crossbeam_channel::Sender<ObserverEvent>,
) -> &mut Self {
self.observer_events_tx = Some(observer_events_tx);
self
}
/// Sets a sidecar for the observer. See [ObserverSidecar].
pub fn sidecar(&mut self, sidecar: ObserverSidecar) -> &mut Self {
self.observer_sidecar = Some(sidecar);
self
}
/// Starts the event observer, calling [start_event_observer]. This function consumes the
/// [EventObserverBuilder] and spawns a new thread to run the observer.
pub fn start(self) -> Result<(), Box<dyn Error>> {
start_event_observer(
self.config,
self.observer_commands_tx,
self.observer_commands_rx,
self.observer_events_tx,
self.observer_sidecar,
self.ctx,
)
}
}
/// Spawns a thread to observe blockchain events. Use [EventObserverBuilder] to configure easily.
pub fn start_event_observer(
config: BitcoindConfig,
observer_commands_tx: Sender<ObserverCommand>,
observer_commands_rx: Receiver<ObserverCommand>,
observer_events_tx: Option<crossbeam_channel::Sender<ObserverEvent>>,
observer_sidecar: Option<ObserverSidecar>,
ctx: Context,
) -> Result<(), Box<dyn Error>> {
ctx.try_log(|logger| {
slog::info!(
logger,
"Observing Bitcoin chain events via ZeroMQ: {}",
config.zmq_url
)
});
let context_cloned = ctx.clone();
let event_observer_config_moved = config.clone();
let observer_commands_tx_moved = observer_commands_tx.clone();
let _ = hiro_system_kit::thread_named("Chainhook event observer")
.spawn(move || {
let future = start_bitcoin_event_observer(
event_observer_config_moved,
observer_commands_tx_moved,
observer_commands_rx,
observer_events_tx.clone(),
observer_sidecar,
context_cloned.clone(),
);
match hiro_system_kit::nestable_block_on(future) {
Ok(_) => {}
Err(e) => {
if let Some(tx) = observer_events_tx {
context_cloned.try_log(|logger| {
slog::crit!(
logger,
"Chainhook event observer thread failed with error: {e}",
)
});
let _ = tx.send(ObserverEvent::Terminate);
}
}
}
})
.expect("unable to spawn thread");
Ok(())
}
pub async fn start_bitcoin_event_observer(
config: BitcoindConfig,
_observer_commands_tx: Sender<ObserverCommand>,
observer_commands_rx: Receiver<ObserverCommand>,
observer_events_tx: Option<crossbeam_channel::Sender<ObserverEvent>>,
observer_sidecar: Option<ObserverSidecar>,
ctx: Context,
) -> Result<(), Box<dyn Error>> {
let ctx_moved = ctx.clone();
let config_moved = config.clone();
let _ = hiro_system_kit::thread_named("ZMQ handler").spawn(move || {
let future = zmq::start_zeromq_runloop(&config_moved, _observer_commands_tx, &ctx_moved);
hiro_system_kit::nestable_block_on(future);
});
// This loop is used for handling background jobs, emitted by HTTP calls.
start_observer_commands_handler(
config,
observer_commands_rx,
observer_events_tx,
None,
observer_sidecar,
ctx,
)
.await
}
pub enum HandleBlock {
ApplyBlock(BitcoinBlockData),
UndoBlock(BitcoinBlockData),
}
pub async fn start_observer_commands_handler(
config: BitcoindConfig,
observer_commands_rx: Receiver<ObserverCommand>,
observer_events_tx: Option<crossbeam_channel::Sender<ObserverEvent>>,
ingestion_shutdown: Option<Shutdown>,
observer_sidecar: Option<ObserverSidecar>,
ctx: Context,
) -> Result<(), Box<dyn Error>> {
let mut bitcoin_block_store: HashMap<BlockIdentifier, BitcoinBlockDataCached> = HashMap::new();
let http_client = build_http_client();
let store_update_required = observer_sidecar
.as_ref()
.and_then(|s| s.bitcoin_blocks_mutator.as_ref())
.is_some();
loop {
let command = match observer_commands_rx.recv() {
Ok(cmd) => cmd,
Err(e) => {
ctx.try_log(|logger| {
slog::crit!(logger, "Error: broken channel {}", e.to_string())
});
break;
}
};
match command {
ObserverCommand::Terminate => {
break;
}
ObserverCommand::StandardizeBitcoinBlock(mut block_data) => {
let block_hash = block_data.hash.to_string();
let mut attempts = 0;
let max_attempts = 10;
let block = loop {
match standardize_bitcoin_block(
block_data.clone(),
&BitcoinNetwork::from_network(config.network),
&ctx,
) {
Ok(block) => break Some(block),
Err((e, refetch_block)) => {
attempts += 1;
if attempts > max_attempts {
break None;
}
ctx.try_log(|logger| {
slog::warn!(logger, "Error standardizing block: {}", e)
});
if refetch_block {
block_data = match download_and_parse_block_with_retry(
&http_client,
&block_hash,
&config,
&ctx,
)
.await
{
Ok(block) => block,
Err(e) => {
ctx.try_log(|logger| {
slog::warn!(
logger,
"unable to download_and_parse_block: {}",
e.to_string()
)
});
continue;
}
};
}
}
};
};
let Some(block) = block else {
ctx.try_log(|logger| {
slog::crit!(
logger,
"Could not process bitcoin block after {} attempts.",
attempts
)
});
break;
};
bitcoin_block_store.insert(
block.block_identifier.clone(),
BitcoinBlockDataCached {
block,
processed_by_sidecar: false,
},
);
}
ObserverCommand::CacheBitcoinBlock(block) => {
bitcoin_block_store.insert(
block.block_identifier.clone(),
BitcoinBlockDataCached {
block,
processed_by_sidecar: false,
},
);
}
ObserverCommand::PropagateBitcoinChainEvent(blockchain_event) => {
ctx.try_log(|logger| {
slog::info!(logger, "Handling PropagateBitcoinChainEvent command")
});
let mut confirmed_blocks = vec![];
// Update Chain event before propagation
let (chain_event, _) = match blockchain_event {
BlockchainEvent::BlockchainUpdatedWithHeaders(data) => {
let mut blocks_to_mutate = vec![];
let mut new_blocks = vec![];
let mut new_tip = 0;
for header in data.new_headers.iter() {
if header.block_identifier.index > new_tip {
new_tip = header.block_identifier.index;
}
if store_update_required {
let Some(block) =
bitcoin_block_store.remove(&header.block_identifier)
else {
continue;
};
blocks_to_mutate.push(block);
} else {
let Some(cache) = bitcoin_block_store.get(&header.block_identifier)
else {
continue;
};
new_blocks.push(cache.block.clone());
};
}
if let Some(ref sidecar) = observer_sidecar {
let updated_blocks = sidecar.perform_bitcoin_sidecar_mutations(
blocks_to_mutate,
vec![],
&ctx,
);
for cache in updated_blocks.into_iter() {
bitcoin_block_store
.insert(cache.block.block_identifier.clone(), cache.clone());
new_blocks.push(cache.block);
}
}
for header in data.confirmed_headers.iter() {
match bitcoin_block_store.remove(&header.block_identifier) {
Some(res) => {
confirmed_blocks.push(res.block);
}
None => {
ctx.try_log(|logger| {
slog::error!(
logger,
"Unable to retrieve confirmed bitcoin block {}",
header.block_identifier
)
});
}
}
}
(
BitcoinChainEvent::ChainUpdatedWithBlocks(
BitcoinChainUpdatedWithBlocksData {
new_blocks,
confirmed_blocks: confirmed_blocks.clone(),
},
),
new_tip,
)
}
BlockchainEvent::BlockchainUpdatedWithReorg(data) => {
let mut blocks_to_rollback = vec![];
let mut blocks_to_mutate = vec![];
let mut blocks_to_apply = vec![];
let mut new_tip = 0;
for header in data.headers_to_apply.iter() {
if header.block_identifier.index > new_tip {
new_tip = header.block_identifier.index;
}
if store_update_required {
let Some(block) =
bitcoin_block_store.remove(&header.block_identifier)
else {
continue;
};
blocks_to_mutate.push(block);
} else {
let Some(cache) = bitcoin_block_store.get(&header.block_identifier)
else {
continue;
};
blocks_to_apply.push(cache.block.clone());
};
}
let mut blocks_ids_to_rollback: Vec<BlockIdentifier> = vec![];
for header in data.headers_to_rollback.iter() {
match bitcoin_block_store.get(&header.block_identifier) {
Some(cache) => {
blocks_ids_to_rollback.push(header.block_identifier.clone());
blocks_to_rollback.push(cache.block.clone());
}
None => {
ctx.try_log(|logger| {
slog::error!(
logger,
"Unable to retrieve bitcoin block {}",
header.block_identifier
)
});
}
}
}
if let Some(ref sidecar) = observer_sidecar {
let updated_blocks = sidecar.perform_bitcoin_sidecar_mutations(
blocks_to_mutate,
blocks_ids_to_rollback,
&ctx,
);
for cache in updated_blocks.into_iter() {
bitcoin_block_store
.insert(cache.block.block_identifier.clone(), cache.clone());
blocks_to_apply.push(cache.block);
}
}
for header in data.confirmed_headers.iter() {
match bitcoin_block_store.remove(&header.block_identifier) {
Some(res) => {
confirmed_blocks.push(res.block);
}
None => {
ctx.try_log(|logger| {
slog::error!(
logger,
"Unable to retrieve confirmed bitcoin block {}",
header.block_identifier
)
});
}
}
}
(
BitcoinChainEvent::ChainUpdatedWithReorg(
BitcoinChainUpdatedWithReorgData {
blocks_to_apply,
blocks_to_rollback,
confirmed_blocks: confirmed_blocks.clone(),
},
),
new_tip,
)
}
};
if let Some(ref sidecar) = observer_sidecar {
sidecar.notify_chain_event(&chain_event, &ctx)
}
}
}
}
terminate(ingestion_shutdown, observer_events_tx, &ctx);
Ok(())
}
fn terminate(
ingestion_shutdown: Option<Shutdown>,
observer_events_tx: Option<crossbeam_channel::Sender<ObserverEvent>>,
ctx: &Context,
) {
ctx.try_log(|logger| slog::info!(logger, "Handling Termination command"));
if let Some(ingestion_shutdown) = ingestion_shutdown {
ingestion_shutdown.notify();
}
if let Some(ref tx) = observer_events_tx {
let _ = tx.send(ObserverEvent::Info("Terminating event observer".into()));
let _ = tx.send(ObserverEvent::Terminate);
}
}

View File

@@ -1,133 +0,0 @@
use std::{collections::VecDeque, sync::mpsc::Sender};
use config::BitcoindConfig;
use hiro_system_kit::slog;
use zmq::Socket;
use super::ObserverCommand;
use crate::{
indexer::{
bitcoin::{build_http_client, download_and_parse_block_with_retry},
fork_scratch_pad::ForkScratchPad,
},
try_info, try_warn,
utils::Context,
};
fn new_zmq_socket() -> Socket {
let context = zmq::Context::new();
let socket = context.socket(zmq::SUB).unwrap();
assert!(socket.set_subscribe(b"hashblock").is_ok());
assert!(socket.set_rcvhwm(0).is_ok());
// We override the OS default behavior:
assert!(socket.set_tcp_keepalive(1).is_ok());
// The keepalive routine will wait for 5 minutes
assert!(socket.set_tcp_keepalive_idle(300).is_ok());
// And then resend it every 60 seconds
assert!(socket.set_tcp_keepalive_intvl(60).is_ok());
// 120 times
assert!(socket.set_tcp_keepalive_cnt(120).is_ok());
socket
}
pub async fn start_zeromq_runloop(
config: &BitcoindConfig,
observer_commands_tx: Sender<ObserverCommand>,
ctx: &Context,
) {
let bitcoind_zmq_url = config.zmq_url.clone();
let http_client = build_http_client();
try_info!(
ctx,
"zmq: Waiting for ZMQ connection acknowledgment from bitcoind"
);
let mut socket = new_zmq_socket();
assert!(socket.connect(&bitcoind_zmq_url).is_ok());
try_info!(
ctx,
"zmq: Connected, waiting for ZMQ messages from bitcoind"
);
let mut bitcoin_blocks_pool = ForkScratchPad::new();
loop {
let msg = match socket.recv_multipart(0) {
Ok(msg) => msg,
Err(e) => {
try_warn!(ctx, "zmq: Unable to receive ZMQ message: {e}");
socket = new_zmq_socket();
assert!(socket.connect(&bitcoind_zmq_url).is_ok());
continue;
}
};
let (topic, data, _sequence) = (&msg[0], &msg[1], &msg[2]);
if !topic.eq(b"hashblock") {
try_warn!(
ctx,
"zmq: {} Topic not supported",
String::from_utf8(topic.clone()).unwrap()
);
continue;
}
let block_hash = hex::encode(data);
try_info!(ctx, "zmq: Bitcoin block hash announced {block_hash}");
let mut block_hashes: VecDeque<String> = VecDeque::new();
block_hashes.push_front(block_hash);
while let Some(block_hash) = block_hashes.pop_front() {
let block =
match download_and_parse_block_with_retry(&http_client, &block_hash, config, ctx)
.await
{
Ok(block) => block,
Err(e) => {
try_warn!(ctx, "zmq: Unable to download block: {e}");
continue;
}
};
let header = block.get_block_header();
try_info!(ctx, "zmq: Standardizing bitcoin block #{}", block.height);
let _ = observer_commands_tx.send(ObserverCommand::StandardizeBitcoinBlock(block));
if bitcoin_blocks_pool.can_process_header(&header) {
match bitcoin_blocks_pool.process_header(header, ctx) {
Ok(Some(event)) => {
let _ = observer_commands_tx
.send(ObserverCommand::PropagateBitcoinChainEvent(event));
}
Err(e) => {
try_warn!(ctx, "zmq: Unable to append block: {e}");
}
Ok(None) => {
try_warn!(ctx, "zmq: Unable to append block");
}
}
} else {
// Handle a behaviour specific to ZMQ usage in bitcoind.
// Considering a simple re-org:
// A (1) - B1 (2) - C1 (3)
// \ B2 (4) - C2 (5) - D2 (6)
// When D2 is being discovered (making A -> B2 -> C2 -> D2 the new canonical fork)
// it looks like ZMQ is only publishing D2.
// Without extra work, we would end up with a block that we can't append.
let parent_block_hash = header
.parent_block_identifier
.get_hash_bytes_str()
.to_string();
try_info!(
ctx,
"zmq: Re-org detected, retrieving parent block {parent_block_hash}"
);
block_hashes.push_front(block_hash);
block_hashes.push_front(parent_block_hash);
}
}
}
}
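
The re-org walk-back above is the subtle part of this runloop: when an announced block can't be appended, the loop keeps prepending parent hashes and retries front-to-back until it reconnects with a known ancestor. A minimal model of that queue discipline, with hypothetical known_hashes / parent_of stand-ins for the fork pool and the block download (not the crate's API):

use std::collections::VecDeque;

/// Returns the hashes to append, oldest first. `known_hashes` stands in for blocks
/// the fork pool already holds; `parent_of` stands in for a block download that
/// yields the parent hash. Both are hypothetical.
fn blocks_to_append(
    known_hashes: &[&str],
    announced: &str,
    parent_of: impl Fn(&str) -> String,
) -> Vec<String> {
    let mut queue: VecDeque<String> = VecDeque::new();
    queue.push_front(announced.to_string());
    let mut ordered: Vec<String> = Vec::new();
    while let Some(hash) = queue.pop_front() {
        let parent = parent_of(&hash);
        if known_hashes.contains(&parent.as_str()) || ordered.last() == Some(&parent) {
            // Parent is known: this block can now be appended in order.
            ordered.push(hash);
        } else {
            // Unknown parent: fetch it first, mirroring the push_front pair above.
            queue.push_front(hash);
            queue.push_front(parent);
        }
    }
    ordered
}

fn main() {
    // The re-org from the comment: A - B1 - C1 is indexed, ZMQ only announces D2.
    let parent_of = |hash: &str| {
        match hash {
            "D2" => "C2",
            "C2" => "B2",
            _ => "A",
        }
        .to_string()
    };
    assert_eq!(
        blocks_to_append(&["A", "B1", "C1"], "D2", parent_of),
        vec!["B2", "C2", "D2"]
    );
}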

View File

@@ -13,10 +13,10 @@ path = "src/lib.rs"
[dependencies]
config = { path = "../config" }
ordhook = { path = "../ordhook-core" }
ordinals = { path = "../ordinals" }
runes = { path = "../runes" }
chainhook-types = { path = "../chainhook-types-rs" }
chainhook-sdk = { path = "../chainhook-sdk" }
bitcoind = { path = "../bitcoind" }
hex = "0.4.3"
num_cpus = "1.16.0"
serde = "1"

View File

@@ -1,11 +1,11 @@
use std::{path::PathBuf, process, thread::sleep, time::Duration};
use chainhook_sdk::utils::Context;
use bitcoind::{try_error, try_info, utils::Context};
use chainhook_types::BlockIdentifier;
use clap::Parser;
use commands::{Command, ConfigCommand, DatabaseCommand, IndexCommand, Protocol, ServiceCommand};
use config::{generator::generate_toml_config, Config};
use hiro_system_kit::{self, error, info};
use ordhook::{db::migrate_dbs, service::Service, try_info};
use hiro_system_kit;
mod commands;
@@ -26,7 +26,7 @@ pub fn main() {
};
if let Err(e) = hiro_system_kit::nestable_block_on(handle_command(opts, &ctx)) {
error!(ctx.expect_logger(), "{e}");
try_error!(&ctx, "{e}");
std::thread::sleep(std::time::Duration::from_millis(500));
process::exit(1);
}
@@ -43,12 +43,15 @@ fn check_maintenance_mode(ctx: &Context) {
}
}
fn confirm_rollback(current_chain_tip: u64, blocks_to_rollback: u32) -> Result<(), String> {
fn confirm_rollback(
current_chain_tip: &BlockIdentifier,
blocks_to_rollback: u32,
) -> Result<(), String> {
println!("Index chain tip is at #{current_chain_tip}");
println!(
"{} blocks will be dropped. New index chain tip will be at #{}. Confirm? [Y/n]",
blocks_to_rollback,
current_chain_tip - blocks_to_rollback as u64
current_chain_tip.index - blocks_to_rollback as u64
);
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).unwrap();
@@ -66,37 +69,27 @@ async fn handle_command(opts: Protocol, ctx: &Context) -> Result<(), String> {
check_maintenance_mode(ctx);
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_ordinals_config()?;
migrate_dbs(&config, ctx).await?;
let mut service = Service::new(&config, ctx);
// TODO(rafaelcr): This only works if there's a rocksdb file already containing blocks previous to the first
// inscription height.
let start_block = service.get_index_chain_tip().await?;
try_info!(ctx, "Index chain tip is at #{start_block}");
return service.run(false).await;
ordinals::start_ordinals_indexer(true, &config, ctx).await?
}
},
Command::Index(index_command) => match index_command {
IndexCommand::Sync(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_ordinals_config()?;
migrate_dbs(&config, ctx).await?;
let service = Service::new(&config, ctx);
service.catch_up_to_bitcoin_chain_tip().await?;
ordinals::start_ordinals_indexer(false, &config, ctx).await?
}
IndexCommand::Rollback(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_ordinals_config()?;
let service = Service::new(&config, ctx);
let chain_tip = service.get_index_chain_tip().await?;
confirm_rollback(chain_tip, cmd.blocks)?;
let service = Service::new(&config, ctx);
let block_heights: Vec<u64> =
((chain_tip - cmd.blocks as u64)..=chain_tip).collect();
service.rollback(&block_heights).await?;
let chain_tip = ordinals::get_chain_tip(&config).await?;
confirm_rollback(&chain_tip, cmd.blocks)?;
ordinals::rollback_block_range(
chain_tip.index - cmd.blocks as u64,
chain_tip.index,
&config,
ctx,
)
.await?;
println!("{} blocks dropped", cmd.blocks);
}
},
@@ -104,7 +97,7 @@ async fn handle_command(opts: Protocol, ctx: &Context) -> Result<(), String> {
DatabaseCommand::Migrate(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_ordinals_config()?;
migrate_dbs(&config, ctx).await?;
ordinals::db::migrate_dbs(&config, ctx).await?;
}
},
},
@@ -114,36 +107,35 @@ async fn handle_command(opts: Protocol, ctx: &Context) -> Result<(), String> {
check_maintenance_mode(ctx);
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_runes_config()?;
return runes::service::start_service(&config, ctx).await;
runes::start_runes_indexer(true, &config, ctx).await?
}
},
Command::Index(index_command) => match index_command {
IndexCommand::Sync(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_runes_config()?;
runes::service::catch_up_to_bitcoin_chain_tip(&config, ctx).await?;
runes::start_runes_indexer(false, &config, ctx).await?
}
IndexCommand::Rollback(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_runes_config()?;
let chain_tip = runes::service::get_index_chain_tip(&config, ctx).await;
confirm_rollback(chain_tip, cmd.blocks)?;
let mut pg_client = runes::db::pg_connect(&config, false, ctx).await;
runes::scan::bitcoin::drop_blocks(
chain_tip - cmd.blocks as u64,
chain_tip,
&mut pg_client,
let chain_tip = runes::get_chain_tip(&config, ctx).await?;
confirm_rollback(&chain_tip, cmd.blocks)?;
runes::rollback_block_range(
chain_tip.index - cmd.blocks as u64,
chain_tip.index,
&config,
ctx,
)
.await;
.await?;
println!("{} blocks dropped", cmd.blocks);
}
},
Command::Database(database_command) => match database_command {
DatabaseCommand::Migrate(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_runes_config()?;
let _ = runes::db::pg_connect(&config, true, ctx).await;
runes::db::pg_connect(&config, true, ctx).await;
}
},
},

View File

@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
anyhow = { version = "1.0.56", features = ["backtrace"] }
bitcoin = { workspace = true }
chainhook-sdk = { path = "../chainhook-sdk" }
bitcoind = { path = "../bitcoind" }
ciborium = "0.2.1"
serde = "1"
serde_derive = "1"

View File

@@ -64,7 +64,7 @@ impl Chain {
}
// pub(crate) fn genesis_block(self) -> Block {
// chainhook_sdk::bitcoin::blockdata::constants::genesis_block(self.network())
// bitcoind::bitcoin::blockdata::constants::genesis_block(self.network())
// }
// pub(crate) fn genesis_coinbase_outpoint(self) -> OutPoint {

View File

@@ -1,160 +0,0 @@
use std::{
thread::{sleep, JoinHandle},
time::Duration,
};
use chainhook_sdk::utils::Context;
use chainhook_types::BitcoinBlockData;
use config::Config;
use crossbeam_channel::{Sender, TryRecvError};
use rocksdb::DB;
use crate::{
core::pipeline::{PostProcessorCommand, PostProcessorController, PostProcessorEvent},
db::blocks::{insert_entry_in_blocks, open_blocks_db_with_retry},
try_error, try_info,
};
pub fn start_block_archiving_processor(
config: &Config,
ctx: &Context,
update_tip: bool,
_post_processor: Option<Sender<BitcoinBlockData>>,
) -> PostProcessorController {
let (commands_tx, commands_rx) = crossbeam_channel::bounded::<PostProcessorCommand>(2);
let (events_tx, events_rx) = crossbeam_channel::unbounded::<PostProcessorEvent>();
let config = config.clone();
let ctx = ctx.clone();
let handle: JoinHandle<()> = hiro_system_kit::thread_named("Processor Runloop")
.spawn(move || {
let blocks_db_rw = open_blocks_db_with_retry(true, &config, &ctx);
let mut processed_blocks = 0;
loop {
let (compacted_blocks, _) = match commands_rx.try_recv() {
Ok(PostProcessorCommand::ProcessBlocks(compacted_blocks, blocks)) => {
(compacted_blocks, blocks)
}
Ok(PostProcessorCommand::Terminate) => {
let _ = events_tx.send(PostProcessorEvent::Terminated);
break;
}
Err(e) => match e {
TryRecvError::Empty => {
sleep(Duration::from_secs(1));
continue;
}
_ => {
break;
}
},
};
processed_blocks += compacted_blocks.len();
store_compacted_blocks(compacted_blocks, update_tip, &blocks_db_rw, &ctx);
if processed_blocks % 10_000 == 0 {
let _ = blocks_db_rw.flush_wal(true);
}
}
if let Err(e) = blocks_db_rw.flush() {
try_error!(ctx, "{}", e.to_string());
}
})
.expect("unable to spawn thread");
PostProcessorController {
commands_tx,
events_rx,
thread_handle: handle,
}
}
pub fn store_compacted_blocks(
mut compacted_blocks: Vec<(u64, Vec<u8>)>,
update_tip: bool,
blocks_db_rw: &DB,
ctx: &Context,
) {
compacted_blocks.sort_by(|(a, _), (b, _)| a.cmp(b));
for (block_height, compacted_block) in compacted_blocks.into_iter() {
insert_entry_in_blocks(
block_height as u32,
&compacted_block,
update_tip,
blocks_db_rw,
ctx,
);
try_info!(ctx, "Block #{block_height} saved to disk");
}
if let Err(e) = blocks_db_rw.flush() {
try_error!(ctx, "{}", e.to_string());
}
}
// #[cfg(test)]
// mod test {
// use std::{thread::sleep, time::Duration};
// use chainhook_sdk::utils::Context;
// use crate::{
// config::Config,
// core::{
// pipeline::PostProcessorCommand,
// test_builders::{TestBlockBuilder, TestTransactionBuilder},
// },
// db::{
// blocks::{find_block_bytes_at_block_height, open_blocks_db_with_retry},
// cursor::BlockBytesCursor,
// drop_all_dbs, initialize_sqlite_dbs,
// },
// };
// use super::start_block_archiving_processor;
// #[test]
// fn archive_blocks_via_processor() {
// let ctx = Context::empty();
// let config = Config::test_default();
// {
// drop_all_dbs(&config);
// let _ = initialize_sqlite_dbs(&config, &ctx);
// let _ = open_blocks_db_with_retry(true, &config, &ctx);
// }
// let controller = start_block_archiving_processor(&config, &ctx, true, None);
// // Store a block and terminate.
// let block0 = TestBlockBuilder::new()
// .hash("0x00000000000000000001b228f9faca9e7d11fcecff9d463bd05546ff0aa4651a".to_string())
// .height(849999)
// .add_transaction(
// TestTransactionBuilder::new()
// .hash(
// "0xa321c61c83563a377f82ef59301f2527079f6bda7c2d04f9f5954c873f42e8ac"
// .to_string(),
// )
// .build(),
// )
// .build();
// let _ = controller
// .commands_tx
// .send(PostProcessorCommand::ProcessBlocks(
// vec![(
// 849999,
// BlockBytesCursor::from_standardized_block(&block0).unwrap(),
// )],
// vec![],
// ));
// sleep(Duration::from_millis(100));
// let _ = controller.commands_tx.send(PostProcessorCommand::Terminate);
// // Check that blocks exist in rocksdb
// let blocks_db = open_blocks_db_with_retry(false, &config, &ctx);
// let result = find_block_bytes_at_block_height(849999, 3, &blocks_db, &ctx);
// assert!(result.is_some());
// }
// }

View File

@@ -1,526 +0,0 @@
use std::{
collections::{BTreeMap, HashMap},
hash::BuildHasherDefault,
sync::Arc,
thread::{sleep, JoinHandle},
time::Duration,
};
use chainhook_postgres::{pg_begin, pg_pool_client};
use chainhook_sdk::utils::Context;
use chainhook_types::{BitcoinBlockData, TransactionIdentifier};
use config::Config;
use crossbeam_channel::TryRecvError;
use dashmap::DashMap;
use fxhash::FxHasher;
use crate::{
core::{
meta_protocols::brc20::{
brc20_pg,
cache::{brc20_new_cache, Brc20MemoryCache},
index::index_block_and_insert_brc20_operations,
},
new_traversals_lazy_cache,
pipeline::{
processors::block_archiving::store_compacted_blocks, PostProcessorCommand,
PostProcessorController, PostProcessorEvent,
},
protocol::{
inscription_parsing::parse_inscriptions_in_standardized_block,
inscription_sequencing::{
get_bitcoin_network, get_jubilee_block_height,
parallelize_inscription_data_computations,
update_block_inscriptions_with_consensus_sequence_data,
},
satoshi_numbering::TraversalResult,
satoshi_tracking::augment_block_with_transfers,
sequence_cursor::SequenceCursor,
},
},
db::{blocks::open_blocks_db_with_retry, cursor::TransactionBytesCursor, ordinals_pg},
service::PgConnectionPools,
try_crit, try_debug, try_info,
utils::monitoring::PrometheusMonitoring,
};
pub fn start_inscription_indexing_processor(
config: &Config,
pg_pools: &PgConnectionPools,
ctx: &Context,
prometheus: &PrometheusMonitoring,
) -> PostProcessorController {
let (commands_tx, commands_rx) = crossbeam_channel::bounded::<PostProcessorCommand>(2);
let (events_tx, events_rx) = crossbeam_channel::unbounded::<PostProcessorEvent>();
let config = config.clone();
let ctx = ctx.clone();
let pg_pools = pg_pools.clone();
let prometheus = prometheus.clone();
let handle: JoinHandle<()> = hiro_system_kit::thread_named("Inscription indexing runloop")
.spawn(move || {
hiro_system_kit::nestable_block_on(async move {
let cache_l2 = Arc::new(new_traversals_lazy_cache(2048));
let garbage_collect_every_n_blocks = 100;
let mut garbage_collect_nth_block = 0;
let mut empty_cycles = 0;
let mut sequence_cursor = SequenceCursor::new();
let mut brc20_cache = brc20_new_cache(&config);
loop {
let (compacted_blocks, mut blocks) = match commands_rx.try_recv() {
Ok(PostProcessorCommand::ProcessBlocks(compacted_blocks, blocks)) => {
empty_cycles = 0;
(compacted_blocks, blocks)
}
Ok(PostProcessorCommand::Terminate) => {
let _ = events_tx.send(PostProcessorEvent::Terminated);
break;
}
Err(e) => match e {
TryRecvError::Empty => {
empty_cycles += 1;
if empty_cycles == 180 {
try_info!(ctx, "Block processor reached expiration");
let _ = events_tx.send(PostProcessorEvent::Expired);
break;
}
sleep(Duration::from_secs(1));
continue;
}
_ => {
break;
}
},
};
{
let blocks_db_rw = open_blocks_db_with_retry(true, &config, &ctx);
store_compacted_blocks(
compacted_blocks,
true,
&blocks_db_rw,
&Context::empty(),
);
}
if blocks.is_empty() {
continue;
}
blocks = match process_blocks(
&mut blocks,
&mut sequence_cursor,
&cache_l2,
&mut brc20_cache,
&prometheus,
&config,
&pg_pools,
&ctx,
)
.await
{
Ok(blocks) => blocks,
Err(e) => {
try_crit!(ctx, "Error indexing blocks: {e}");
std::process::exit(1);
}
};
garbage_collect_nth_block += blocks.len();
if garbage_collect_nth_block > garbage_collect_every_n_blocks {
try_debug!(ctx, "Clearing cache L2 ({} entries)", cache_l2.len());
cache_l2.clear();
garbage_collect_nth_block = 0;
}
}
});
})
.expect("unable to spawn thread");
PostProcessorController {
commands_tx,
events_rx,
thread_handle: handle,
}
}
async fn process_blocks(
next_blocks: &mut Vec<BitcoinBlockData>,
sequence_cursor: &mut SequenceCursor,
cache_l2: &Arc<DashMap<(u32, [u8; 8]), TransactionBytesCursor, BuildHasherDefault<FxHasher>>>,
brc20_cache: &mut Option<Brc20MemoryCache>,
prometheus: &PrometheusMonitoring,
config: &Config,
pg_pools: &PgConnectionPools,
ctx: &Context,
) -> Result<Vec<BitcoinBlockData>, String> {
let mut cache_l1 = BTreeMap::new();
let mut updated_blocks = vec![];
for _cursor in 0..next_blocks.len() {
let mut block = next_blocks.remove(0);
index_block(
&mut block,
next_blocks,
sequence_cursor,
&mut cache_l1,
cache_l2,
brc20_cache.as_mut(),
prometheus,
config,
pg_pools,
ctx,
)
.await?;
updated_blocks.push(block);
}
Ok(updated_blocks)
}
pub async fn index_block(
block: &mut BitcoinBlockData,
next_blocks: &Vec<BitcoinBlockData>,
sequence_cursor: &mut SequenceCursor,
cache_l1: &mut BTreeMap<(TransactionIdentifier, usize, u64), TraversalResult>,
cache_l2: &Arc<DashMap<(u32, [u8; 8]), TransactionBytesCursor, BuildHasherDefault<FxHasher>>>,
brc20_cache: Option<&mut Brc20MemoryCache>,
prometheus: &PrometheusMonitoring,
config: &Config,
pg_pools: &PgConnectionPools,
ctx: &Context,
) -> Result<(), String> {
let stopwatch = std::time::Instant::now();
let block_height = block.block_identifier.index;
try_info!(ctx, "Indexing block #{block_height}");
// Invalidate and recompute cursor when crossing the jubilee height
if block.block_identifier.index
== get_jubilee_block_height(&get_bitcoin_network(&block.metadata.network))
{
sequence_cursor.reset();
}
{
let mut ord_client = pg_pool_client(&pg_pools.ordinals).await?;
let ord_tx = pg_begin(&mut ord_client).await?;
// Parsed BRC20 ops will be deposited here for this block.
let mut brc20_operation_map = HashMap::new();
parse_inscriptions_in_standardized_block(block, &mut brc20_operation_map, config, ctx);
let has_inscription_reveals = parallelize_inscription_data_computations(
block,
next_blocks,
cache_l1,
cache_l2,
config,
ctx,
)?;
if has_inscription_reveals {
update_block_inscriptions_with_consensus_sequence_data(
block,
sequence_cursor,
cache_l1,
&ord_tx,
ctx,
)
.await?;
}
augment_block_with_transfers(block, &ord_tx, ctx).await?;
// Write data
ordinals_pg::insert_block(block, &ord_tx).await?;
// BRC-20
if let (Some(brc20_cache), Some(brc20_pool)) = (brc20_cache, &pg_pools.brc20) {
let mut brc20_client = pg_pool_client(brc20_pool).await?;
let brc20_tx = pg_begin(&mut brc20_client).await?;
index_block_and_insert_brc20_operations(
block,
&mut brc20_operation_map,
brc20_cache,
&brc20_tx,
ctx,
)
.await?;
brc20_tx
.commit()
.await
.map_err(|e| format!("unable to commit brc20 pg transaction: {e}"))?;
}
prometheus.metrics_block_indexed(block_height);
prometheus.metrics_inscription_indexed(
ordinals_pg::get_highest_inscription_number(&ord_tx)
.await?
.unwrap_or(0) as u64,
);
ord_tx
.commit()
.await
.map_err(|e| format!("unable to commit ordinals pg transaction: {e}"))?;
}
try_info!(
ctx,
"Block #{block_height} indexed in {}s",
stopwatch.elapsed().as_millis() as f32 / 1000.0
);
Ok(())
}
pub async fn rollback_block(
block_height: u64,
_config: &Config,
pg_pools: &PgConnectionPools,
ctx: &Context,
) -> Result<(), String> {
try_info!(ctx, "Rolling back block #{block_height}");
{
let mut ord_client = pg_pool_client(&pg_pools.ordinals).await?;
let ord_tx = pg_begin(&mut ord_client).await?;
ordinals_pg::rollback_block(block_height, &ord_tx).await?;
// BRC-20
if let Some(brc20_pool) = &pg_pools.brc20 {
let mut brc20_client = pg_pool_client(brc20_pool).await?;
let brc20_tx = pg_begin(&mut brc20_client).await?;
brc20_pg::rollback_block_operations(block_height, &brc20_tx).await?;
brc20_tx
.commit()
.await
.map_err(|e| format!("unable to commit brc20 pg transaction: {e}"))?;
try_info!(
ctx,
"Rolled back BRC-20 operations at block #{block_height}"
);
}
ord_tx
.commit()
.await
.map_err(|e| format!("unable to commit ordinals pg transaction: {e}"))?;
try_info!(
ctx,
"Rolled back inscription activity at block #{block_height}"
);
}
Ok(())
}
// #[cfg(test)]
// mod test {
// use std::{thread, time::Duration};
// use chainhook_sdk::{
// types::{
// bitcoin::TxOut, BitcoinBlockData, OrdinalInscriptionTransferDestination,
// OrdinalOperation,
// },
// utils::Context,
// };
// use crossbeam_channel::unbounded;
// use crate::{
// config::Config,
// core::{
// pipeline::PostProcessorCommand,
// test_builders::{TestBlockBuilder, TestTransactionBuilder, TestTxInBuilder},
// },
// db::{
// blocks::open_blocks_db_with_retry, cursor::BlockBytesCursor, drop_all_dbs,
// initialize_sqlite_dbs,
// },
// utils::monitoring::PrometheusMonitoring,
// };
// use super::start_inscription_indexing_processor;
// #[test]
// fn process_inscription_reveal_and_transfer_via_processor() {
// let ctx = Context::empty();
// let config = Config::test_default();
// {
// drop_all_dbs(&config);
// let _ = initialize_sqlite_dbs(&config, &ctx);
// let _ = open_blocks_db_with_retry(true, &config, &ctx);
// }
// let prometheus = PrometheusMonitoring::new();
// let (block_tx, block_rx) = unbounded::<BitcoinBlockData>();
// let controller =
// start_inscription_indexing_processor(&config, &ctx, Some(block_tx), &prometheus);
// // Block 0: A coinbase tx generating the inscription sat.
// let c0 = controller.commands_tx.clone();
// thread::spawn(move || {
// let block0 = TestBlockBuilder::new()
// .hash(
// "0x00000000000000000001b228f9faca9e7d11fcecff9d463bd05546ff0aa4651a"
// .to_string(),
// )
// .height(849999)
// .add_transaction(
// TestTransactionBuilder::new()
// .hash(
// "0xa321c61c83563a377f82ef59301f2527079f6bda7c2d04f9f5954c873f42e8ac"
// .to_string(),
// )
// .build(),
// )
// .build();
// thread::sleep(Duration::from_millis(50));
// let _ = c0.send(PostProcessorCommand::ProcessBlocks(
// vec![(
// 849999,
// BlockBytesCursor::from_standardized_block(&block0).unwrap(),
// )],
// vec![block0],
// ));
// });
// let _ = block_rx.recv().unwrap();
// // Block 1: The actual inscription.
// let c1 = controller.commands_tx.clone();
// thread::spawn(move || {
// let block1 = TestBlockBuilder::new()
// .hash("0xb61b0172d95e266c18aea0c624db987e971a5d6d4ebc2aaed85da4642d635735".to_string())
// .height(850000)
// .add_transaction(TestTransactionBuilder::new().build())
// .add_transaction(
// TestTransactionBuilder::new()
// .hash("0xc62d436323e14cdcb91dd21cb7814fd1ac5b9ecb6e3cc6953b54c02a343f7ec9".to_string())
// .add_input(
// TestTxInBuilder::new()
// .prev_out_block_height(849999)
// .prev_out_tx_hash(
// "0xa321c61c83563a377f82ef59301f2527079f6bda7c2d04f9f5954c873f42e8ac"
// .to_string(),
// )
// .value(12_000)
// .witness(vec![
// "0x6c00eb3c4d35fedd257051333b4ca81d1a25a37a9af4891f1fec2869edd56b14180eafbda8851d63138a724c9b15384bc5f0536de658bd294d426a36212e6f08".to_string(),
// "0x209e2849b90a2353691fccedd467215c88eec89a5d0dcf468e6cf37abed344d746ac0063036f7264010118746578742f706c61696e3b636861727365743d7574662d38004c5e7b200a20202270223a20226272632d3230222c0a2020226f70223a20226465706c6f79222c0a2020227469636b223a20226f726469222c0a2020226d6178223a20223231303030303030222c0a2020226c696d223a202231303030220a7d68".to_string(),
// "0xc19e2849b90a2353691fccedd467215c88eec89a5d0dcf468e6cf37abed344d746".to_string(),
// ])
// .build(),
// )
// .add_output(TxOut {
// value: 10_000,
// script_pubkey: "0x00145e5f0d045e441bf001584eaeca6cd84da04b1084".to_string(),
// })
// .build()
// )
// .build();
// thread::sleep(Duration::from_millis(50));
// let _ = c1.send(PostProcessorCommand::ProcessBlocks(
// vec![(
// 850000,
// BlockBytesCursor::from_standardized_block(&block1).unwrap(),
// )],
// vec![block1],
// ));
// });
// let results1 = block_rx.recv().unwrap();
// let result_tx_1 = &results1.transactions[1];
// assert_eq!(result_tx_1.metadata.ordinal_operations.len(), 1);
// let OrdinalOperation::InscriptionRevealed(reveal) =
// &result_tx_1.metadata.ordinal_operations[0]
// else {
// unreachable!();
// };
// assert_eq!(
// reveal.inscription_id,
// "c62d436323e14cdcb91dd21cb7814fd1ac5b9ecb6e3cc6953b54c02a343f7ec9i0".to_string()
// );
// assert_eq!(reveal.inscription_number.jubilee, 0);
// assert_eq!(reveal.content_bytes, "0x7b200a20202270223a20226272632d3230222c0a2020226f70223a20226465706c6f79222c0a2020227469636b223a20226f726469222c0a2020226d6178223a20223231303030303030222c0a2020226c696d223a202231303030220a7d".to_string());
// assert_eq!(reveal.content_length, 94);
// assert_eq!(reveal.content_type, "text/plain;charset=utf-8".to_string());
// assert_eq!(
// reveal.inscriber_address,
// Some("bc1qte0s6pz7gsdlqq2cf6hv5mxcfksykyyyjkdfd5".to_string())
// );
// assert_eq!(reveal.ordinal_number, 1971874687500000);
// assert_eq!(reveal.ordinal_block_height, 849999);
// assert_eq!(
// reveal.satpoint_post_inscription,
// "c62d436323e14cdcb91dd21cb7814fd1ac5b9ecb6e3cc6953b54c02a343f7ec9:0:0".to_string()
// );
// // Block 2: Inscription transfer
// let c2 = controller.commands_tx.clone();
// thread::spawn(move || {
// let block2 = TestBlockBuilder::new()
// .hash("0x000000000000000000029854dcc8becfd64a352e1d2b1f1d3bb6f101a947af0e".to_string())
// .height(850001)
// .add_transaction(TestTransactionBuilder::new().build())
// .add_transaction(
// TestTransactionBuilder::new()
// .hash("0x1b65c7494c7d1200416a81e65e1dd6bee8d5d4276128458df43692dcb21f49f5".to_string())
// .add_input(
// TestTxInBuilder::new()
// .prev_out_block_height(850000)
// .prev_out_tx_hash(
// "0xc62d436323e14cdcb91dd21cb7814fd1ac5b9ecb6e3cc6953b54c02a343f7ec9"
// .to_string(),
// )
// .value(10_000)
// .build(),
// )
// .add_output(TxOut {
// value: 8000,
// script_pubkey: "0x00145e5f0d045e441bf001584eaeca6cd84da04b1084".to_string(),
// })
// .build()
// )
// .build();
// thread::sleep(Duration::from_millis(50));
// let _ = c2.send(PostProcessorCommand::ProcessBlocks(
// vec![(
// 850001,
// BlockBytesCursor::from_standardized_block(&block2).unwrap(),
// )],
// vec![block2],
// ));
// });
// let results2 = block_rx.recv().unwrap();
// let result_tx_2 = &results2.transactions[1];
// assert_eq!(result_tx_2.metadata.ordinal_operations.len(), 1);
// let OrdinalOperation::InscriptionTransferred(transfer) =
// &result_tx_2.metadata.ordinal_operations[0]
// else {
// unreachable!();
// };
// let OrdinalInscriptionTransferDestination::Transferred(destination) = &transfer.destination
// else {
// unreachable!();
// };
// assert_eq!(
// destination.to_string(),
// "bc1qte0s6pz7gsdlqq2cf6hv5mxcfksykyyyjkdfd5".to_string()
// );
// assert_eq!(transfer.ordinal_number, 1971874687500000);
// assert_eq!(
// transfer.satpoint_pre_transfer,
// "c62d436323e14cdcb91dd21cb7814fd1ac5b9ecb6e3cc6953b54c02a343f7ec9:0:0".to_string()
// );
// assert_eq!(
// transfer.satpoint_post_transfer,
// "1b65c7494c7d1200416a81e65e1dd6bee8d5d4276128458df43692dcb21f49f5:0:0".to_string()
// );
// assert_eq!(transfer.post_transfer_output_value, Some(8000));
// // Close channel.
// let _ = controller.commands_tx.send(PostProcessorCommand::Terminate);
// }
// }

View File

@@ -1,15 +0,0 @@
#[macro_use]
extern crate hiro_system_kit;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate lazy_static;
extern crate serde;
pub mod core;
pub mod db;
pub mod service;
pub mod utils;

View File

@@ -1,412 +0,0 @@
use std::{
collections::BTreeMap,
hash::BuildHasherDefault,
sync::{mpsc::channel, Arc},
};
use chainhook_postgres::{pg_begin, pg_pool, pg_pool_client};
use chainhook_sdk::{
observer::{start_event_observer, BitcoinBlockDataCached, ObserverEvent, ObserverSidecar},
utils::{bitcoind::bitcoind_wait_for_chain_tip, BlockHeights, Context},
};
use chainhook_types::BlockIdentifier;
use config::{Config, OrdinalsMetaProtocolsConfig};
use crossbeam_channel::select;
use dashmap::DashMap;
use deadpool_postgres::Pool;
use fxhash::FxHasher;
use crate::{
core::{
first_inscription_height,
meta_protocols::brc20::cache::{brc20_new_cache, Brc20MemoryCache},
new_traversals_lazy_cache,
pipeline::{
bitcoind_download_blocks,
processors::{
block_archiving::start_block_archiving_processor,
inscription_indexing::{
index_block, rollback_block, start_inscription_indexing_processor,
},
},
},
protocol::sequence_cursor::SequenceCursor,
should_sync_ordinals_db, should_sync_rocks_db,
},
db::{
blocks::{self, find_missing_blocks, open_blocks_db_with_retry, run_compaction},
cursor::{BlockBytesCursor, TransactionBytesCursor},
ordinals_pg,
},
try_crit, try_error, try_info,
utils::monitoring::{start_serving_prometheus_metrics, PrometheusMonitoring},
};
#[derive(Debug, Clone)]
pub struct PgConnectionPools {
pub ordinals: Pool,
pub brc20: Option<Pool>,
}
pub struct Service {
pub prometheus: PrometheusMonitoring,
pub config: Config,
pub ctx: Context,
pub pg_pools: PgConnectionPools,
}
impl Service {
pub fn new(config: &Config, ctx: &Context) -> Self {
let Some(ordinals_config) = &config.ordinals else {
unreachable!();
};
Self {
prometheus: PrometheusMonitoring::new(),
config: config.clone(),
ctx: ctx.clone(),
pg_pools: PgConnectionPools {
ordinals: pg_pool(&ordinals_config.db).unwrap(),
brc20: match &ordinals_config.meta_protocols {
Some(OrdinalsMetaProtocolsConfig {
brc20: Some(brc20), ..
}) => match brc20.enabled {
true => Some(pg_pool(&brc20.db).unwrap()),
false => None,
},
_ => None,
},
},
}
}
/// Returns the last block height we have indexed. This only looks at the max index chain tip, not at the blocks DB chain tip.
/// Adjusts for starting index height depending on Bitcoin network.
pub async fn get_index_chain_tip(&self) -> Result<u64, String> {
let mut ord_client = pg_pool_client(&self.pg_pools.ordinals).await?;
let ord_tx = pg_begin(&mut ord_client).await?;
// Update chain tip to match first inscription height at least.
let db_height = ordinals_pg::get_chain_tip_block_height(&ord_tx)
.await?
.unwrap_or(0)
.max(first_inscription_height(&self.config) - 1);
ordinals_pg::update_chain_tip(db_height, &ord_tx).await?;
ord_tx
.commit()
.await
.map_err(|e| format!("unable to commit get_index_chain_tip transaction: {e}"))?;
Ok(db_height)
}
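// Worked example (hedged, mainnet): with an empty index, db_height becomes
// max(0, 767430 - 1) = 767429, one block before the first inscription height,
// so syncing starts exactly at the first inscription.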
pub async fn run(&mut self, check_blocks_integrity: bool) -> Result<(), String> {
// 1: Initialize Prometheus monitoring server.
if let Some(metrics) = &self.config.metrics {
if metrics.enabled {
let registry_moved = self.prometheus.registry.clone();
let ctx_cloned = self.ctx.clone();
let port = metrics.prometheus_port;
let _ = std::thread::spawn(move || {
hiro_system_kit::nestable_block_on(start_serving_prometheus_metrics(
port,
registry_moved,
ctx_cloned,
));
});
}
}
let (max_inscription_number, chain_tip) = {
let ord_client = pg_pool_client(&self.pg_pools.ordinals).await?;
let inscription_number = ordinals_pg::get_highest_inscription_number(&ord_client)
.await?
.unwrap_or(0);
let chain_tip = ordinals_pg::get_chain_tip_block_height(&ord_client)
.await?
.unwrap_or(0);
(inscription_number, chain_tip)
};
self.prometheus
.initialize(0, max_inscription_number as u64, chain_tip);
// 2: Catch-up the ordinals index to Bitcoin chain tip.
if check_blocks_integrity {
self.check_blocks_db_integrity().await?;
}
self.catch_up_to_bitcoin_chain_tip().await?;
try_info!(self.ctx, "Service: Streaming blocks start");
// 3: Set up the real-time ZMQ Bitcoin block streaming channels and start listening.
let zmq_observer_sidecar = self.set_up_bitcoin_zmq_observer_sidecar()?;
let (observer_command_tx, observer_command_rx) = channel();
let (observer_event_tx, observer_event_rx) = crossbeam_channel::unbounded();
let inner_ctx = self.ctx.clone();
let _ = start_event_observer(
self.config.bitcoind.clone(),
observer_command_tx.clone(),
observer_command_rx,
Some(observer_event_tx),
Some(zmq_observer_sidecar),
inner_ctx,
);
// 4: Block the main thread.
loop {
let event = match observer_event_rx.recv() {
Ok(cmd) => cmd,
Err(e) => {
try_error!(self.ctx, "Error: broken channel {}", e.to_string());
break;
}
};
if let ObserverEvent::Terminate = event {
try_info!(&self.ctx, "Terminating runloop");
break;
}
}
Ok(())
}
/// Rolls back index data for the specified block heights.
pub async fn rollback(&self, block_heights: &[u64]) -> Result<(), String> {
for block_height in block_heights.iter() {
rollback_block(*block_height, &self.config, &self.pg_pools, &self.ctx).await?;
}
Ok(())
}
fn set_up_bitcoin_zmq_observer_sidecar(&self) -> Result<ObserverSidecar, String> {
let (block_mutator_in_tx, block_mutator_in_rx) = crossbeam_channel::unbounded();
let (block_mutator_out_tx, block_mutator_out_rx) = crossbeam_channel::unbounded();
let (chain_event_notifier_tx, chain_event_notifier_rx) = crossbeam_channel::unbounded();
let observer_sidecar = ObserverSidecar {
bitcoin_blocks_mutator: Some((block_mutator_in_tx, block_mutator_out_rx)),
bitcoin_chain_event_notifier: Some(chain_event_notifier_tx),
};
// TODO(rafaelcr): Move these outside so they can be used across blocks.
let cache_l2 = Arc::new(new_traversals_lazy_cache(100_000));
let mut brc20_cache = brc20_new_cache(&self.config);
let ctx = self.ctx.clone();
let config = self.config.clone();
let pg_pools = self.pg_pools.clone();
let prometheus = self.prometheus.clone();
hiro_system_kit::thread_named("Observer Sidecar Runloop")
.spawn(move || {
hiro_system_kit::nestable_block_on(async move {
loop {
select! {
// Mutate a newly-received Bitcoin block and add any Ordinals or BRC-20 activity to it. Write index
// data to DB.
recv(block_mutator_in_rx) -> msg => {
if let Ok((mut blocks_to_mutate, blocks_ids_to_rollback)) = msg {
match chainhook_sidecar_mutate_blocks(
&mut blocks_to_mutate,
&blocks_ids_to_rollback,
&cache_l2,
&mut brc20_cache,
&prometheus,
&config,
&pg_pools,
&ctx,
).await {
Ok(_) => {
let _ = block_mutator_out_tx.send(blocks_to_mutate);
},
Err(e) => {
try_crit!(ctx, "Error indexing streamed block: {e}");
std::process::exit(1);
},
};
}
}
recv(chain_event_notifier_rx) -> _msg => {
// No action required.
}
}
}
})
})
.expect("unable to spawn zmq thread");
Ok(observer_sidecar)
}
pub async fn check_blocks_db_integrity(&mut self) -> Result<(), String> {
bitcoind_wait_for_chain_tip(&self.config.bitcoind, &self.ctx);
let (tip, missing_blocks) = {
let blocks_db = open_blocks_db_with_retry(false, &self.config, &self.ctx);
let ord_client = pg_pool_client(&self.pg_pools.ordinals).await?;
let tip = ordinals_pg::get_chain_tip_block_height(&ord_client)
.await?
.unwrap_or(0);
let missing_blocks = find_missing_blocks(&blocks_db, 0, tip as u32, &self.ctx);
(tip, missing_blocks)
};
if !missing_blocks.is_empty() {
info!(
self.ctx.expect_logger(),
"{} missing blocks detected, will attempt to repair data",
missing_blocks.len()
);
let block_ingestion_processor =
start_block_archiving_processor(&self.config, &self.ctx, false, None);
bitcoind_download_blocks(
&self.config,
missing_blocks.into_iter().map(|x| x as u64).collect(),
tip,
&block_ingestion_processor,
10_000,
&self.ctx,
)
.await?;
}
let blocks_db_rw = open_blocks_db_with_retry(false, &self.config, &self.ctx);
info!(self.ctx.expect_logger(), "Running database compaction");
run_compaction(&blocks_db_rw, tip as u32);
Ok(())
}
/// Synchronizes and indexes all databases until their block height matches bitcoind's block height.
pub async fn catch_up_to_bitcoin_chain_tip(&self) -> Result<(), String> {
// 0: Make sure bitcoind is synchronized.
bitcoind_wait_for_chain_tip(&self.config.bitcoind, &self.ctx);
// 1: Catch up blocks DB so it is at least at the same height as the ordinals DB.
if let Some((start_block, end_block)) =
should_sync_rocks_db(&self.config, &self.pg_pools, &self.ctx).await?
{
try_info!(
self.ctx,
"Blocks DB is out of sync with ordinals DB, archiving blocks from #{start_block} to #{end_block}"
);
let blocks_post_processor =
start_block_archiving_processor(&self.config, &self.ctx, true, None);
let blocks = BlockHeights::BlockRange(start_block, end_block)
.get_sorted_entries()
.map_err(|_e| "Block start / end block spec invalid".to_string())?;
bitcoind_download_blocks(
&self.config,
blocks.into(),
first_inscription_height(&self.config),
&blocks_post_processor,
10_000,
&self.ctx,
)
.await?;
}
// 2: Catch up ordinals DB until it reaches bitcoind block height. This will also advance blocks DB and BRC-20 DB if
// enabled.
let mut last_block_processed = 0;
while let Some((start_block, end_block, speed)) =
should_sync_ordinals_db(&self.config, &self.pg_pools, &self.ctx).await?
{
if last_block_processed == end_block {
break;
}
let blocks_post_processor = start_inscription_indexing_processor(
&self.config,
&self.pg_pools,
&self.ctx,
&self.prometheus,
);
try_info!(
self.ctx,
"Indexing inscriptions from #{start_block} to #{end_block}"
);
let blocks = BlockHeights::BlockRange(start_block, end_block)
.get_sorted_entries()
.map_err(|_e| "Block start / end block spec invalid".to_string())?;
bitcoind_download_blocks(
&self.config,
blocks.into(),
first_inscription_height(&self.config),
&blocks_post_processor,
speed,
&self.ctx,
)
.await?;
last_block_processed = end_block;
}
try_info!(self.ctx, "Index has reached bitcoin chain tip");
Ok(())
}
}
pub async fn chainhook_sidecar_mutate_blocks(
blocks_to_mutate: &mut [BitcoinBlockDataCached],
block_ids_to_rollback: &[BlockIdentifier],
cache_l2: &Arc<DashMap<(u32, [u8; 8]), TransactionBytesCursor, BuildHasherDefault<FxHasher>>>,
brc20_cache: &mut Option<Brc20MemoryCache>,
prometheus: &PrometheusMonitoring,
config: &Config,
pg_pools: &PgConnectionPools,
ctx: &Context,
) -> Result<(), String> {
if !block_ids_to_rollback.is_empty() {
let blocks_db_rw = open_blocks_db_with_retry(true, config, ctx);
for block_id in block_ids_to_rollback.iter() {
blocks::delete_blocks_in_block_range(
block_id.index as u32,
block_id.index as u32,
&blocks_db_rw,
ctx,
);
rollback_block(block_id.index, config, pg_pools, ctx).await?;
}
blocks_db_rw
.flush()
.map_err(|e| format!("error dropping rollback blocks from rocksdb: {e}"))?;
}
for cached_block in blocks_to_mutate.iter_mut() {
if cached_block.processed_by_sidecar {
continue;
}
let block_bytes = match BlockBytesCursor::from_standardized_block(&cached_block.block) {
Ok(block_bytes) => block_bytes,
Err(e) => {
return Err(format!(
"Unable to compress block #{}: #{e}",
cached_block.block.block_identifier.index
));
}
};
{
let blocks_db_rw = open_blocks_db_with_retry(true, config, ctx);
blocks::insert_entry_in_blocks(
cached_block.block.block_identifier.index as u32,
&block_bytes,
true,
&blocks_db_rw,
ctx,
);
blocks_db_rw
.flush()
.map_err(|e| format!("error inserting block to rocksdb: {e}"))?;
}
let mut cache_l1 = BTreeMap::new();
let mut sequence_cursor = SequenceCursor::new();
index_block(
&mut cached_block.block,
&vec![],
&mut sequence_cursor,
&mut cache_l1,
cache_l2,
brc20_cache.as_mut(),
prometheus,
config,
pg_pools,
ctx,
)
.await?;
cached_block.processed_by_sidecar = true;
}
Ok(())
}

View File

@@ -1,49 +0,0 @@
#[macro_export]
macro_rules! try_info {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| info!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| info!(l, $tag));
};
}
#[macro_export]
macro_rules! try_debug {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| debug!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| debug!(l, $tag));
};
}
#[macro_export]
macro_rules! try_warn {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| warn!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| warn!(l, $tag));
};
}
#[macro_export]
macro_rules! try_error {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| error!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| error!(l, $tag));
};
}
#[macro_export]
macro_rules! try_crit {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| crit!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| crit!(l, $tag));
};
}
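
Each of these wrappers only guards a slog call behind Context::try_log, so logging is a no-op when the context has no logger attached. As a sketch, the first line below expands to the second:

try_info!(ctx, "Block #{block_height} saved to disk");
// ...which, per the macro definition above, becomes:
ctx.try_log(|l| info!(l, "Block #{block_height} saved to disk"));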

View File

@@ -1,105 +0,0 @@
pub mod logger;
pub mod monitoring;
use std::{
fs,
io::{Read, Write},
path::Path,
};
use chainhook_types::TransactionIdentifier;
pub fn read_file_content_at_path(file_path: &Path) -> Result<Vec<u8>, String> {
use std::{fs::File, io::BufReader};
let file = File::open(file_path)
.map_err(|e| format!("unable to read file {}\n{:?}", file_path.display(), e))?;
let mut file_reader = BufReader::new(file);
let mut file_buffer = vec![];
file_reader
.read_to_end(&mut file_buffer)
.map_err(|e| format!("unable to read file {}\n{:?}", file_path.display(), e))?;
Ok(file_buffer)
}
pub fn write_file_content_at_path(file_path: &Path, content: &[u8]) -> Result<(), String> {
use std::fs::File;
let mut parent_directory = file_path.to_path_buf();
parent_directory.pop();
fs::create_dir_all(&parent_directory).map_err(|e| {
format!(
"unable to create parent directory {}\n{}",
parent_directory.display(),
e
)
})?;
let mut file = File::create(file_path)
.map_err(|e| format!("unable to open file {}\n{}", file_path.display(), e))?;
file.write_all(content)
.map_err(|e| format!("unable to write file {}\n{}", file_path.display(), e))?;
Ok(())
}
pub fn format_inscription_id(
transaction_identifier: &TransactionIdentifier,
inscription_subindex: usize,
) -> String {
format!(
"{}i{}",
transaction_identifier.get_hash_bytes_str(),
inscription_subindex,
)
}
pub fn parse_satpoint_to_watch(outpoint_to_watch: &str) -> (TransactionIdentifier, usize, u64) {
let comps: Vec<&str> = outpoint_to_watch.split(":").collect();
let tx = TransactionIdentifier::new(comps[0]);
let output_index = comps[1].to_string().parse::<usize>().unwrap_or_else(|_| {
panic!(
"fatal: unable to extract output_index from outpoint {}",
outpoint_to_watch
)
});
let offset = comps[2].to_string().parse::<u64>().unwrap_or_else(|_| {
panic!(
"fatal: unable to extract offset from outpoint {}",
outpoint_to_watch
)
});
(tx, output_index, offset)
}
pub fn format_outpoint_to_watch(
transaction_identifier: &TransactionIdentifier,
output_index: usize,
) -> String {
format!(
"{}:{}",
transaction_identifier.get_hash_bytes_str(),
output_index
)
}
pub fn parse_inscription_id(inscription_id: &str) -> (TransactionIdentifier, usize) {
let comps: Vec<&str> = inscription_id.split("i").collect();
let tx = TransactionIdentifier::new(comps[0]);
let output_index = comps[1].to_string().parse::<usize>().unwrap_or_else(|_| {
panic!(
"fatal: unable to extract output_index from inscription_id {}",
inscription_id
)
});
(tx, output_index)
}
pub fn parse_outpoint_to_watch(outpoint_to_watch: &str) -> (TransactionIdentifier, usize) {
let comps: Vec<&str> = outpoint_to_watch.split(":").collect();
let tx = TransactionIdentifier::new(comps[0]);
let output_index = comps[1].to_string().parse::<usize>().unwrap_or_else(|_| {
panic!(
"fatal: unable to extract output_index from outpoint {}",
outpoint_to_watch
)
});
(tx, output_index)
}
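
The formatters and parsers above are inverses of each other. A round-trip sketch, using a transaction hash from the test fixtures earlier in this diff and only the functions shown here:

let tx = TransactionIdentifier::new(
    "0xc62d436323e14cdcb91dd21cb7814fd1ac5b9ecb6e3cc6953b54c02a343f7ec9",
);
// Outpoints are "<hash-without-0x>:<vout>"; satpoints add ":<offset>".
let outpoint = format_outpoint_to_watch(&tx, 0);
let (parsed_tx, output_index) = parse_outpoint_to_watch(&outpoint);
assert_eq!(parsed_tx.get_hash_bytes_str(), tx.get_hash_bytes_str());
assert_eq!(output_index, 0);
// Inscription ids use "i" as the separator instead of ":".
assert!(format_inscription_id(&tx, 0).ends_with("i0"));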

View File

@@ -1,5 +1,5 @@
[package]
name = "ordhook"
name = "ordinals"
version.workspace = true
edition = "2021"
@@ -13,7 +13,7 @@ rand = "0.9.0"
lru = "0.13.0"
config = { path = "../config" }
bitcoin = { workspace = true }
chainhook-sdk = { path = "../chainhook-sdk" }
bitcoind = { path = "../bitcoind" }
chainhook-types = { path = "../chainhook-types-rs" }
hiro-system-kit = { workspace = true }
reqwest = { version = "0.11", default-features = false, features = [

View File

@@ -102,9 +102,9 @@ impl Brc20MemoryCache {
match brc20_pg::get_token(tick, client).await? {
Some(db_token) => {
self.tokens.put(tick.clone(), db_token.clone());
return Ok(Some(db_token));
Ok(Some(db_token))
}
None => return Ok(None),
None => Ok(None),
}
}
@@ -122,7 +122,7 @@ impl Brc20MemoryCache {
.put(tick.to_string(), minted_supply);
return Ok(Some(minted_supply));
}
return Ok(None);
Ok(None)
}
pub async fn get_token_address_avail_balance<T: GenericClient>(
@@ -142,7 +142,7 @@ impl Brc20MemoryCache {
self.token_addr_avail_balances.put(key, balance);
return Ok(Some(balance));
}
return Ok(None);
Ok(None)
}
pub async fn get_unsent_token_transfers<T: GenericClient>(
@@ -182,7 +182,7 @@ impl Brc20MemoryCache {
self.ignore_inscription(*irrelevant_number);
}
}
return Ok(results);
Ok(results)
}
/// Marks an ordinal number as ignored so we don't bother computing its transfers for BRC20 purposes.
@@ -474,7 +474,7 @@ impl Brc20MemoryCache {
unreachable!("Invalid transfer ordinal number {}", ordinal_number)
};
self.unsent_transfers.put(ordinal_number, transfer.clone());
return Ok(transfer.clone());
Ok(transfer.clone())
}
async fn handle_cache_miss<T: GenericClient>(&mut self, client: &T) -> Result<(), String> {

View File

@@ -1,6 +1,6 @@
use std::collections::HashMap;
use chainhook_sdk::utils::Context;
use bitcoind::{try_info, utils::Context};
use chainhook_types::{
BitcoinBlockData, BlockIdentifier, Brc20BalanceData, Brc20Operation, Brc20TokenDeployData,
Brc20TransferData, OrdinalInscriptionTransferData, OrdinalOperation, TransactionIdentifier,
@@ -13,7 +13,7 @@ use super::{
parser::ParsedBrc20Operation,
verifier::{verify_brc20_operation, verify_brc20_transfers, VerifiedBrc20Operation},
};
use crate::{core::meta_protocols::brc20::u128_amount_to_decimals_str, try_info};
use crate::core::meta_protocols::brc20::u128_amount_to_decimals_str;
/// Index ordinal transfers in a single Bitcoin block looking for BRC-20 transfers.
async fn index_unverified_brc20_transfers(

View File

@@ -150,7 +150,7 @@ pub fn parse_brc20_operation(
} else {
limit = max.clone();
}
return Ok(Some(ParsedBrc20Operation::Deploy(
Ok(Some(ParsedBrc20Operation::Deploy(
ParsedBrc20TokenDeployData {
tick: json.tick.to_lowercase(),
display_tick: json.tick.clone(),
@@ -159,7 +159,7 @@ pub fn parse_brc20_operation(
dec: decimals.to_string(),
self_mint,
},
)));
)))
}
Err(_) => match serde_json::from_slice::<Brc20MintOrTransferJson>(inscription_body) {
Ok(json) => {
@@ -177,30 +177,26 @@ pub fn parse_brc20_operation(
}
match op_str {
"mint" => {
return Ok(Some(ParsedBrc20Operation::Mint(
ParsedBrc20BalanceData {
tick: json.tick.to_lowercase(),
amt: json.amt.clone(),
},
)));
Ok(Some(ParsedBrc20Operation::Mint(ParsedBrc20BalanceData {
tick: json.tick.to_lowercase(),
amt: json.amt.clone(),
})))
}
"transfer" => {
return Ok(Some(ParsedBrc20Operation::Transfer(
ParsedBrc20BalanceData {
tick: json.tick.to_lowercase(),
amt: json.amt.clone(),
},
)));
}
_ => return Ok(None),
"transfer" => Ok(Some(ParsedBrc20Operation::Transfer(
ParsedBrc20BalanceData {
tick: json.tick.to_lowercase(),
amt: json.amt.clone(),
},
))),
_ => Ok(None),
}
}
_ => return Ok(None),
_ => Ok(None),
}
}
Err(_) => return Ok(None),
Err(_) => Ok(None),
},
};
}
}
#[cfg(test)]
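
The return-style cleanup above does not change behaviour. As a sketch (the full argument list of parse_brc20_operation is elided in this hunk), a deploy body like the one in the test fixtures still parses as:

// Hypothetical call shape; extra arguments elided.
let body = br#"{"p":"brc-20","op":"deploy","tick":"ordi","max":"21000000","lim":"1000"}"#;
if let Ok(Some(ParsedBrc20Operation::Deploy(data))) = parse_brc20_operation(body /* , ... */) {
    assert_eq!(data.tick, "ordi"); // lowercased for indexing
    assert_eq!(data.display_tick, "ordi"); // original casing preserved
}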

View File

@@ -1,4 +1,4 @@
use chainhook_sdk::utils::Context;
use bitcoind::utils::Context;
use chainhook_types::{
OrdinalInscriptionNumber, OrdinalInscriptionRevealData, OrdinalInscriptionTransferData,
OrdinalInscriptionTransferDestination,

View File

@@ -1,6 +1,6 @@
use std::collections::HashMap;
use chainhook_sdk::utils::Context;
use bitcoind::utils::Context;
use chainhook_types::{
BitcoinNetwork, BlockIdentifier, OrdinalInscriptionRevealData, OrdinalInscriptionTransferData,
OrdinalInscriptionTransferDestination, TransactionIdentifier,
@@ -86,7 +86,7 @@ pub async fn verify_brc20_operation(
return Ok(None);
}
let decimals = data.dec.parse::<u8>().unwrap();
return Ok(Some(VerifiedBrc20Operation::TokenDeploy(
Ok(Some(VerifiedBrc20Operation::TokenDeploy(
VerifiedBrc20TokenDeployData {
tick: data.tick.clone(),
display_tick: data.display_tick.clone(),
@@ -96,7 +96,7 @@ pub async fn verify_brc20_operation(
address: inscriber_address.clone(),
self_mint: data.self_mint,
},
)));
)))
}
ParsedBrc20Operation::Mint(data) => {
let Some(token) = cache.get_token(&data.tick, db_tx).await? else {
@@ -161,13 +161,13 @@ pub async fn verify_brc20_operation(
return Ok(None);
}
let real_mint_amt = amount.min(token.limit.0.min(remaining_supply));
return Ok(Some(VerifiedBrc20Operation::TokenMint(
Ok(Some(VerifiedBrc20Operation::TokenMint(
VerifiedBrc20BalanceData {
tick: token.ticker,
amt: real_mint_amt,
address: inscriber_address.clone(),
},
)));
)))
}
ParsedBrc20Operation::Transfer(data) => {
let Some(token) = cache.get_token(&data.tick, db_tx).await? else {
@@ -207,15 +207,15 @@ pub async fn verify_brc20_operation(
);
return Ok(None);
}
return Ok(Some(VerifiedBrc20Operation::TokenTransfer(
Ok(Some(VerifiedBrc20Operation::TokenTransfer(
VerifiedBrc20BalanceData {
tick: token.ticker,
amt: amount,
address: inscriber_address.clone(),
},
)));
)))
}
};
}
}
/// Given a list of ordinal transfers, verify which of them are valid `transfer_send` BRC-20 operations we haven't yet processed.

View File

@@ -7,24 +7,11 @@ pub mod test_builders;
use std::{hash::BuildHasherDefault, ops::Div};
use bitcoin::Network;
use chainhook_postgres::pg_pool_client;
use chainhook_sdk::utils::{bitcoind::bitcoind_get_block_height, Context};
use bitcoind::{indexer::bitcoin::cursor::TransactionBytesCursor, utils::Context};
use config::Config;
use dashmap::DashMap;
use fxhash::{FxBuildHasher, FxHasher};
use crate::{
db::{
blocks::{
find_last_block_inserted, find_pinned_block_bytes_at_block_height,
open_blocks_db_with_retry,
},
cursor::TransactionBytesCursor,
ordinals_pg,
},
service::PgConnectionPools,
};
pub fn first_inscription_height(config: &Config) -> u64 {
match config.bitcoind.network {
Network::Bitcoin => 767430,
@@ -118,70 +105,6 @@ pub fn compute_next_satpoint_data(
SatPosition::Output((selected_output_index, relative_offset_in_selected_output))
}
pub async fn should_sync_rocks_db(
config: &Config,
pg_pools: &PgConnectionPools,
ctx: &Context,
) -> Result<Option<(u64, u64)>, String> {
let blocks_db = open_blocks_db_with_retry(true, config, ctx);
let last_compressed_block = find_last_block_inserted(&blocks_db) as u64;
let ord_client = pg_pool_client(&pg_pools.ordinals).await?;
let last_indexed_block =
(ordinals_pg::get_chain_tip_block_height(&ord_client).await?).unwrap_or_default();
let res = if last_compressed_block < last_indexed_block {
Some((last_compressed_block, last_indexed_block))
} else {
None
};
Ok(res)
}
pub async fn should_sync_ordinals_db(
config: &Config,
pg_pools: &PgConnectionPools,
ctx: &Context,
) -> Result<Option<(u64, u64, usize)>, String> {
let blocks_db = open_blocks_db_with_retry(true, config, ctx);
let mut start_block = find_last_block_inserted(&blocks_db) as u64;
let ord_client = pg_pool_client(&pg_pools.ordinals).await?;
match ordinals_pg::get_chain_tip_block_height(&ord_client).await? {
Some(height) => {
if find_pinned_block_bytes_at_block_height(height as u32, 3, &blocks_db, ctx).is_none()
{
start_block = start_block.min(height);
} else {
start_block = height;
}
start_block += 1;
}
None => {
start_block = start_block.min(first_inscription_height(config));
}
};
// TODO: Gracefully handle Regtest, Testnet and Signet
let end_block = bitcoind_get_block_height(&config.bitcoind, ctx);
let (mut end_block, speed) = if start_block < 200_000 {
(end_block.min(200_000), 10_000)
} else if start_block < 550_000 {
(end_block.min(550_000), 1_000)
} else {
(end_block, 100)
};
if start_block < 767430 && end_block > 767430 {
end_block = 767430;
}
if start_block <= end_block {
Ok(Some((start_block, end_block, speed)))
} else {
Ok(None)
}
}
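// Example of the tiering above (hedged): an index at height 100_000 syncs to
// min(chain_tip, 200_000) with `speed` 10_000 (the batch size handed to
// bitcoind_download_blocks); past 550_000 the batch drops to 100 as blocks get
// heavier to index.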
#[test]
fn test_identify_next_output_index_destination() {
assert_eq!(

View File

@@ -0,0 +1 @@
pub mod processors;

View File

@@ -0,0 +1,28 @@
use bitcoind::{try_error, try_info, utils::Context};
use rocksdb::DB;
use crate::db::blocks::insert_entry_in_blocks;
pub fn store_compacted_blocks(
mut compacted_blocks: Vec<(u64, Vec<u8>)>,
update_tip: bool,
blocks_db_rw: &DB,
ctx: &Context,
) {
compacted_blocks.sort_by(|(a, _), (b, _)| a.cmp(b));
for (block_height, compacted_block) in compacted_blocks.into_iter() {
insert_entry_in_blocks(
block_height as u32,
&compacted_block,
update_tip,
blocks_db_rw,
ctx,
);
try_info!(ctx, "Compacted block #{block_height} saved to disk");
}
if let Err(e) = blocks_db_rw.flush() {
try_error!(ctx, "{}", e.to_string());
}
}
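
A usage sketch for the helper above, assuming an open RocksDB handle from open_blocks_db_with_retry and hypothetical compacted byte buffers compacted_a / compacted_b:

let blocks_db = open_blocks_db_with_retry(true, &config, &ctx);
store_compacted_blocks(
    vec![(850_001, compacted_b), (850_000, compacted_a)], // out-of-order heights are fine
    true, // update_tip: advance the blocks DB chain tip marker
    &blocks_db,
    &ctx,
);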

View File

@@ -0,0 +1,214 @@
use std::{
collections::{BTreeMap, HashMap},
hash::BuildHasherDefault,
sync::Arc,
};
use bitcoind::{
indexer::bitcoin::cursor::TransactionBytesCursor, try_info, try_warn, utils::Context,
};
use chainhook_postgres::{pg_begin, pg_pool_client};
use chainhook_types::{BitcoinBlockData, TransactionIdentifier};
use config::Config;
use dashmap::DashMap;
use fxhash::FxHasher;
use crate::{
core::{
meta_protocols::brc20::{
brc20_pg, cache::Brc20MemoryCache, index::index_block_and_insert_brc20_operations,
},
protocol::{
inscription_parsing::parse_inscriptions_in_standardized_block,
inscription_sequencing::{
get_bitcoin_network, get_jubilee_block_height,
parallelize_inscription_data_computations,
update_block_inscriptions_with_consensus_sequence_data,
},
satoshi_numbering::TraversalResult,
satoshi_tracking::augment_block_with_transfers,
sequence_cursor::SequenceCursor,
},
},
db::ordinals_pg::{self, get_chain_tip_block_height},
utils::monitoring::PrometheusMonitoring,
PgConnectionPools,
};
pub async fn process_blocks(
next_blocks: &mut Vec<BitcoinBlockData>,
sequence_cursor: &mut SequenceCursor,
cache_l2: &Arc<DashMap<(u32, [u8; 8]), TransactionBytesCursor, BuildHasherDefault<FxHasher>>>,
brc20_cache: &mut Option<Brc20MemoryCache>,
prometheus: &PrometheusMonitoring,
config: &Config,
pg_pools: &PgConnectionPools,
ctx: &Context,
) -> Result<Vec<BitcoinBlockData>, String> {
let mut cache_l1 = BTreeMap::new();
let mut updated_blocks = vec![];
for _cursor in 0..next_blocks.len() {
let mut block = next_blocks.remove(0);
index_block(
&mut block,
next_blocks,
sequence_cursor,
&mut cache_l1,
cache_l2,
brc20_cache.as_mut(),
prometheus,
config,
pg_pools,
ctx,
)
.await?;
updated_blocks.push(block);
}
Ok(updated_blocks)
}
pub async fn index_block(
block: &mut BitcoinBlockData,
next_blocks: &Vec<BitcoinBlockData>,
sequence_cursor: &mut SequenceCursor,
cache_l1: &mut BTreeMap<(TransactionIdentifier, usize, u64), TraversalResult>,
cache_l2: &Arc<DashMap<(u32, [u8; 8]), TransactionBytesCursor, BuildHasherDefault<FxHasher>>>,
brc20_cache: Option<&mut Brc20MemoryCache>,
prometheus: &PrometheusMonitoring,
config: &Config,
pg_pools: &PgConnectionPools,
ctx: &Context,
) -> Result<(), String> {
let stopwatch = std::time::Instant::now();
let block_height = block.block_identifier.index;
try_info!(ctx, "Indexing block #{block_height}");
// Invalidate and recompute cursor when crossing the jubilee height
if block.block_identifier.index
== get_jubilee_block_height(&get_bitcoin_network(&block.metadata.network))
{
sequence_cursor.reset();
}
{
let mut ord_client = pg_pool_client(&pg_pools.ordinals).await?;
let ord_tx = pg_begin(&mut ord_client).await?;
if let Some(chain_tip) = get_chain_tip_block_height(&ord_tx).await? {
if block_height <= chain_tip {
try_warn!(ctx, "Block #{block_height} was already indexed, skipping");
return Ok(());
}
}
// Parsed BRC20 ops will be deposited here for this block.
let mut brc20_operation_map = HashMap::new();
parse_inscriptions_in_standardized_block(block, &mut brc20_operation_map, config, ctx);
let has_inscription_reveals = parallelize_inscription_data_computations(
block,
next_blocks,
cache_l1,
cache_l2,
config,
ctx,
)?;
if has_inscription_reveals {
update_block_inscriptions_with_consensus_sequence_data(
block,
sequence_cursor,
cache_l1,
&ord_tx,
ctx,
)
.await?;
}
augment_block_with_transfers(block, &ord_tx, ctx).await?;
// Write data
ordinals_pg::insert_block(block, &ord_tx).await?;
// BRC-20
if let (Some(brc20_cache), Some(brc20_pool)) = (brc20_cache, &pg_pools.brc20) {
let mut brc20_client = pg_pool_client(brc20_pool).await?;
let brc20_tx = pg_begin(&mut brc20_client).await?;
index_block_and_insert_brc20_operations(
block,
&mut brc20_operation_map,
brc20_cache,
&brc20_tx,
ctx,
)
.await?;
brc20_tx
.commit()
.await
.map_err(|e| format!("unable to commit brc20 pg transaction: {e}"))?;
}
prometheus.metrics_block_indexed(block_height);
prometheus.metrics_inscription_indexed(
ordinals_pg::get_highest_inscription_number(&ord_tx)
.await?
.unwrap_or(0) as u64,
);
ord_tx
.commit()
.await
.map_err(|e| format!("unable to commit ordinals pg transaction: {e}"))?;
}
try_info!(
ctx,
"Block #{block_height} indexed in {}s",
stopwatch.elapsed().as_millis() as f32 / 1000.0
);
Ok(())
}
pub async fn rollback_block(
block_height: u64,
_config: &Config,
pg_pools: &PgConnectionPools,
ctx: &Context,
) -> Result<(), String> {
try_info!(ctx, "Rolling back block #{block_height}");
{
let mut ord_client = pg_pool_client(&pg_pools.ordinals).await?;
let ord_tx = pg_begin(&mut ord_client).await?;
ordinals_pg::rollback_block(block_height, &ord_tx).await?;
// BRC-20
if let Some(brc20_pool) = &pg_pools.brc20 {
let mut brc20_client = pg_pool_client(brc20_pool).await?;
let brc20_tx = pg_begin(&mut brc20_client).await?;
brc20_pg::rollback_block_operations(block_height, &brc20_tx).await?;
brc20_tx
.commit()
.await
.map_err(|e| format!("unable to commit brc20 pg transaction: {e}"))?;
try_info!(
ctx,
"Rolled back BRC-20 operations at block #{block_height}"
);
}
ord_tx
.commit()
.await
.map_err(|e| format!("unable to commit ordinals pg transaction: {e}"))?;
try_info!(
ctx,
"Rolled back inscription activity at block #{block_height}"
);
}
Ok(())
}

View File

@@ -1,7 +1,7 @@
use std::{collections::HashMap, str, str::FromStr};
use bitcoin::{hash_types::Txid, Witness};
use chainhook_sdk::utils::Context;
use bitcoind::{try_warn, utils::Context};
use chainhook_types::{
BitcoinBlockData, BitcoinNetwork, BitcoinTransactionData, BlockIdentifier,
OrdinalInscriptionCurseType, OrdinalInscriptionNumber, OrdinalInscriptionRevealData,
@@ -15,12 +15,9 @@ use ord::{
};
use serde_json::json;
use crate::{
core::meta_protocols::brc20::{
brc20_activation_height,
parser::{parse_brc20_operation, ParsedBrc20Operation},
},
try_warn,
use crate::core::meta_protocols::brc20::{
brc20_activation_height,
parser::{parse_brc20_operation, ParsedBrc20Operation},
};
pub fn parse_inscriptions_from_witness(
@@ -172,7 +169,7 @@ pub fn parse_inscriptions_in_standardized_block(
mod test {
use std::collections::HashMap;
use chainhook_sdk::utils::Context;
use bitcoind::utils::Context;
use chainhook_types::OrdinalOperation;
use config::Config;

View File

@@ -5,7 +5,10 @@ use std::{
};
use bitcoin::Network;
use chainhook_sdk::utils::Context;
use bitcoind::{
indexer::bitcoin::cursor::TransactionBytesCursor, try_debug, try_error, try_info,
utils::Context,
};
use chainhook_types::{
BitcoinBlockData, BitcoinNetwork, BitcoinTransactionData, BlockIdentifier,
OrdinalInscriptionCurseType, OrdinalInscriptionTransferDestination, OrdinalOperation,
@@ -25,8 +28,7 @@ use super::{
};
use crate::{
core::{protocol::satoshi_tracking::UNBOUND_INSCRIPTION_SATPOINT, resolve_absolute_pointer},
db::{self, cursor::TransactionBytesCursor, ordinals_pg},
try_debug, try_error, try_info,
db::{self, ordinals_pg},
utils::format_inscription_id,
};
@@ -600,8 +602,8 @@ async fn update_tx_inscriptions_with_consensus_sequence_data(
mod test {
use std::collections::BTreeMap;
use bitcoind::utils::Context;
use chainhook_postgres::{pg_begin, pg_pool_client};
use chainhook_sdk::utils::Context;
use chainhook_types::{
bitcoin::{OutPoint, TxIn, TxOut},
OrdinalInscriptionCurseType, OrdinalInscriptionNumber, OrdinalInscriptionRevealData,

View File

@@ -1,19 +1,17 @@
use std::{hash::BuildHasherDefault, sync::Arc};
use chainhook_sdk::utils::Context;
use bitcoind::{
indexer::bitcoin::cursor::{BlockBytesCursor, TransactionBytesCursor},
try_error,
utils::Context,
};
use chainhook_types::{BlockIdentifier, OrdinalInscriptionNumber, TransactionIdentifier};
use config::Config;
use dashmap::DashMap;
use fxhash::FxHasher;
use ord::{height::Height, sat::Sat};
use crate::{
db::{
blocks::find_pinned_block_bytes_at_block_height,
cursor::{BlockBytesCursor, TransactionBytesCursor},
},
try_error,
};
use crate::db::blocks::find_pinned_block_bytes_at_block_height;
#[derive(Clone, Debug)]
pub struct TraversalResult {
@@ -305,7 +303,10 @@ pub fn compute_satoshi_number(
mod test {
use std::{hash::BuildHasherDefault, sync::Arc};
use chainhook_sdk::utils::Context;
use bitcoind::{
indexer::bitcoin::cursor::{TransactionBytesCursor, TransactionInputBytesCursor},
utils::Context,
};
use chainhook_types::{bitcoin::TxOut, BlockIdentifier, TransactionIdentifier};
use config::Config;
use dashmap::DashMap;
@@ -319,7 +320,6 @@ mod test {
},
db::{
blocks::{insert_standardized_block, open_blocks_db_with_retry},
cursor::{TransactionBytesCursor, TransactionInputBytesCursor},
drop_all_dbs,
},
};

View File

@@ -1,7 +1,7 @@
use std::collections::{HashMap, HashSet};
use bitcoin::{Address, Network, ScriptBuf};
use chainhook_sdk::utils::Context;
use bitcoind::{try_info, utils::Context};
use chainhook_types::{
BitcoinBlockData, BitcoinTransactionData, BlockIdentifier, OrdinalInscriptionTransferData,
OrdinalInscriptionTransferDestination, OrdinalOperation,
@@ -12,7 +12,6 @@ use super::inscription_sequencing::get_bitcoin_network;
use crate::{
core::{compute_next_satpoint_data, SatPosition},
db::ordinals_pg,
try_info,
utils::format_outpoint_to_watch,
};
@@ -251,8 +250,8 @@ pub async fn augment_transaction_with_ordinal_transfers(
#[cfg(test)]
mod test {
use bitcoin::Network;
use bitcoind::utils::Context;
use chainhook_postgres::{pg_begin, pg_pool_client};
use chainhook_sdk::utils::Context;
use chainhook_types::{
OrdinalInscriptionNumber, OrdinalInscriptionRevealData, OrdinalInscriptionTransferData,
OrdinalInscriptionTransferDestination, OrdinalOperation,

View File

@@ -1,12 +1,10 @@
use std::{path::PathBuf, thread::sleep, time::Duration};
use chainhook_sdk::utils::Context;
use bitcoind::{try_error, try_warn, utils::Context};
use config::Config;
use rand::{rng, Rng};
use rocksdb::{DBPinnableSlice, Options, DB};
use crate::{try_error, try_warn};
fn get_default_blocks_db_path(base_dir: &PathBuf) -> PathBuf {
let mut destination_path = base_dir.clone();
destination_path.push("hord.rocksdb");
@@ -164,56 +162,6 @@ pub fn find_pinned_block_bytes_at_block_height<'a>(
}
}
pub fn find_block_bytes_at_block_height(
block_height: u32,
retry: u8,
blocks_db: &DB,
ctx: &Context,
) -> Option<Vec<u8>> {
let mut attempt = 1;
// let mut read_options = rocksdb::ReadOptions::default();
// read_options.fill_cache(true);
// read_options.set_verify_checksums(false);
let mut backoff: f64 = 1.0;
let mut rng = rng();
loop {
match blocks_db.get(block_height.to_be_bytes()) {
Ok(Some(res)) => return Some(res),
_ => {
attempt += 1;
backoff = 2.0 * backoff + (backoff * rng.random_range(0.0..1.0));
let duration = std::time::Duration::from_millis((backoff * 1_000.0) as u64);
try_warn!(
ctx,
"Unable to find block #{}, will retry in {:?}",
block_height,
duration
);
std::thread::sleep(duration);
if attempt > retry {
return None;
}
}
}
}
}
pub fn run_compaction(blocks_db_rw: &DB, lim: u32) {
let gen = 0u32.to_be_bytes();
blocks_db_rw.compact_range(Some(&gen), Some(&lim.to_be_bytes()));
}
pub fn find_missing_blocks(blocks_db: &DB, start: u32, end: u32, ctx: &Context) -> Vec<u32> {
let mut missing_blocks = vec![];
for i in start..=end {
if find_pinned_block_bytes_at_block_height(i, 0, blocks_db, ctx).is_none() {
missing_blocks.push(i);
}
}
missing_blocks
}
pub fn remove_entry_from_blocks(block_height: u32, blocks_db_rw: &DB, ctx: &Context) {
if let Err(e) = blocks_db_rw.delete(block_height.to_be_bytes()) {
try_error!(ctx, "{}", e.to_string());
@@ -241,18 +189,20 @@ pub fn insert_standardized_block(
blocks_db_rw: &DB,
ctx: &Context,
) {
let block_bytes = match super::cursor::BlockBytesCursor::from_standardized_block(&block) {
Ok(block_bytes) => block_bytes,
Err(e) => {
try_error!(
ctx,
"Unable to compress block #{}: #{}",
block.block_identifier.index,
e.to_string()
);
return;
}
};
let block_bytes =
match bitcoind::indexer::bitcoin::cursor::BlockBytesCursor::from_standardized_block(&block)
{
Ok(block_bytes) => block_bytes,
Err(e) => {
try_error!(
ctx,
"Unable to compress block #{}: #{}",
block.block_identifier.index,
e.to_string()
);
return;
}
};
insert_entry_in_blocks(
block.block_identifier.index as u32,
&block_bytes,

View File

@@ -1,13 +1,12 @@
pub mod blocks;
pub mod cursor;
pub mod models;
pub mod ordinals_pg;
use bitcoind::{try_info, try_warn, utils::Context};
use chainhook_postgres::pg_connect_with_retry;
use chainhook_sdk::utils::Context;
use config::Config;
use crate::{core::meta_protocols::brc20::brc20_pg, try_info, try_warn};
use crate::core::meta_protocols::brc20::brc20_pg;
pub async fn migrate_dbs(config: &Config, ctx: &Context) -> Result<(), String> {
let Some(ordinals) = &config.ordinals else {

View File

@@ -5,7 +5,8 @@ use chainhook_postgres::{
utils,
};
use chainhook_types::{
BitcoinBlockData, OrdinalInscriptionNumber, OrdinalOperation, TransactionIdentifier,
BitcoinBlockData, BlockIdentifier, OrdinalInscriptionNumber, OrdinalOperation,
TransactionIdentifier,
};
use deadpool_postgres::GenericClient;
use refinery::embed_migrations;
@@ -33,6 +34,28 @@ pub async fn migrate(client: &mut Client) -> Result<(), String> {
};
}
pub async fn get_chain_tip<T: GenericClient>(
client: &T,
) -> Result<Option<BlockIdentifier>, String> {
let row = client
.query_opt("SELECT block_height, block_hash FROM chain_tip", &[])
.await
.map_err(|e| format!("get_chain_tip: {e}"))?;
let Some(row) = row else {
return Ok(None);
};
let height: Option<PgNumericU64> = row.get("block_height");
let hash: Option<String> = row.get("block_hash");
if let (Some(height), Some(hash)) = (height, hash) {
Ok(Some(BlockIdentifier {
index: height.0,
hash: format!("0x{hash}"),
}))
} else {
Ok(None)
}
}
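// Hedged usage sketch (hypothetical helper): read the tip through a pooled
// client, the same way the ordinals indexer runloop does at startup.
async fn print_chain_tip(pool: &deadpool_postgres::Pool) -> Result<(), String> {
    let client = chainhook_postgres::pg_pool_client(pool).await?;
    if let Some(tip) = get_chain_tip(&client).await? {
        println!("indexed up to block #{} ({})", tip.index, tip.hash);
    }
    Ok(())
}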
pub async fn get_chain_tip_block_height<T: GenericClient>(
client: &T,
) -> Result<Option<u64>, String> {
@@ -710,13 +733,16 @@ async fn update_counts_by_block<T: GenericClient>(
}
pub async fn update_chain_tip<T: GenericClient>(
block_height: u64,
chain_tip: &BlockIdentifier,
client: &T,
) -> Result<(), String> {
client
.query(
"UPDATE chain_tip SET block_height = $1",
&[&PgNumericU64(block_height)],
"UPDATE chain_tip SET block_height = $1, block_hash = $2",
&[
&PgNumericU64(chain_tip.index),
&chain_tip.hash[2..].to_string(),
],
)
.await
.map_err(|e| format!("update_chain_tip: {e}"))?;
@@ -865,11 +891,13 @@ pub async fn insert_block<T: GenericClient>(
client,
)
.await?;
update_chain_tip(block.block_identifier.index, client).await?;
update_chain_tip(&block.block_identifier, client).await?;
Ok(())
}
/// Rolls back a previously-indexed block. It is the responsibility of the caller to make sure `block_height` is the last block
/// that was indexed.
pub async fn rollback_block<T: GenericClient>(block_height: u64, client: &T) -> Result<(), String> {
// Delete previous current locations, deduct owner counts, remove orphaned sats
let moved_sat_rows = client
@@ -1005,7 +1033,21 @@ pub async fn rollback_block<T: GenericClient>(block_height: u64, client: &T) ->
)
.await
.map_err(|e| format!("rollback_block (4): {e}"))?;
update_chain_tip(block_height - 1, client).await?;
client
.execute(
"WITH last_block AS (
SELECT block_height, block_hash
FROM locations
ORDER BY block_height DESC
LIMIT 1
)
UPDATE chain_tip SET
block_height = (SELECT block_height FROM last_block),
block_hash = (SELECT block_hash FROM last_block)",
&[],
)
.await
.map_err(|e| format!("rollback_block (5): {e}"))?;
Ok(())
}
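// Hedged sketch (test-style; the height is illustrative): because the CTE
// above falls back to the newest remaining `locations` row, rolling back the
// only indexed block leaves the chain tip empty.
async fn demo_rollback_clears_tip<T: GenericClient>(client: &T) -> Result<(), String> {
    rollback_block(800000, client).await?;
    assert_eq!(None, get_chain_tip_block_height(client).await?);
    Ok(())
}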
@@ -1440,7 +1482,8 @@ mod test {
assert_eq!(0, get_type_count("blessed", &client).await);
assert_eq!(0, get_block_reveal_count(800000, &client).await);
assert_eq!(0, get_sat_rarity_count("common", &client).await);
assert_eq!(Some(799999), get_chain_tip_block_height(&client).await?);
// We don't have a previous block, so the chain tip becomes None.
assert_eq!(None, get_chain_tip_block_height(&client).await?);
}
}
pg_reset_db(&mut pg_client).await?;

View File

@@ -0,0 +1,229 @@
use core::{
first_inscription_height,
meta_protocols::brc20::cache::brc20_new_cache,
new_traversals_lazy_cache,
pipeline::processors::{
block_archiving::store_compacted_blocks,
inscription_indexing::{process_blocks, rollback_block},
},
protocol::sequence_cursor::SequenceCursor,
};
use std::{sync::Arc, thread::JoinHandle};
use bitcoind::{
indexer::{start_bitcoin_indexer, Indexer, IndexerCommand},
try_debug,
utils::{future_block_on, Context},
};
use chainhook_postgres::{pg_pool, pg_pool_client};
use chainhook_types::BlockIdentifier;
use config::Config;
use db::{
blocks::{self, find_last_block_inserted, open_blocks_db_with_retry},
migrate_dbs,
};
use deadpool_postgres::Pool;
use utils::monitoring::PrometheusMonitoring;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate lazy_static;
extern crate serde;
pub mod core;
pub mod db;
pub mod utils;
#[derive(Debug, Clone)]
pub struct PgConnectionPools {
pub ordinals: Pool,
pub brc20: Option<Pool>,
}
fn pg_pools(config: &Config) -> PgConnectionPools {
PgConnectionPools {
ordinals: pg_pool(&config.ordinals.as_ref().unwrap().db).unwrap(),
brc20: config
.ordinals_brc20_config()
.map(|brc20| pg_pool(&brc20.db).unwrap()),
}
}
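// Note (hedged): these pools are built once and cloned into the indexer
// thread below; cloning a deadpool `Pool` is cheap because the pool is
// internally reference-counted, so the runloop and startup checks share it.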
async fn new_ordinals_indexer_runloop(
prometheus: &PrometheusMonitoring,
config: &Config,
ctx: &Context,
) -> Result<Indexer, String> {
let (commands_tx, commands_rx) = crossbeam_channel::unbounded::<IndexerCommand>();
let pg_pools = pg_pools(config);
let config_moved = config.clone();
let ctx_moved = ctx.clone();
let pg_pools_moved = pg_pools.clone();
let prometheus_moved = prometheus.clone();
let handle: JoinHandle<()> = hiro_system_kit::thread_named("ordinals_indexer")
.spawn(move || {
future_block_on(&ctx_moved.clone(), async move {
let cache_l2 = Arc::new(new_traversals_lazy_cache(2048));
let garbage_collect_every_n_blocks = 100;
let mut garbage_collect_nth_block = 0;
let mut sequence_cursor = SequenceCursor::new();
let mut brc20_cache: Option<core::meta_protocols::brc20::cache::Brc20MemoryCache> =
brc20_new_cache(&config_moved);
loop {
match commands_rx.recv() {
Ok(command) => match command {
IndexerCommand::StoreCompactedBlocks(blocks) => {
let blocks_db_rw =
open_blocks_db_with_retry(true, &config_moved, &ctx_moved);
store_compacted_blocks(blocks, true, &blocks_db_rw, &ctx_moved);
}
IndexerCommand::IndexBlocks {
mut apply_blocks,
rollback_block_ids,
} => {
if !rollback_block_ids.is_empty() {
let blocks_db_rw =
open_blocks_db_with_retry(true, &config_moved, &ctx_moved);
for block_id in rollback_block_ids.iter() {
blocks::delete_blocks_in_block_range(
block_id.index as u32,
block_id.index as u32,
&blocks_db_rw,
&ctx_moved,
);
rollback_block(
block_id.index,
&config_moved,
&pg_pools_moved,
&ctx_moved,
)
.await?;
}
blocks_db_rw.flush().map_err(|e| {
format!("error dropping rollback blocks from rocksdb: {e}")
})?;
}
let blocks = match process_blocks(
&mut apply_blocks,
&mut sequence_cursor,
&cache_l2,
&mut brc20_cache,
&prometheus_moved,
&config_moved,
&pg_pools_moved,
&ctx_moved,
)
.await
{
Ok(blocks) => blocks,
Err(e) => return Err(format!("error indexing blocks: {e}")),
};
garbage_collect_nth_block += blocks.len();
if garbage_collect_nth_block > garbage_collect_every_n_blocks {
try_debug!(
ctx_moved,
"Clearing cache L2 ({} entries)",
cache_l2.len()
);
cache_l2.clear();
garbage_collect_nth_block = 0;
}
}
},
Err(_) => todo!(),
}
}
});
})
.expect("unable to spawn thread");
let pg_chain_tip = {
let ord_client = pg_pool_client(&pg_pools.ordinals).await?;
db::ordinals_pg::get_chain_tip(&ord_client).await?
};
let blocks_chain_tip = {
let blocks_db = open_blocks_db_with_retry(false, config, ctx);
let height = find_last_block_inserted(&blocks_db);
// Blocks DB does not have the hash available.
if height > 0 {
Some(BlockIdentifier {
index: height as u64,
hash: "0x0000000000000000000000000000000000000000000000000000000000000000".into(),
})
} else {
None
}
};
let chain_tip = match (pg_chain_tip, blocks_chain_tip) {
// Index chain tip is the minimum of postgres DB tip vs blocks DB tip.
(Some(x), Some(y)) => Some(if x.index <= y.index { x } else { y }),
// No blocks DB means we start from zero so we can pull the blocks again.
(Some(_), None) => None,
// No postgres DB means we might be using an archived blocks DB; make sure we index starting at the first inscription height.
(None, Some(y)) => {
let x = BlockIdentifier {
index: first_inscription_height(config) - 1,
hash: "0x0000000000000000000000000000000000000000000000000000000000000000".into(),
};
Some(if x.index <= y.index { x } else { y })
}
// Start from zero.
(None, None) => None,
};
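// Worked example (heights hypothetical): with a postgres tip at #850_000 and
// a blocks DB tip at #849_990, the resolved chain tip is #849_990, so
// indexing resumes just above whichever store fell behind and both converge.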
Ok(Indexer {
commands_tx,
chain_tip,
thread_handle: handle,
})
}
pub async fn get_chain_tip(config: &Config) -> Result<BlockIdentifier, String> {
let pool = pg_pool(&config.ordinals.as_ref().unwrap().db).unwrap();
let ord_client = pg_pool_client(&pool).await?;
Ok(db::ordinals_pg::get_chain_tip(&ord_client).await?.unwrap())
}
pub async fn rollback_block_range(
start_block: u64,
end_block: u64,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
let blocks_db_rw = open_blocks_db_with_retry(true, config, ctx);
let pg_pools = pg_pools(config);
blocks::delete_blocks_in_block_range(start_block as u32, end_block as u32, &blocks_db_rw, ctx);
for block in start_block..=end_block {
rollback_block(block, config, &pg_pools, ctx).await?;
}
blocks_db_rw
.flush()
.map_err(|e| format!("error dropping rollback blocks from rocksdb: {e}"))
}
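// Hedged usage sketch (hypothetical repair flow): clear a corrupted range,
// after which the next indexer run re-downloads and re-indexes those blocks.
async fn repair_range(
    start: u64,
    end: u64,
    config: &Config,
    ctx: &Context,
) -> Result<(), String> {
    rollback_block_range(start, end, config, ctx).await
}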
/// Starts the ordinals indexing process. Blocks the main thread indefinitely until it is explicitly stopped, or until it
/// reaches the chain tip when `stream_blocks_at_chain_tip` is set to false.
pub async fn start_ordinals_indexer(
stream_blocks_at_chain_tip: bool,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
migrate_dbs(config, ctx).await?;
let indexer = new_ordinals_indexer_runloop(&PrometheusMonitoring::new(), config, ctx).await?;
start_bitcoin_indexer(
&indexer,
first_inscription_height(config),
stream_blocks_at_chain_tip,
true,
config,
ctx,
)
.await
}
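// Hedged sketch (hypothetical harness, assumes an async runtime is already in
// place): start the ordinals indexer and keep streaming once it reaches the
// bitcoind chain tip; `start_ordinals_indexer` runs `migrate_dbs` itself.
pub async fn run_ordinals(config: &Config, ctx: &Context) -> Result<(), String> {
    start_ordinals_indexer(true, config, ctx).await
}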

View File

@@ -0,0 +1,25 @@
pub mod monitoring;
use chainhook_types::TransactionIdentifier;
pub fn format_inscription_id(
transaction_identifier: &TransactionIdentifier,
inscription_subindex: usize,
) -> String {
format!(
"{}i{}",
transaction_identifier.get_hash_bytes_str(),
inscription_subindex,
)
}
pub fn format_outpoint_to_watch(
transaction_identifier: &TransactionIdentifier,
output_index: usize,
) -> String {
format!(
"{}:{}",
transaction_identifier.get_hash_bytes_str(),
output_index
)
}
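// Worked example (txid shortened, values illustrative): for a transaction
// whose hash bytes render as "b61b…c3a9", `format_inscription_id(&tx, 0)`
// yields "b61b…c3a9i0" and `format_outpoint_to_watch(&tx, 1)` yields
// "b61b…c3a9:1".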

View File

@@ -1,4 +1,4 @@
use chainhook_sdk::utils::Context;
use bitcoind::{try_debug, try_info, try_warn, utils::Context};
use hyper::{
header::CONTENT_TYPE,
service::{make_service_fn, service_fn},
@@ -9,8 +9,6 @@ use prometheus::{
Encoder, Registry, TextEncoder,
};
use crate::{try_debug, try_info, try_warn};
type UInt64Gauge = GenericGauge<AtomicU64>;
#[derive(Debug, Clone)]

View File

@@ -4,7 +4,7 @@ version.workspace = true
edition = "2021"
[dependencies]
chainhook-sdk = { path = "../chainhook-sdk" }
bitcoind = { path = "../bitcoind" }
chainhook-types = { path = "../chainhook-types-rs" }
bitcoin = { workspace = true }
lru = "0.12.3"

View File

@@ -1,18 +1,14 @@
use std::collections::HashMap;
use chainhook_sdk::utils::Context;
use bitcoind::{try_debug, try_info, utils::Context};
use tokio_postgres::Transaction;
use crate::{
db::{
models::{
db_balance_change::DbBalanceChange, db_ledger_entry::DbLedgerEntry, db_rune::DbRune,
db_supply_change::DbSupplyChange,
},
pg_insert_balance_changes, pg_insert_ledger_entries, pg_insert_runes,
pg_insert_supply_changes,
use crate::db::{
models::{
db_balance_change::DbBalanceChange, db_ledger_entry::DbLedgerEntry, db_rune::DbRune,
db_supply_change::DbSupplyChange,
},
try_debug, try_info,
pg_insert_balance_changes, pg_insert_ledger_entries, pg_insert_runes, pg_insert_supply_changes,
};
/// Holds rows that have yet to be inserted into the database.

View File

@@ -1,7 +1,7 @@
use std::{collections::HashMap, num::NonZeroUsize, str::FromStr};
use bitcoin::{Network, ScriptBuf};
use chainhook_sdk::utils::Context;
use bitcoind::{try_debug, try_info, try_warn, utils::Context};
use chainhook_types::bitcoin::TxIn;
use config::Config;
use lru::LruCache;
@@ -12,17 +12,13 @@ use super::{
db_cache::DbCache, input_rune_balance::InputRuneBalance, transaction_cache::TransactionCache,
transaction_location::TransactionLocation, utils::move_block_output_cache_to_output_cache,
};
use crate::{
db::{
cache::utils::input_rune_balances_from_tx_inputs,
models::{
db_balance_change::DbBalanceChange, db_ledger_entry::DbLedgerEntry,
db_ledger_operation::DbLedgerOperation, db_rune::DbRune,
db_supply_change::DbSupplyChange,
},
pg_get_max_rune_number, pg_get_rune_by_id, pg_get_rune_total_mints,
use crate::db::{
cache::utils::input_rune_balances_from_tx_inputs,
models::{
db_balance_change::DbBalanceChange, db_ledger_entry::DbLedgerEntry,
db_ledger_operation::DbLedgerOperation, db_rune::DbRune, db_supply_change::DbSupplyChange,
},
try_debug, try_info, try_warn,
pg_get_max_rune_number, pg_get_rune_by_id, pg_get_rune_total_mints,
};
/// Holds rune data across multiple blocks for faster computations. Processes rune events as they happen during transactions and
@@ -301,7 +297,7 @@ impl IndexCache {
self.db_cache.flush(db_tx, ctx).await;
let db_rune = pg_get_rune_by_id(rune_id, db_tx, ctx).await?;
self.rune_cache.put(*rune_id, db_rune.clone());
return Some(db_rune);
Some(db_rune)
}
async fn get_cached_rune_total_mints(
@@ -323,7 +319,7 @@ impl IndexCache {
self.db_cache.flush(db_tx, ctx).await;
let total = pg_get_rune_total_mints(rune_id, db_tx, ctx).await?;
self.rune_total_mints_cache.put(*rune_id, total);
return Some(total);
Some(total)
}
/// Take ledger entries returned by the `TransactionCache` and add them to the `DbCache`. Update global balances and counters

View File

@@ -4,21 +4,18 @@ use std::{
};
use bitcoin::ScriptBuf;
use chainhook_sdk::utils::Context;
use bitcoind::{try_debug, try_info, try_warn, utils::Context};
use ordinals::{Cenotaph, Edict, Etching, Rune, RuneId};
use super::{
input_rune_balance::InputRuneBalance, transaction_location::TransactionLocation,
utils::move_rune_balance_to_output,
};
use crate::{
db::{
cache::utils::{is_rune_mintable, new_sequential_ledger_entry},
models::{
db_ledger_entry::DbLedgerEntry, db_ledger_operation::DbLedgerOperation, db_rune::DbRune,
},
use crate::db::{
cache::utils::{is_rune_mintable, new_sequential_ledger_entry},
models::{
db_ledger_entry::DbLedgerEntry, db_ledger_operation::DbLedgerOperation, db_rune::DbRune,
},
try_debug, try_info, try_warn,
};
/// Holds cached data relevant to a single transaction during indexing.
@@ -406,7 +403,7 @@ mod test {
use std::collections::VecDeque;
use bitcoin::ScriptBuf;
use chainhook_sdk::utils::Context;
use bitcoind::utils::Context;
use maplit::hashmap;
use ordinals::{Edict, Etching, Rune, Terms};

View File

@@ -1,21 +1,18 @@
use std::collections::{HashMap, VecDeque};
use bitcoin::{Address, ScriptBuf};
use chainhook_sdk::utils::Context;
use bitcoind::{try_info, try_warn, utils::Context};
use chainhook_types::bitcoin::TxIn;
use lru::LruCache;
use ordinals::RuneId;
use tokio_postgres::Transaction;
use super::{input_rune_balance::InputRuneBalance, transaction_location::TransactionLocation};
use crate::{
db::{
models::{
db_ledger_entry::DbLedgerEntry, db_ledger_operation::DbLedgerOperation, db_rune::DbRune,
},
pg_get_input_rune_balances,
use crate::db::{
models::{
db_ledger_entry::DbLedgerEntry, db_ledger_operation::DbLedgerOperation, db_rune::DbRune,
},
try_info, try_warn,
pg_get_input_rune_balances,
};
/// Takes all transaction inputs and transforms them into rune balances to be allocated for operations. Looks inside an output LRU
@@ -316,7 +313,7 @@ mod test {
use std::collections::{HashMap, VecDeque};
use bitcoin::ScriptBuf;
use chainhook_sdk::utils::Context;
use bitcoind::utils::Context;
use maplit::hashmap;
use ordinals::RuneId;
@@ -686,7 +683,7 @@ mod test {
mod input_balances {
use std::num::NonZeroUsize;
use chainhook_sdk::utils::Context;
use bitcoind::utils::Context;
use chainhook_types::{
bitcoin::{OutPoint, TxIn},
TransactionIdentifier,

View File

@@ -5,16 +5,13 @@ use bitcoin::{
transaction::{TxOut, Version},
Amount, Network, ScriptBuf, Transaction,
};
use chainhook_sdk::utils::Context;
use bitcoind::{try_info, utils::Context};
use chainhook_types::{BitcoinBlockData, BitcoinTransactionData};
use ordinals::{Artifact, Runestone};
use tokio_postgres::Client;
use super::cache::index_cache::IndexCache;
use crate::{
db::{cache::transaction_location::TransactionLocation, pg_roll_back_block},
try_info,
};
use crate::db::{cache::transaction_location::TransactionLocation, pg_roll_back_block};
pub fn get_rune_genesis_block_height(network: Network) -> u64 {
match network {

View File

@@ -1,8 +1,9 @@
use std::{collections::HashMap, process, str::FromStr};
use bitcoind::{try_error, try_info, utils::Context};
use cache::input_rune_balance::InputRuneBalance;
use chainhook_postgres::types::{PgBigIntU32, PgNumericU128, PgNumericU64};
use chainhook_sdk::utils::Context;
use chainhook_types::BlockIdentifier;
use config::Config;
use models::{
db_balance_change::DbBalanceChange, db_ledger_entry::DbLedgerEntry, db_rune::DbRune,
@@ -12,8 +13,6 @@ use ordinals::RuneId;
use refinery::embed_migrations;
use tokio_postgres::{types::ToSql, Client, Error, GenericClient, NoTls, Transaction};
use crate::{try_error, try_info};
pub mod cache;
pub mod index;
pub mod models;
@@ -383,6 +382,26 @@ pub async fn pg_get_block_height(client: &mut Client, _ctx: &Context) -> Option<
max.map(|max| max.0)
}
pub async fn get_chain_tip(client: &mut Client, _ctx: &Context) -> Option<BlockIdentifier> {
let row = client
.query_opt(
"SELECT block_height, block_hash
FROM ledger
ORDER BY block_height DESC
LIMIT 1",
&[],
)
.await
.expect("get_chain_tip");
let row = row?;
let block_height: PgNumericU64 = row.get("block_height");
let block_hash: String = row.get("block_hash");
Some(BlockIdentifier {
index: block_height.0,
hash: format!("0x{block_hash}"),
})
}
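// Hedged sketch (hypothetical helper): decide where the runes indexer should
// resume, falling back to the rune genesis height when the ledger is empty;
// this mirrors the chain-tip logic in the runes indexer runloop.
async fn resume_height(client: &mut Client, config: &Config, ctx: &Context) -> u64 {
    match get_chain_tip(client, ctx).await {
        Some(tip) => tip.index + 1,
        None => index::get_rune_genesis_block_height(config.bitcoind.network),
    }
}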
pub async fn pg_get_rune_by_id(
id: &RuneId,
db_tx: &mut Transaction<'_>,

View File

@@ -1,48 +1,118 @@
#[macro_use]
extern crate hiro_system_kit;
use std::thread::JoinHandle;
use bitcoind::{
indexer::{start_bitcoin_indexer, Indexer, IndexerCommand},
utils::{future_block_on, Context},
};
use chainhook_types::BlockIdentifier;
use config::Config;
use db::{
cache::index_cache::IndexCache,
index::{get_rune_genesis_block_height, index_block, roll_back_block},
pg_connect,
};
extern crate serde;
pub mod db;
pub mod scan;
pub mod service;
#[macro_export]
macro_rules! try_info {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| info!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| info!(l, $tag));
};
async fn new_runes_indexer_runloop(config: &Config, ctx: &Context) -> Result<Indexer, String> {
let (commands_tx, commands_rx) = crossbeam_channel::unbounded::<IndexerCommand>();
let config_moved = config.clone();
let ctx_moved = ctx.clone();
let handle: JoinHandle<()> = hiro_system_kit::thread_named("runes_indexer")
.spawn(move || {
future_block_on(&ctx_moved.clone(), async move {
let mut index_cache = IndexCache::new(
&config_moved,
&mut pg_connect(&config_moved, false, &ctx_moved).await,
&ctx_moved,
)
.await;
loop {
match commands_rx.recv() {
Ok(command) => match command {
IndexerCommand::StoreCompactedBlocks(_) => {
// No-op
}
IndexerCommand::IndexBlocks {
mut apply_blocks,
rollback_block_ids,
} => {
let mut pg_client =
pg_connect(&config_moved, false, &ctx_moved).await;
for block_id in rollback_block_ids.iter() {
roll_back_block(&mut pg_client, block_id.index, &ctx_moved)
.await;
}
for block in apply_blocks.iter_mut() {
index_block(
&mut pg_client,
&mut index_cache,
block,
&ctx_moved,
)
.await;
}
}
},
Err(_) => todo!(),
}
}
});
})
.expect("unable to spawn thread");
let mut pg_client = pg_connect(config, false, ctx).await;
let chain_tip = db::get_chain_tip(&mut pg_client, ctx)
.await
.unwrap_or(BlockIdentifier {
index: get_rune_genesis_block_height(config.bitcoind.network) - 1,
hash: "0x0000000000000000000000000000000000000000000000000000000000000000".into(),
});
Ok(Indexer {
commands_tx,
chain_tip: Some(chain_tip),
thread_handle: handle,
})
}
#[macro_export]
macro_rules! try_debug {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| debug!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| debug!(l, $tag));
};
pub async fn get_chain_tip(config: &Config, ctx: &Context) -> Result<BlockIdentifier, String> {
let mut pg_client = pg_connect(config, false, ctx).await;
Ok(db::get_chain_tip(&mut pg_client, ctx).await.unwrap())
}
#[macro_export]
macro_rules! try_warn {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| warn!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| warn!(l, $tag));
};
pub async fn rollback_block_range(
start_block: u64,
end_block: u64,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
let mut pg_client = pg_connect(config, false, ctx).await;
for block_id in start_block..=end_block {
roll_back_block(&mut pg_client, block_id, ctx).await;
}
Ok(())
}
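// Hedged usage sketch (heights illustrative): clear a reorged range, then a
// fresh `start_runes_indexer` run re-applies those blocks from bitcoind.
//
// rollback_block_range(845_100, 845_102, &config, &ctx).await?;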
#[macro_export]
macro_rules! try_error {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| error!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| error!(l, $tag));
};
/// Starts the runes indexing process. Blocks the main thread indefinitely until it is explicitly stopped, or until it
/// reaches the chain tip when `stream_blocks_at_chain_tip` is set to false.
pub async fn start_runes_indexer(
stream_blocks_at_chain_tip: bool,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
pg_connect(config, true, ctx).await;
let indexer = new_runes_indexer_runloop(config, ctx).await?;
start_bitcoin_indexer(
&indexer,
get_rune_genesis_block_height(config.bitcoind.network),
stream_blocks_at_chain_tip,
false,
config,
ctx,
)
.await
}
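// Hedged sketch (hypothetical harness, assumes an async runtime): run a
// one-shot backfill that exits at the chain tip instead of streaming, since
// `stream_blocks_at_chain_tip` is false; postgres is connected and migrated
// by `start_runes_indexer` itself.
pub async fn backfill_runes(config: &Config, ctx: &Context) -> Result<(), String> {
    start_runes_indexer(false, config, ctx).await
}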

View File

@@ -1,88 +0,0 @@
use chainhook_sdk::{
indexer::bitcoin::{
build_http_client, download_and_parse_block_with_retry, retrieve_block_hash_with_retry,
standardize_bitcoin_block,
},
utils::{bitcoind::bitcoind_get_block_height, BlockHeights, Context},
};
use chainhook_types::BitcoinNetwork;
use config::Config;
use tokio_postgres::Client;
use crate::{
db::{
cache::index_cache::IndexCache,
index::{index_block, roll_back_block},
},
try_info,
};
pub async fn drop_blocks(start_block: u64, end_block: u64, pg_client: &mut Client, ctx: &Context) {
for block in start_block..=end_block {
roll_back_block(pg_client, block, ctx).await;
}
}
pub async fn scan_blocks(
blocks: Vec<u64>,
config: &Config,
pg_client: &mut Client,
index_cache: &mut IndexCache,
ctx: &Context,
) -> Result<(), String> {
let block_heights_to_scan_res = BlockHeights::Blocks(blocks).get_sorted_entries();
let mut block_heights_to_scan = block_heights_to_scan_res
.map_err(|_e| "Block start / end block spec invalid".to_string())?;
try_info!(
ctx,
"Scanning {} Bitcoin blocks",
block_heights_to_scan.len()
);
let bitcoin_config = config.bitcoind.clone();
let mut number_of_blocks_scanned = 0;
let http_client = build_http_client();
while let Some(current_block_height) = block_heights_to_scan.pop_front() {
number_of_blocks_scanned += 1;
let block_hash = retrieve_block_hash_with_retry(
&http_client,
&current_block_height,
&bitcoin_config,
ctx,
)
.await?;
let raw_block =
download_and_parse_block_with_retry(&http_client, &block_hash, &bitcoin_config, ctx)
.await?;
let mut block = standardize_bitcoin_block(
raw_block,
&BitcoinNetwork::from_network(bitcoin_config.network),
ctx,
)
.unwrap();
index_block(pg_client, index_cache, &mut block, ctx).await;
if block_heights_to_scan.is_empty() {
let bitcoind_tip = bitcoind_get_block_height(&config.bitcoind, ctx);
let new_tip = match block_heights_to_scan.back() {
Some(end_block) => {
if *end_block > bitcoind_tip {
bitcoind_tip
} else {
*end_block
}
}
None => bitcoind_tip,
};
for entry in (current_block_height + 1)..new_tip {
block_heights_to_scan.push_back(entry);
}
}
}
try_info!(ctx, "{number_of_blocks_scanned} blocks scanned");
Ok(())
}

View File

@@ -1 +0,0 @@
pub mod bitcoin;

View File

@@ -1,179 +0,0 @@
use std::{cmp::Ordering, sync::mpsc::channel};
use chainhook_sdk::{
observer::{start_event_observer, BitcoinBlockDataCached, ObserverEvent, ObserverSidecar},
utils::{bitcoind::bitcoind_get_block_height, Context},
};
use chainhook_types::BlockIdentifier;
use config::Config;
use crossbeam_channel::select;
use crate::{
db::{
cache::index_cache::IndexCache,
index::{get_rune_genesis_block_height, index_block, roll_back_block},
pg_connect, pg_get_block_height,
},
scan::bitcoin::scan_blocks,
try_error, try_info,
};
pub async fn get_index_chain_tip(config: &Config, ctx: &Context) -> u64 {
let mut pg_client = pg_connect(config, true, ctx).await;
pg_get_block_height(&mut pg_client, ctx)
.await
.unwrap_or(get_rune_genesis_block_height(config.bitcoind.network) - 1)
}
pub async fn catch_up_to_bitcoin_chain_tip(config: &Config, ctx: &Context) -> Result<(), String> {
let mut pg_client = pg_connect(config, true, ctx).await;
let mut index_cache = IndexCache::new(config, &mut pg_client, ctx).await;
loop {
let chain_tip = pg_get_block_height(&mut pg_client, ctx)
.await
.unwrap_or(get_rune_genesis_block_height(config.bitcoind.network) - 1);
let bitcoind_chain_tip = bitcoind_get_block_height(&config.bitcoind, ctx);
match bitcoind_chain_tip.cmp(&chain_tip) {
Ordering::Less => {
try_info!(
ctx,
"Waiting for bitcoind to reach height {}, currently at {}",
chain_tip,
bitcoind_chain_tip
);
std::thread::sleep(std::time::Duration::from_secs(10));
}
Ordering::Greater => {
try_info!(
ctx,
"Block height is behind bitcoind, scanning block range {} to {}",
chain_tip + 1,
bitcoind_chain_tip
);
scan_blocks(
((chain_tip + 1)..=bitcoind_chain_tip).collect(),
config,
&mut pg_client,
&mut index_cache,
ctx,
)
.await?;
}
Ordering::Equal => {
try_info!(ctx, "Caught up to bitcoind chain tip at {}", chain_tip);
break;
}
}
}
Ok(())
}
pub async fn start_service(config: &Config, ctx: &Context) -> Result<(), String> {
catch_up_to_bitcoin_chain_tip(config, ctx).await?;
// Start chainhook event observer, we're at chain tip.
let (observer_cmd_tx, observer_cmd_rx) = channel();
let (observer_event_tx, observer_event_rx) = crossbeam_channel::unbounded();
let observer_sidecar = set_up_observer_sidecar_runloop(config, ctx)
.await
.expect("unable to set up observer sidecar");
let event_observer_config = config.bitcoind.clone();
let context = ctx.clone();
let observer_cmd_tx_moved = observer_cmd_tx.clone();
let _ = std::thread::spawn(move || {
start_event_observer(
event_observer_config,
observer_cmd_tx_moved,
observer_cmd_rx,
Some(observer_event_tx),
Some(observer_sidecar),
context,
)
.expect("unable to start Stacks chain observer");
});
try_info!(ctx, "Listening for new blocks via Chainhook SDK");
loop {
let event = match observer_event_rx.recv() {
Ok(cmd) => cmd,
Err(e) => {
try_error!(ctx, "Error: broken channel {}", e.to_string());
break;
}
};
if let ObserverEvent::Terminate = event {
try_info!(ctx, "Received termination event from Chainhook SDK");
break;
}
}
Ok(())
}
pub async fn set_up_observer_sidecar_runloop(
config: &Config,
ctx: &Context,
) -> Result<ObserverSidecar, String> {
// Sidecar will be receiving blocks to mutate
let (block_mutator_in_tx, block_mutator_in_rx) = crossbeam_channel::unbounded();
// Sidecar will be sending mutated blocks back to chainhook-sdk
let (block_mutator_out_tx, block_mutator_out_rx) = crossbeam_channel::unbounded();
// HandleBlock
let (chain_event_notifier_tx, chain_event_notifier_rx) = crossbeam_channel::unbounded();
let observer_sidecar = ObserverSidecar {
bitcoin_blocks_mutator: Some((block_mutator_in_tx, block_mutator_out_rx)),
bitcoin_chain_event_notifier: Some(chain_event_notifier_tx),
};
let ctx = ctx.clone();
let config = config.clone();
let _ = hiro_system_kit::thread_named("Observer Sidecar Runloop").spawn(move || {
hiro_system_kit::nestable_block_on(async {
let mut index_cache =
IndexCache::new(&config, &mut pg_connect(&config, false, &ctx).await, &ctx).await;
loop {
select! {
recv(block_mutator_in_rx) -> msg => {
if let Ok((mut blocks_to_mutate, blocks_ids_to_rollback)) = msg {
chainhook_sidecar_mutate_blocks(
&mut index_cache,
&mut blocks_to_mutate,
&blocks_ids_to_rollback,
&config,
&ctx,
).await;
let _ = block_mutator_out_tx.send(blocks_to_mutate);
}
}
recv(chain_event_notifier_rx) -> msg => {
if let Ok(_command) = msg {
// We don't need to do anything here because we already indexed the block during the mutation above.
}
}
}
}
});
});
Ok(observer_sidecar)
}
pub async fn chainhook_sidecar_mutate_blocks(
index_cache: &mut IndexCache,
blocks_to_mutate: &mut [BitcoinBlockDataCached],
block_ids_to_rollback: &[BlockIdentifier],
config: &Config,
ctx: &Context,
) {
try_info!(ctx, "Received mutate blocks message from Chainhook SDK");
let mut pg_client = pg_connect(config, false, ctx).await;
for block_id in block_ids_to_rollback.iter() {
roll_back_block(&mut pg_client, block_id.index, ctx).await;
}
for cache in blocks_to_mutate.iter_mut() {
if !cache.processed_by_sidecar {
index_block(&mut pg_client, index_cache, &mut cache.block, ctx).await;
cache.processed_by_sidecar = true;
}
}
}

View File

@@ -0,0 +1,12 @@
ALTER TABLE chain_tip ADD COLUMN block_hash TEXT;
ALTER TABLE chain_tip ALTER COLUMN block_height DROP NOT NULL;
WITH last_block AS (
SELECT block_height, block_hash
FROM locations
ORDER BY block_height DESC
LIMIT 1
)
UPDATE chain_tip SET
block_height = (SELECT block_height FROM last_block),
block_hash = (SELECT block_hash FROM last_block);