feat(runes): add runes indexer (#453)

* rename ordhook-cli to cli

* rename ordhook-cli

* update configs

* update config to support future runehook integration

* new code

* add ci

* files

* standard

* add files

* rename binary to bitcoin-indexer and commands to ordinals from ordhook

* config component

* integration

* runes build

* fix runes

* indexer toml

* add runes tests to ci

* rename dockerfile

* fix: doctest

---------

Co-authored-by: ASuciuX <asuciu@hiro.so>
Author: Rafael Cárdenas
Date: 2025-02-28 13:22:27 -06:00
Committed by: GitHub
Parent commit: da5596afec
Commit: fd2a8496e3
68 changed files with 5385 additions and 1802 deletions


@@ -1,5 +1,5 @@
[alias]
ordhook-install = "install --path components/ordhook-cli --locked --force"
ordhook-install = "install --path components/cli --locked --force"
[env]
RUST_TEST_THREADS = "1"


@@ -124,10 +124,11 @@ jobs:
fail-fast: false
matrix:
suite:
- ordhook-cli
- ordhook-core
- cli
- chainhook-sdk
- chainhook-postgres
- ordhook-core
- runes
runs-on: ubuntu-latest
defaults:
run:
@@ -229,7 +230,7 @@ jobs:
uses: docker/metadata-action@v5
with:
images: |
hirosystems/ordhook
hirosystems/bitcoin-indexer
tags: |
type=ref,event=branch
type=ref,event=pr
@@ -250,7 +251,7 @@ jobs:
context: .
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
file: ./dockerfiles/components/ordhook.dockerfile
file: ./dockerfiles/components/bitcoin-indexer.dockerfile
build-args: |
GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }}
cache-from: type=gha

.gitignore

@@ -20,7 +20,7 @@ components/chainhook-types-js/dist
*.rdb
/Ordhook.toml
cache/
/cache/
./tests
tmp/
data

.vscode/Indexer.toml (new file)

@@ -0,0 +1,48 @@
[storage]
working_dir = "tmp"
[metrics]
enabled = true
prometheus_port = 9153
[ordinals.db]
database = "ordinals"
host = "localhost"
port = 5432
username = "postgres"
password = "postgres"
[ordinals.meta_protocols.brc20]
enabled = true
lru_cache_size = 10000
[ordinals.meta_protocols.brc20.db]
database = "brc20"
host = "localhost"
port = 5432
username = "postgres"
password = "postgres"
[runes]
lru_cache_size = 10000
[runes.db]
database = "runes"
host = "localhost"
port = 5432
username = "postgres"
password = "postgres"
[bitcoind]
network = "mainnet"
rpc_url = "http://localhost:8332"
rpc_username = "rafaelcr"
rpc_password = "developer"
zmq_url = "tcp://0.0.0.0:18543"
[resources]
ulimit = 2048
cpu_core_available = 6
memory_available = 16
bitcoind_rpc_threads = 2
bitcoind_rpc_timeout = 15
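
For context, a minimal sketch of how a file like this is consumed at startup. It assumes the new `config` crate introduced later in this diff (components/config); `Config::from_file_path` and the `assert_*_config` helpers are defined there.

use config::Config;

fn load_indexer_config() -> Result<Config, String> {
    // Parses the TOML above into the typed Config from components/config.
    let config = Config::from_file_path(".vscode/Indexer.toml")?;
    // Both protocol sections are optional tables; assert the ones you need.
    config.assert_ordinals_config()?;
    config.assert_runes_config()?;
    Ok(config)
}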

.vscode/launch.json

@@ -20,7 +20,7 @@
"request": "launch",
"name": "run: ordhook service",
"cargo": {
"args": ["build", "--bin=ordhook", "--package=ordhook-cli"],
"args": ["build", "--bin=ordhook", "--package=cli"],
"filter": {
"name": "ordhook",
"kind": "bin"
@@ -29,7 +29,7 @@
"args": [
"service",
"start",
"--config-path=${workspaceFolder}/.vscode/ordhook.toml",
"--config-path=${workspaceFolder}/.vscode/Indexer.toml",
],
"cwd": "${workspaceFolder}"
},
@@ -38,7 +38,7 @@
"request": "launch",
"name": "Debug unit tests in executable 'ordhook'",
"cargo": {
"args": ["test", "--no-run", "--bin=ordhook", "--package=ordhook-cli"],
"args": ["test", "--no-run", "--bin=ordhook", "--package=cli"],
"filter": {
"name": "ordhook",
"kind": "bin"

.vscode/ordhook.toml (deleted)

@@ -1,46 +0,0 @@
[storage]
working_dir = "tmp"
observers_working_dir = "tmp"
[ordinals_db]
database = "ordinals"
host = "localhost"
port = 5432
username = "postgres"
password = "postgres"
[brc20_db]
database = "brc20"
host = "localhost"
port = 5432
username = "postgres"
password = "postgres"
# The HTTP API allows you to dynamically
# register / deregister predicates.
# Disabled by default.
#
# [http_api]
# http_port = 20456
[network]
mode = "mainnet"
bitcoind_rpc_url = "http://localhost:8332"
bitcoind_rpc_username = "rafaelcr"
bitcoind_rpc_password = "developer"
bitcoind_zmq_url = "tcp://0.0.0.0:18543"
[resources]
ulimit = 2048
cpu_core_available = 6
memory_available = 16
bitcoind_rpc_threads = 2
bitcoind_rpc_timeout = 15
expected_observers_count = 1
[logs]
ordinals_internals = true
chainhook_internals = true
[meta_protocols]
brc20 = true

Cargo.lock (generated): file diff suppressed because it is too large

@@ -3,18 +3,21 @@ members = [
"components/chainhook-sdk",
"components/chainhook-postgres",
"components/chainhook-types-rs",
"components/ordhook-cli",
"components/cli",
"components/config",
"components/ordhook-core",
"components/ord",
"components/runes",
]
default-members = ["components/ordhook-cli"]
default-members = ["components/cli"]
resolver = "2"
[workspace.dependencies]
bitcoin = "0.31.2"
bitcoin = "0.32.5"
deadpool-postgres = "0.14.0"
hiro-system-kit = "0.3.4"
refinery = { version = "0.8", features = ["tokio-postgres"] }
tokio = { version = "1.38.1", features = ["full"] }
tokio-postgres = "0.7.10"
[workspace.package]


@@ -1,6 +1,6 @@
{
"name": "runes-api",
"version": "0.3.0",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {


@@ -4,11 +4,12 @@ version.workspace = true
edition = "2021"
[dependencies]
config = { path = "../config" }
bytes = "1.3"
deadpool-postgres = { workspace = true }
num-traits = "0.2.14"
slog = { version = "2.7.0" }
tokio = { version = "1.38.0", features = ["rt-multi-thread", "macros"] }
tokio = { workspace = true }
tokio-postgres = { workspace = true }
[dev-dependencies]


@@ -1,6 +1,7 @@
pub mod types;
pub mod utils;
use config::PgDatabaseConfig;
use deadpool_postgres::{Manager, ManagerConfig, Object, Pool, RecyclingMethod, Transaction};
use tokio_postgres::{Client, Config, NoTls, Row};
@@ -9,21 +10,9 @@ use tokio_postgres::{Client, Config, NoTls, Row};
/// vary depending on column counts. Queries should use other custom chunk sizes as needed.
pub const BATCH_QUERY_CHUNK_SIZE: usize = 500;
/// A Postgres configuration for a single database.
#[derive(Clone, Debug)]
pub struct PgConnectionConfig {
pub dbname: String,
pub host: String,
pub port: u16,
pub user: String,
pub password: Option<String>,
pub search_path: Option<String>,
pub pool_max_size: Option<usize>,
}
/// Creates a Postgres connection pool based on a single database config. You can then use this pool to create ad-hoc clients and
/// transactions for interacting with the database.
pub fn pg_pool(config: &PgConnectionConfig) -> Result<Pool, String> {
pub fn pg_pool(config: &PgDatabaseConfig) -> Result<Pool, String> {
let mut pg_config = Config::new();
pg_config
.dbname(&config.dbname)
@@ -69,7 +58,7 @@ pub async fn pg_begin(client: &mut Object) -> Result<Transaction<'_>, String> {
}
/// Connects to postgres directly (without a Pool) and returns an open client.
pub async fn pg_connect(config: &PgConnectionConfig) -> Result<Client, String> {
pub async fn pg_connect(config: &PgDatabaseConfig) -> Result<Client, String> {
let mut pg_config = Config::new();
pg_config
.dbname(&config.dbname)
@@ -93,7 +82,7 @@ pub async fn pg_connect(config: &PgConnectionConfig) -> Result<Client, String> {
}
/// Connects to postgres with infinite retries and returns an open client.
pub async fn pg_connect_with_retry(config: &PgConnectionConfig) -> Client {
pub async fn pg_connect_with_retry(config: &PgDatabaseConfig) -> Client {
loop {
match pg_connect(config).await {
Ok(client) => return client,
@@ -154,11 +143,13 @@ pub async fn pg_test_roll_back_migrations(pg_client: &mut tokio_postgres::Client
#[cfg(test)]
mod test {
use config::PgDatabaseConfig;
use crate::{pg_begin, pg_pool, pg_pool_client};
#[tokio::test]
async fn test_pg_connection_and_transaction() -> Result<(), String> {
let pool = pg_pool(&crate::PgConnectionConfig {
let pool = pg_pool(&PgDatabaseConfig {
dbname: "postgres".to_string(),
host: "localhost".to_string(),
port: 5432,
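
For reference, a hedged sketch of the new call shape: `PgDatabaseConfig` now lives in the shared config crate instead of the removed `PgConnectionConfig`. It assumes `pg_pool_client` is the async crate-root helper imported in the test above and that it returns a `Result`.

use chainhook_postgres::{pg_pool, pg_pool_client};
use config::PgDatabaseConfig;

async fn open_pool() -> Result<(), String> {
    let db = PgDatabaseConfig {
        dbname: "runes".to_string(),
        host: "localhost".to_string(),
        port: 5432,
        user: "postgres".to_string(),
        password: Some("postgres".to_string()),
        search_path: None,
        pool_max_size: None,
    };
    // pg_pool takes the shared config type directly.
    let pool = pg_pool(&db)?;
    let _client = pg_pool_client(&pool).await?;
    Ok(())
}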


@@ -6,6 +6,7 @@ license = "GPL-3.0"
edition = "2021"
[dependencies]
config = { path = "../config" }
serde = { version = "1", features = ["rc"] }
serde_json = { version = "1", features = ["arbitrary_precision"] }
serde-hex = "0.1.0"
@@ -20,13 +21,12 @@ reqwest = { version = "0.12", default-features = false, features = [
"json",
"rustls-tls",
] }
tokio = { version = "1.38.1", features = ["full"] }
tokio = { workspace = true }
base58 = "0.2.0"
crossbeam-channel = "0.5.6"
hex = "0.4.3"
zmq = "0.10.0"
lazy_static = "1.4.0"
chainhook-types = { path = "../chainhook-types-rs" }
[dev-dependencies]


@@ -1,6 +1,5 @@
use std::time::Duration;
use crate::observer::BitcoinConfig;
use crate::try_debug;
use crate::utils::Context;
use bitcoincore_rpc::bitcoin::hashes::Hash;
@@ -13,6 +12,7 @@ use chainhook_types::{
BitcoinTransactionData, BitcoinTransactionMetadata, BlockHeader, BlockIdentifier,
TransactionIdentifier,
};
use config::BitcoindConfig;
use hiro_system_kit::slog;
use reqwest::Client as HttpClient;
use serde::Deserialize;
@@ -142,7 +142,7 @@ pub fn build_http_client() -> HttpClient {
pub async fn download_and_parse_block_with_retry(
http_client: &HttpClient,
block_hash: &str,
bitcoin_config: &BitcoinConfig,
bitcoin_config: &BitcoindConfig,
ctx: &Context,
) -> Result<BitcoinBlockFullBreakdown, String> {
let mut errors_count = 0;
@@ -172,7 +172,7 @@ pub async fn download_and_parse_block_with_retry(
pub async fn retrieve_block_hash_with_retry(
http_client: &HttpClient,
block_height: &u64,
bitcoin_config: &BitcoinConfig,
bitcoin_config: &BitcoindConfig,
ctx: &Context,
) -> Result<String, String> {
let mut errors_count = 0;
@@ -202,7 +202,7 @@ pub async fn retrieve_block_hash_with_retry(
pub async fn retrieve_block_hash(
http_client: &HttpClient,
block_height: &u64,
bitcoin_config: &BitcoinConfig,
bitcoin_config: &BitcoindConfig,
_ctx: &Context,
) -> Result<String, String> {
let body = json!({
@@ -213,7 +213,7 @@ pub async fn retrieve_block_hash(
});
let block_hash = http_client
.post(&bitcoin_config.rpc_url)
.basic_auth(&bitcoin_config.username, Some(&bitcoin_config.password))
.basic_auth(&bitcoin_config.rpc_username, Some(&bitcoin_config.rpc_password))
.header("Content-Type", "application/json")
.header("Host", &bitcoin_config.rpc_url[7..])
.json(&body)
@@ -233,7 +233,7 @@ pub async fn retrieve_block_hash(
pub async fn try_download_block_bytes_with_retry(
http_client: HttpClient,
block_height: u64,
bitcoin_config: BitcoinConfig,
bitcoin_config: BitcoindConfig,
ctx: Context,
) -> Result<Vec<u8>, String> {
let block_hash =
@@ -272,7 +272,7 @@ pub struct RpcErrorResponse {
pub async fn download_block(
http_client: &HttpClient,
block_hash: &str,
bitcoin_config: &BitcoinConfig,
bitcoin_config: &BitcoindConfig,
_ctx: &Context,
) -> Result<Vec<u8>, String> {
let body = json!({
@@ -283,7 +283,7 @@ pub async fn download_block(
});
let res = http_client
.post(&bitcoin_config.rpc_url)
.basic_auth(&bitcoin_config.username, Some(&bitcoin_config.password))
.basic_auth(&bitcoin_config.rpc_username, Some(&bitcoin_config.rpc_password))
.header("Content-Type", "application/json")
.header("Host", &bitcoin_config.rpc_url[7..])
.json(&body)
@@ -329,7 +329,7 @@ pub fn parse_downloaded_block(
pub async fn download_and_parse_block(
http_client: &HttpClient,
block_hash: &str,
bitcoin_config: &BitcoinConfig,
bitcoin_config: &BitcoindConfig,
_ctx: &Context,
) -> Result<BitcoinBlockFullBreakdown, String> {
let response = download_block(http_client, block_hash, bitcoin_config, _ctx).await?;
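
A minimal call-site sketch under the new signatures. The module path chainhook_sdk::indexer::bitcoin is an assumption based on the crate-internal imports above; BitcoindConfig fields are as defined in components/config.

use bitcoin::Network;
use chainhook_sdk::indexer::bitcoin::{build_http_client, retrieve_block_hash_with_retry};
use chainhook_sdk::utils::Context;
use config::BitcoindConfig;

async fn hash_at(height: u64, ctx: &Context) -> Result<String, String> {
    let bitcoind = BitcoindConfig {
        network: Network::Bitcoin,
        rpc_url: "http://localhost:8332".to_string(),
        rpc_username: "devnet".to_string(),
        rpc_password: "devnet".to_string(),
        zmq_url: "tcp://localhost:18543".to_string(),
    };
    // Credentials now come from rpc_username / rpc_password instead of username / password.
    retrieve_block_hash_with_retry(&build_http_client(), &height, &bitcoind, ctx).await
}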


@@ -3,9 +3,8 @@ pub mod fork_scratch_pad;
use crate::utils::{AbstractBlock, Context};
use chainhook_types::{
BitcoinBlockSignaling, BitcoinNetwork, BlockHeader, BlockIdentifier, BlockchainEvent,
};
use chainhook_types::{BlockHeader, BlockIdentifier, BlockchainEvent};
use config::BitcoindConfig;
use hiro_system_kit::slog;
use std::collections::VecDeque;
@@ -32,24 +31,14 @@ impl BitcoinChainContext {
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct IndexerConfig {
pub bitcoin_network: BitcoinNetwork,
pub bitcoind_rpc_url: String,
pub bitcoind_rpc_username: String,
pub bitcoind_rpc_password: String,
pub bitcoin_block_signaling: BitcoinBlockSignaling,
pub prometheus_monitoring_port: Option<u16>,
}
pub struct Indexer {
pub config: IndexerConfig,
pub config: BitcoindConfig,
bitcoin_blocks_pool: ForkScratchPad,
pub bitcoin_context: BitcoinChainContext,
}
impl Indexer {
pub fn new(config: IndexerConfig) -> Indexer {
pub fn new(config: BitcoindConfig) -> Indexer {
let bitcoin_blocks_pool = ForkScratchPad::new();
let bitcoin_context = BitcoinChainContext::new();
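
A one-line construction sketch under the new signature, reusing a BitcoindConfig like the one above (the chainhook_sdk::indexer path is an assumption):

use chainhook_sdk::indexer::Indexer;
use config::BitcoindConfig;

fn make_indexer(bitcoind: BitcoindConfig) -> Indexer {
    // The removed IndexerConfig wrapper is gone; the bitcoind config is used directly.
    Indexer::new(bitcoind)
}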


@@ -7,9 +7,10 @@ use crate::indexer::bitcoin::{
use crate::utils::Context;
use chainhook_types::{
BitcoinBlockData, BitcoinBlockSignaling, BitcoinChainEvent, BitcoinChainUpdatedWithBlocksData,
BitcoinBlockData, BitcoinChainEvent, BitcoinChainUpdatedWithBlocksData,
BitcoinChainUpdatedWithReorgData, BitcoinNetwork, BlockIdentifier, BlockchainEvent,
};
use config::BitcoindConfig;
use hiro_system_kit;
use hiro_system_kit::slog;
use rocket::serde::Deserialize;
@@ -32,153 +33,6 @@ pub enum Event {
BitcoinChainEvent(BitcoinChainEvent),
}
#[derive(Debug, Clone)]
pub struct EventObserverConfig {
pub bitcoind_rpc_username: String,
pub bitcoind_rpc_password: String,
pub bitcoind_rpc_url: String,
pub bitcoin_block_signaling: BitcoinBlockSignaling,
pub bitcoin_network: BitcoinNetwork,
}
/// A builder that is used to create a general purpose [EventObserverConfig].
///
/// ## Examples
/// ```
/// use chainhook_sdk::observer::EventObserverConfig;
/// use chainhook_sdk::observer::EventObserverConfigBuilder;
///
/// fn get_config() -> Result<EventObserverConfig, String> {
/// EventObserverConfigBuilder::new()
/// .bitcoind_rpc_password("my_password")
/// .bitcoin_network("mainnet")
/// .finish()
/// }
/// ```
#[derive(Deserialize, Debug, Clone)]
pub struct EventObserverConfigBuilder {
pub bitcoind_rpc_username: Option<String>,
pub bitcoind_rpc_password: Option<String>,
pub bitcoind_rpc_url: Option<String>,
pub bitcoind_zmq_url: Option<String>,
pub bitcoin_network: Option<String>,
}
impl Default for EventObserverConfigBuilder {
fn default() -> Self {
Self::new()
}
}
impl EventObserverConfigBuilder {
pub fn new() -> Self {
EventObserverConfigBuilder {
bitcoind_rpc_username: None,
bitcoind_rpc_password: None,
bitcoind_rpc_url: None,
bitcoind_zmq_url: None,
bitcoin_network: None,
}
}
/// Sets the bitcoind node's RPC username.
pub fn bitcoind_rpc_username(&mut self, username: &str) -> &mut Self {
self.bitcoind_rpc_username = Some(username.to_string());
self
}
/// Sets the bitcoind node's RPC password.
pub fn bitcoind_rpc_password(&mut self, password: &str) -> &mut Self {
self.bitcoind_rpc_password = Some(password.to_string());
self
}
/// Sets the bitcoind node's RPC url.
pub fn bitcoind_rpc_url(&mut self, url: &str) -> &mut Self {
self.bitcoind_rpc_url = Some(url.to_string());
self
}
/// Sets the bitcoind node's ZMQ url, used by the observer to receive new block events from bitcoind.
pub fn bitcoind_zmq_url(&mut self, url: &str) -> &mut Self {
self.bitcoind_zmq_url = Some(url.to_string());
self
}
/// Sets the Bitcoin network. Must be a valid bitcoin network string according to [BitcoinNetwork::from_str].
pub fn bitcoin_network(&mut self, network: &str) -> &mut Self {
self.bitcoin_network = Some(network.to_string());
self
}
/// Attempts to convert a [EventObserverConfigBuilder] instance into an [EventObserverConfig], filling in
/// defaults as necessary according to [EventObserverConfig::default].
///
/// This function will return an error if the `bitcoin_network` or `stacks_network` strings are set and are not a valid [BitcoinNetwork] or [StacksNetwork].
///
pub fn finish(&self) -> Result<EventObserverConfig, String> {
EventObserverConfig::new_using_overrides(Some(self))
}
}
impl EventObserverConfig {
pub fn default() -> Self {
EventObserverConfig {
bitcoind_rpc_username: "devnet".into(),
bitcoind_rpc_password: "devnet".into(),
bitcoind_rpc_url: "http://localhost:18443".into(),
bitcoin_block_signaling: BitcoinBlockSignaling::ZeroMQ(
"tcp://localhost:18543".to_string(),
),
bitcoin_network: BitcoinNetwork::Regtest,
}
}
pub fn get_bitcoin_config(&self) -> BitcoinConfig {
BitcoinConfig {
username: self.bitcoind_rpc_username.clone(),
password: self.bitcoind_rpc_password.clone(),
rpc_url: self.bitcoind_rpc_url.clone(),
network: self.bitcoin_network.clone(),
bitcoin_block_signaling: self.bitcoin_block_signaling.clone(),
}
}
/// Helper to allow overriding some default fields in creating a new EventObserverConfig.
///
/// *Note: This is used by external crates, so it should not be removed, even if not used internally by Chainhook.*
pub fn new_using_overrides(
overrides: Option<&EventObserverConfigBuilder>,
) -> Result<EventObserverConfig, String> {
let bitcoin_network =
if let Some(network) = overrides.and_then(|c| c.bitcoin_network.as_ref()) {
BitcoinNetwork::from_str(network)?
} else {
BitcoinNetwork::Regtest
};
let config = EventObserverConfig {
bitcoind_rpc_username: overrides
.and_then(|c| c.bitcoind_rpc_username.clone())
.unwrap_or_else(|| "devnet".to_string()),
bitcoind_rpc_password: overrides
.and_then(|c| c.bitcoind_rpc_password.clone())
.unwrap_or_else(|| "devnet".to_string()),
bitcoind_rpc_url: overrides
.and_then(|c| c.bitcoind_rpc_url.clone())
.unwrap_or_else(|| "http://localhost:18443".to_string()),
bitcoin_block_signaling: overrides
.and_then(|c| c.bitcoind_zmq_url.as_ref())
.map(|url| BitcoinBlockSignaling::ZeroMQ(url.clone()))
.unwrap_or_else(|| {
BitcoinBlockSignaling::ZeroMQ("tcp://localhost:18543".to_string())
}),
bitcoin_network,
};
Ok(config)
}
}
#[derive(Clone, Debug, PartialEq)]
pub enum ObserverCommand {
StandardizeBitcoinBlock(BitcoinBlockFullBreakdown),
@@ -220,15 +74,6 @@ pub struct BitcoinRPCRequest {
pub jsonrpc: serde_json::Value,
}
#[derive(Debug, Clone)]
pub struct BitcoinConfig {
pub username: String,
pub password: String,
pub rpc_url: String,
pub network: BitcoinNetwork,
pub bitcoin_block_signaling: BitcoinBlockSignaling,
}
#[derive(Debug, Clone)]
pub struct BitcoinBlockDataCached {
pub block: BitcoinBlockData,
@@ -303,14 +148,14 @@ impl ObserverSidecar {
/// ### Examples
/// ```
/// use chainhook_sdk::observer::EventObserverBuilder;
/// use chainhook_sdk::observer::EventObserverConfig;
/// use chainhook_sdk::observer::ObserverCommand;
/// use chainhook_sdk::utils::Context;
/// use config::BitcoindConfig;
/// use std::error::Error;
/// use std::sync::mpsc::{Receiver, Sender};
///
/// fn start_event_observer(
/// config: EventObserverConfig,
/// config: BitcoindConfig,
/// observer_commands_tx: &Sender<ObserverCommand>,
/// observer_commands_rx: Receiver<ObserverCommand>,
/// ctx: &Context,
@@ -325,7 +170,7 @@ impl ObserverSidecar {
/// }
/// ```
pub struct EventObserverBuilder {
config: EventObserverConfig,
config: BitcoindConfig,
observer_commands_tx: Sender<ObserverCommand>,
observer_commands_rx: Receiver<ObserverCommand>,
ctx: Context,
@@ -335,7 +180,7 @@ pub struct EventObserverBuilder {
impl EventObserverBuilder {
pub fn new(
config: EventObserverConfig,
config: BitcoindConfig,
observer_commands_tx: &Sender<ObserverCommand>,
observer_commands_rx: Receiver<ObserverCommand>,
ctx: &Context,
@@ -382,17 +227,19 @@ impl EventObserverBuilder {
/// Spawns a thread to observe blockchain events. Use [EventObserverBuilder] to configure easily.
pub fn start_event_observer(
config: EventObserverConfig,
config: BitcoindConfig,
observer_commands_tx: Sender<ObserverCommand>,
observer_commands_rx: Receiver<ObserverCommand>,
observer_events_tx: Option<crossbeam_channel::Sender<ObserverEvent>>,
observer_sidecar: Option<ObserverSidecar>,
ctx: Context,
) -> Result<(), Box<dyn Error>> {
match config.bitcoin_block_signaling {
BitcoinBlockSignaling::ZeroMQ(ref url) => {
ctx.try_log(|logger| {
slog::info!(logger, "Observing Bitcoin chain events via ZeroMQ: {}", url)
slog::info!(
logger,
"Observing Bitcoin chain events via ZeroMQ: {}",
config.zmq_url
)
});
let context_cloned = ctx.clone();
let event_observer_config_moved = config.clone();
@@ -423,13 +270,11 @@ pub fn start_event_observer(
}
})
.expect("unable to spawn thread");
}
}
Ok(())
}
pub async fn start_bitcoin_event_observer(
config: EventObserverConfig,
config: BitcoindConfig,
_observer_commands_tx: Sender<ObserverCommand>,
observer_commands_rx: Receiver<ObserverCommand>,
observer_events_tx: Option<crossbeam_channel::Sender<ObserverEvent>>,
@@ -461,7 +306,7 @@ pub enum HandleBlock {
}
pub async fn start_observer_commands_handler(
config: EventObserverConfig,
config: BitcoindConfig,
observer_commands_rx: Receiver<ObserverCommand>,
observer_events_tx: Option<crossbeam_channel::Sender<ObserverEvent>>,
ingestion_shutdown: Option<Shutdown>,
@@ -496,7 +341,7 @@ pub async fn start_observer_commands_handler(
let block = loop {
match standardize_bitcoin_block(
block_data.clone(),
&config.bitcoin_network,
&BitcoinNetwork::from_network(config.network),
&ctx,
) {
Ok(block) => break Some(block),
@@ -512,7 +357,7 @@ pub async fn start_observer_commands_handler(
block_data = match download_and_parse_block_with_retry(
&http_client,
&block_hash,
&config.get_bitcoin_config(),
&config,
&ctx,
)
.await
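
With EventObserverConfig removed, wiring the observer reduces to passing BitcoindConfig straight through. A hedged sketch of the channel setup only, with the optional events channel and sidecar left as None:

use std::sync::mpsc::channel;
use chainhook_sdk::observer::{start_event_observer, ObserverCommand};
use config::BitcoindConfig;
use chainhook_sdk::utils::Context;

fn spawn_observer(bitcoind: BitcoindConfig, ctx: Context) -> Result<(), String> {
    let (commands_tx, commands_rx) = channel::<ObserverCommand>();
    // Signature per this diff: config, command channel ends, optional events tx, optional sidecar, ctx.
    start_event_observer(bitcoind, commands_tx, commands_rx, None, None, ctx)
        .map_err(|e| e.to_string())
}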


@@ -1,4 +1,4 @@
use chainhook_types::BitcoinBlockSignaling;
use config::BitcoindConfig;
use hiro_system_kit::slog;
use std::sync::mpsc::Sender;
use zmq::Socket;
@@ -13,7 +13,7 @@ use crate::{
};
use std::collections::VecDeque;
use super::{EventObserverConfig, ObserverCommand};
use super::ObserverCommand;
fn new_zmq_socket() -> Socket {
let context = zmq::Context::new();
@@ -32,14 +32,11 @@ fn new_zmq_socket() -> Socket {
}
pub async fn start_zeromq_runloop(
config: &EventObserverConfig,
config: &BitcoindConfig,
observer_commands_tx: Sender<ObserverCommand>,
ctx: &Context,
) {
let BitcoinBlockSignaling::ZeroMQ(ref bitcoind_zmq_url) = config.bitcoin_block_signaling;
let bitcoind_zmq_url = bitcoind_zmq_url.clone();
let bitcoin_config = config.get_bitcoin_config();
let bitcoind_zmq_url = config.zmq_url.clone();
let http_client = build_http_client();
try_info!(
@@ -88,7 +85,7 @@ pub async fn start_zeromq_runloop(
let block = match download_and_parse_block_with_retry(
&http_client,
&block_hash,
&bitcoin_config,
&config,
ctx,
)
.await


@@ -1,19 +1,16 @@
use std::{thread::sleep, time::Duration};
use bitcoincore_rpc::{Auth, Client, RpcApi};
use hiro_system_kit::slog;
use crate::utils::Context;
use crate::indexer::IndexerConfig;
use bitcoincore_rpc::{Auth, Client, RpcApi};
use config::BitcoindConfig;
use hiro_system_kit::slog;
use crate::{try_error, try_info};
fn bitcoind_get_client(config: &IndexerConfig, ctx: &Context) -> Client {
fn bitcoind_get_client(config: &BitcoindConfig, ctx: &Context) -> Client {
loop {
let auth = Auth::UserPass(
config.bitcoind_rpc_username.clone(),
config.bitcoind_rpc_password.clone(),
);
match Client::new(&config.bitcoind_rpc_url, auth) {
let auth = Auth::UserPass(config.rpc_username.clone(), config.rpc_password.clone());
match Client::new(&config.rpc_url, auth) {
Ok(con) => {
return con;
}
@@ -26,7 +23,7 @@ fn bitcoind_get_client(config: &IndexerConfig, ctx: &Context) -> Client {
}
/// Retrieves the block height from bitcoind.
pub fn bitcoind_get_block_height(config: &IndexerConfig, ctx: &Context) -> u64 {
pub fn bitcoind_get_block_height(config: &BitcoindConfig, ctx: &Context) -> u64 {
let bitcoin_rpc = bitcoind_get_client(config, ctx);
loop {
match bitcoin_rpc.get_blockchain_info() {
@@ -46,7 +43,7 @@ pub fn bitcoind_get_block_height(config: &IndexerConfig, ctx: &Context) -> u64 {
}
/// Checks if bitcoind is still synchronizing blocks and waits until it's finished if that is the case.
pub fn bitcoind_wait_for_chain_tip(config: &IndexerConfig, ctx: &Context) {
pub fn bitcoind_wait_for_chain_tip(config: &BitcoindConfig, ctx: &Context) {
let bitcoin_rpc = bitcoind_get_client(config, ctx);
let mut confirmations = 0;
loop {
@@ -63,7 +60,10 @@ pub fn bitcoind_wait_for_chain_tip(config: &IndexerConfig, ctx: &Context) {
try_info!(ctx, "bitcoind: Verifying chain tip");
} else {
confirmations = 0;
try_info!(ctx, "bitcoind: Node has not reached chain tip, trying again");
try_info!(
ctx,
"bitcoind: Node has not reached chain tip, trying again"
);
}
}
Err(e) => {


@@ -1,6 +1,7 @@
use crate::bitcoin::{TxIn, TxOut};
use crate::ordinals::OrdinalOperation;
use crate::Brc20Operation;
use bitcoin::Network;
use schemars::JsonSchema;
use std::cmp::Ordering;
use std::fmt::Display;
@@ -446,17 +447,15 @@ impl BitcoinNetwork {
BitcoinNetwork::Signet => "signet",
}
}
}
#[derive(Deserialize, Debug, Clone, PartialEq)]
pub enum BitcoinBlockSignaling {
ZeroMQ(String),
}
impl BitcoinBlockSignaling {
pub fn is_bitcoind_zmq_block_signaling_expected(&self) -> bool {
match &self {
_ => false,
pub fn from_network(network: Network) -> BitcoinNetwork {
match network {
Network::Bitcoin => BitcoinNetwork::Mainnet,
Network::Testnet => BitcoinNetwork::Testnet,
Network::Testnet4 => BitcoinNetwork::Testnet,
Network::Signet => BitcoinNetwork::Signet,
Network::Regtest => BitcoinNetwork::Regtest,
_ => unreachable!(),
}
}
}
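
A quick sketch of the new conversion; note that both testnet3 and testnet4 collapse to BitcoinNetwork::Testnet:

use bitcoin::Network;
use chainhook_types::BitcoinNetwork;

fn demo() {
    // rust-bitcoin's Network maps onto the indexer's own BitcoinNetwork enum.
    let net = BitcoinNetwork::from_network(Network::Testnet4);
    assert!(matches!(net, BitcoinNetwork::Testnet));
}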


@@ -1,15 +1,17 @@
[package]
name = "ordhook-cli"
name = "cli"
version.workspace = true
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[[bin]]
name = "ordhook"
name = "bitcoin-indexer"
path = "src/main.rs"
[dependencies]
config = { path = "../config" }
ordhook = { path = "../ordhook-core" }
runes = { path = "../runes" }
chainhook-types = { path = "../chainhook-types-rs" }
chainhook-sdk = { path = "../chainhook-sdk" }
hex = "0.4.3"


@@ -0,0 +1,111 @@
use clap::{Parser, Subcommand};
#[derive(Parser, Debug)]
#[clap(name = "bitcoin-indexer", author, version, about, long_about = None)]
pub enum Protocol {
/// Ordinals commands
#[clap(subcommand)]
Ordinals(Command),
/// Runes commands
#[clap(subcommand)]
Runes(Command),
/// Generate a new configuration file
#[clap(subcommand)]
Config(ConfigCommand),
}
#[derive(Subcommand, PartialEq, Clone, Debug)]
pub enum Command {
/// Stream and index Bitcoin blocks
#[clap(subcommand)]
Service(ServiceCommand),
/// Perform maintenance operations on local index
#[clap(subcommand)]
Index(IndexCommand),
/// Database operations
#[clap(subcommand)]
Database(DatabaseCommand),
}
#[derive(Subcommand, PartialEq, Clone, Debug)]
pub enum DatabaseCommand {
/// Migrates database
#[clap(name = "migrate", bin_name = "migrate")]
Migrate(MigrateDatabaseCommand),
}
#[derive(Parser, PartialEq, Clone, Debug)]
pub struct MigrateDatabaseCommand {
#[clap(long = "config-path")]
pub config_path: String,
}
#[derive(Subcommand, PartialEq, Clone, Debug)]
#[clap(bin_name = "config", aliases = &["config"])]
pub enum ConfigCommand {
/// Generate new config
#[clap(name = "new", bin_name = "new", aliases = &["generate"])]
New(NewConfigCommand),
}
#[derive(Parser, PartialEq, Clone, Debug)]
pub struct NewConfigCommand {
/// Target Regtest network
#[clap(
long = "regtest",
conflicts_with = "testnet",
conflicts_with = "mainnet"
)]
pub regtest: bool,
/// Target Testnet network
#[clap(
long = "testnet",
conflicts_with = "regtest",
conflicts_with = "mainnet"
)]
pub testnet: bool,
/// Target Mainnet network
#[clap(
long = "mainnet",
conflicts_with = "testnet",
conflicts_with = "regtest"
)]
pub mainnet: bool,
}
#[derive(Subcommand, PartialEq, Clone, Debug)]
pub enum ServiceCommand {
/// Start service
#[clap(name = "start", bin_name = "start")]
Start(ServiceStartCommand),
}
#[derive(Parser, PartialEq, Clone, Debug)]
pub struct ServiceStartCommand {
#[clap(long = "config-path")]
pub config_path: String,
}
#[derive(Subcommand, PartialEq, Clone, Debug)]
pub enum IndexCommand {
/// Sync index to latest bitcoin block
#[clap(name = "sync", bin_name = "sync")]
Sync(SyncIndexCommand),
/// Rollback index blocks
#[clap(name = "rollback", bin_name = "drop")]
Rollback(RollbackIndexCommand),
}
#[derive(Parser, PartialEq, Clone, Debug)]
pub struct SyncIndexCommand {
#[clap(long = "config-path")]
pub config_path: String,
}
#[derive(Parser, PartialEq, Clone, Debug)]
pub struct RollbackIndexCommand {
/// Number of blocks to rollback from index tip
pub blocks: u32,
#[clap(long = "config-path")]
pub config_path: String,
}
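
A parse sanity sketch for the new CLI surface, using clap's Parser::parse_from (the argv below is illustrative):

use clap::Parser;

fn demo() {
    // Equivalent to: bitcoin-indexer runes service start --config-path Indexer.toml
    let cmd = Protocol::parse_from([
        "bitcoin-indexer", "runes", "service", "start",
        "--config-path", "Indexer.toml",
    ]);
    assert!(matches!(cmd, Protocol::Runes(Command::Service(_))));
}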


@@ -0,0 +1,177 @@
use chainhook_sdk::utils::Context;
use clap::Parser;
use commands::{Command, ConfigCommand, DatabaseCommand, IndexCommand, Protocol, ServiceCommand};
use config::generator::generate_toml_config;
use config::Config;
use hiro_system_kit;
use ordhook::db::migrate_dbs;
use ordhook::service::Service;
use ordhook::try_info;
use std::path::PathBuf;
use std::thread::sleep;
use std::time::Duration;
use std::{process, u64};
mod commands;
pub fn main() {
let logger = hiro_system_kit::log::setup_logger();
let _guard = hiro_system_kit::log::setup_global_logger(logger.clone());
let ctx = Context {
logger: Some(logger),
tracer: false,
};
let opts: Protocol = match Protocol::try_parse() {
Ok(opts) => opts,
Err(e) => {
println!("{}", e);
process::exit(1);
}
};
if let Err(e) = hiro_system_kit::nestable_block_on(handle_command(opts, &ctx)) {
error!(ctx.expect_logger(), "{e}");
std::thread::sleep(std::time::Duration::from_millis(500));
process::exit(1);
}
}
fn check_maintenance_mode(ctx: &Context) {
let maintenance_enabled = std::env::var("ORDHOOK_MAINTENANCE").unwrap_or("0".into());
if maintenance_enabled.eq("1") {
try_info!(
ctx,
"Entering maintenance mode. Unset ORDHOOK_MAINTENANCE and reboot to resume operations"
);
sleep(Duration::from_secs(u64::MAX))
}
}
fn confirm_rollback(current_chain_tip: u64, blocks_to_rollback: u32) -> Result<(), String> {
println!("Index chain tip is at #{current_chain_tip}");
println!(
"{} blocks will be dropped. New index chain tip will be at #{}. Confirm? [Y/n]",
blocks_to_rollback,
current_chain_tip - blocks_to_rollback as u64
);
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).unwrap();
if buffer.starts_with('n') {
return Err("Deletion aborted".to_string());
}
Ok(())
}
async fn handle_command(opts: Protocol, ctx: &Context) -> Result<(), String> {
match opts {
Protocol::Ordinals(subcmd) => match subcmd {
Command::Service(subcmd) => match subcmd {
ServiceCommand::Start(cmd) => {
check_maintenance_mode(ctx);
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_ordinals_config()?;
migrate_dbs(&config, ctx).await?;
let mut service = Service::new(&config, ctx);
// TODO(rafaelcr): This only works if there's a rocksdb file already containing blocks previous to the first
// inscription height.
let start_block = service.get_index_chain_tip().await?;
try_info!(ctx, "Index chain tip is at #{start_block}");
return service.run(false).await;
}
},
Command::Index(index_command) => match index_command {
IndexCommand::Sync(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_ordinals_config()?;
migrate_dbs(&config, ctx).await?;
let service = Service::new(&config, ctx);
service.catch_up_to_bitcoin_chain_tip().await?;
}
IndexCommand::Rollback(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_ordinals_config()?;
let service = Service::new(&config, ctx);
let chain_tip = service.get_index_chain_tip().await?;
confirm_rollback(chain_tip, cmd.blocks)?;
let service = Service::new(&config, ctx);
let block_heights: Vec<u64> =
((chain_tip - cmd.blocks as u64)..=chain_tip).collect();
service.rollback(&block_heights).await?;
println!("{} blocks dropped", cmd.blocks);
}
},
Command::Database(database_command) => match database_command {
DatabaseCommand::Migrate(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_ordinals_config()?;
migrate_dbs(&config, ctx).await?;
}
},
},
Protocol::Runes(subcmd) => match subcmd {
Command::Service(service_command) => match service_command {
ServiceCommand::Start(cmd) => {
check_maintenance_mode(ctx);
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_runes_config()?;
return runes::service::start_service(&config, ctx).await;
}
},
Command::Index(index_command) => match index_command {
IndexCommand::Sync(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_runes_config()?;
runes::service::catch_up_to_bitcoin_chain_tip(&config, ctx).await?;
}
IndexCommand::Rollback(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_runes_config()?;
let chain_tip = runes::service::get_index_chain_tip(&config, ctx).await;
confirm_rollback(chain_tip, cmd.blocks)?;
let mut pg_client = runes::db::pg_connect(&config, false, &ctx).await;
runes::scan::bitcoin::drop_blocks(
chain_tip - cmd.blocks as u64,
chain_tip,
&mut pg_client,
&ctx,
)
.await;
}
},
Command::Database(database_command) => match database_command {
DatabaseCommand::Migrate(cmd) => {
let config = Config::from_file_path(&cmd.config_path)?;
config.assert_runes_config()?;
let _ = runes::db::pg_connect(&config, true, ctx).await;
}
},
},
Protocol::Config(subcmd) => match subcmd {
ConfigCommand::New(cmd) => {
use std::fs::File;
use std::io::Write;
let network = match (cmd.mainnet, cmd.testnet, cmd.regtest) {
(true, false, false) => "mainnet",
(false, true, false) => "testnet",
(false, false, true) => "regtest",
_ => return Err("Invalid network".into()),
};
let config_content = generate_toml_config(network);
let mut file_path = PathBuf::new();
file_path.push("Indexer.toml");
let mut file = File::create(&file_path)
.map_err(|e| format!("unable to open file {}\n{}", file_path.display(), e))?;
file.write_all(config_content.as_bytes())
.map_err(|e| format!("unable to write file {}\n{}", file_path.display(), e))?;
println!("Created file Indexer.toml");
}
},
}
Ok(())
}
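
For a concrete feel of the rollback math above: with the index tip at #850_000 and blocks = 3, the prompt advertises a new tip of #849_997, and the inclusive range drops four heights (the requested blocks plus the current tip). The range in isolation:

fn rollback_heights(chain_tip: u64, blocks: u32) -> Vec<u64> {
    // Mirrors handle_command: inclusive range from (tip - blocks) up to the tip itself.
    ((chain_tip - blocks as u64)..=chain_tip).collect()
}

// rollback_heights(850_000, 3) == [849_997, 849_998, 849_999, 850_000]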


@@ -1,12 +1,8 @@
#[macro_use]
extern crate serde_derive;
pub mod cli;
#[macro_use]
extern crate hiro_system_kit;
pub mod cli;
pub mod config;
#[cfg(feature = "tcmalloc")]
#[global_allocator]
static GLOBAL: tcmalloc2::TcMalloc = tcmalloc2::TcMalloc;


@@ -0,0 +1,13 @@
[package]
name = "config"
version.workspace = true
edition = "2021"
[dependencies]
bitcoin = { workspace = true }
serde = "1"
serde_json = "1"
serde_derive = "1"
hiro-system-kit = { workspace = true }
num_cpus = "1.16.0"
toml = { version = "0.5.11", features = ["preserve_order"] }


@@ -0,0 +1,216 @@
use std::path::PathBuf;
use bitcoin::Network;
use crate::toml::ConfigToml;
pub const DEFAULT_WORKING_DIR: &str = "data";
pub const DEFAULT_ULIMIT: usize = 2048;
pub const DEFAULT_MEMORY_AVAILABLE: usize = 8;
pub const DEFAULT_BITCOIND_RPC_THREADS: usize = 4;
pub const DEFAULT_BITCOIND_RPC_TIMEOUT: u32 = 15;
pub const DEFAULT_LRU_CACHE_SIZE: usize = 50_000;
#[derive(Clone, Debug)]
pub struct Config {
pub bitcoind: BitcoindConfig,
pub ordinals: Option<OrdinalsConfig>,
pub runes: Option<RunesConfig>,
pub resources: ResourcesConfig,
pub storage: StorageConfig,
pub metrics: Option<MetricsConfig>,
}
#[derive(Clone, Debug)]
pub struct OrdinalsConfig {
pub db: PgDatabaseConfig,
pub meta_protocols: Option<OrdinalsMetaProtocolsConfig>,
}
#[derive(Clone, Debug)]
pub struct OrdinalsMetaProtocolsConfig {
pub brc20: Option<OrdinalsBrc20Config>,
}
#[derive(Clone, Debug)]
pub struct OrdinalsBrc20Config {
pub enabled: bool,
pub lru_cache_size: usize,
pub db: PgDatabaseConfig,
}
#[derive(Clone, Debug)]
pub struct RunesConfig {
pub lru_cache_size: usize,
pub db: PgDatabaseConfig,
}
#[derive(Clone, Debug)]
pub struct BitcoindConfig {
pub network: Network,
pub rpc_url: String,
pub rpc_username: String,
pub rpc_password: String,
pub zmq_url: String,
}
/// A Postgres configuration for a single database.
#[derive(Clone, Debug)]
pub struct PgDatabaseConfig {
pub dbname: String,
pub host: String,
pub port: u16,
pub user: String,
pub password: Option<String>,
pub search_path: Option<String>,
pub pool_max_size: Option<usize>,
}
#[derive(Clone, Debug)]
pub struct StorageConfig {
pub working_dir: String,
}
#[derive(Clone, Debug)]
pub struct MetricsConfig {
pub enabled: bool,
pub prometheus_port: u16,
}
#[derive(Deserialize, Debug, Clone)]
pub struct ResourcesConfig {
pub ulimit: usize,
pub cpu_core_available: usize,
pub memory_available: usize,
pub bitcoind_rpc_threads: usize,
pub bitcoind_rpc_timeout: u32,
}
impl ResourcesConfig {
pub fn get_optimal_thread_pool_capacity(&self) -> usize {
// Generally speaking, when dealing with a pool we need one thread for
// feeding the thread pool and possibly another thread for
// handling the "reduce" step.
self.cpu_core_available.saturating_sub(2).max(1)
}
}
impl Config {
pub fn from_file_path(file_path: &str) -> Result<Config, String> {
ConfigToml::config_from_file_path(file_path)
}
pub fn expected_cache_path(&self) -> PathBuf {
let mut destination_path = PathBuf::new();
destination_path.push(&self.storage.working_dir);
destination_path
}
pub fn devnet_default() -> Config {
Config {
storage: StorageConfig {
working_dir: default_cache_path(),
},
resources: ResourcesConfig {
cpu_core_available: num_cpus::get(),
memory_available: DEFAULT_MEMORY_AVAILABLE,
ulimit: DEFAULT_ULIMIT,
bitcoind_rpc_threads: DEFAULT_BITCOIND_RPC_THREADS,
bitcoind_rpc_timeout: DEFAULT_BITCOIND_RPC_TIMEOUT,
},
bitcoind: BitcoindConfig {
rpc_url: "http://0.0.0.0:18443".into(),
rpc_username: "devnet".into(),
rpc_password: "devnet".into(),
network: Network::Regtest,
zmq_url: "http://0.0.0.0:18543".into(),
},
ordinals: Some(OrdinalsConfig {
db: PgDatabaseConfig {
dbname: "ordinals".to_string(),
host: "localhost".to_string(),
port: 5432,
user: "postgres".to_string(),
password: Some("postgres".to_string()),
search_path: None,
pool_max_size: None,
},
meta_protocols: None,
}),
runes: Some(RunesConfig {
lru_cache_size: DEFAULT_LRU_CACHE_SIZE,
db: PgDatabaseConfig {
dbname: "runes".to_string(),
host: "localhost".to_string(),
port: 5432,
user: "postgres".to_string(),
password: Some("postgres".to_string()),
search_path: None,
pool_max_size: None,
},
}),
metrics: Some(MetricsConfig {
enabled: true,
prometheus_port: 9153,
}),
}
}
pub fn testnet_default() -> Config {
let mut default = Config::devnet_default();
default.bitcoind.network = Network::Testnet;
default
}
pub fn mainnet_default() -> Config {
let mut default = Config::devnet_default();
default.bitcoind.rpc_url = "http://localhost:8332".into();
default.bitcoind.network = Network::Bitcoin;
default
}
// TODO: Move this to a shared test utils component
pub fn test_default() -> Config {
let mut config = Self::mainnet_default();
config.storage.working_dir = "tmp".to_string();
config.resources.bitcoind_rpc_threads = 1;
config.resources.cpu_core_available = 1;
config
}
pub fn ordinals_brc20_config(&self) -> Option<&OrdinalsBrc20Config> {
if let Some(OrdinalsConfig {
meta_protocols:
Some(OrdinalsMetaProtocolsConfig {
brc20: Some(brc20), ..
}),
..
}) = &self.ordinals
{
if brc20.enabled {
return Some(brc20);
}
}
None
}
pub fn assert_ordinals_config(&self) -> Result<(), String> {
if self.ordinals.is_none() {
return Err(format!("Config entry for `ordinals` not found in config file."));
}
Ok(())
}
pub fn assert_runes_config(&self) -> Result<(), String> {
if self.runes.is_none() {
return Err(format!("Config entry for `runes` not found in config file."));
}
Ok(())
}
}
pub fn default_cache_path() -> String {
let mut cache_path = std::env::current_dir().expect("unable to get current dir");
cache_path.push("data");
format!("{}", cache_path.display())
}
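
A small sketch of the defaults API defined above; the fields are public, so the presets can be tweaked in place:

use config::Config;

fn tuned_mainnet() -> Config {
    let mut config = Config::mainnet_default();
    config.resources.cpu_core_available = 8;
    // Two cores are reserved for the feeder and "reduce" threads.
    assert_eq!(config.resources.get_optimal_thread_pool_capacity(), 6);
    config
}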


@@ -0,0 +1,55 @@
pub fn generate_toml_config(network: &str) -> String {
let conf = format!(
r#"[storage]
working_dir = "tmp"
[metrics]
enabled = true
prometheus_port = 9153
[ordinals.db]
database = "ordinals"
host = "localhost"
port = 5432
username = "postgres"
password = "postgres"
[ordinals.meta_protocols.brc20]
enabled = true
lru_cache_size = 10000
[ordinals.meta_protocols.brc20.db]
database = "brc20"
host = "localhost"
port = 5432
username = "postgres"
password = "postgres"
[runes]
lru_cache_size = 10000
[runes.db]
database = "runes"
host = "localhost"
port = 5432
username = "postgres"
password = "postgres"
[bitcoind]
network = "{network}"
rpc_url = "http://localhost:8332"
rpc_username = "devnet"
rpc_password = "devnet"
zmq_url = "tcp://0.0.0.0:18543"
[resources]
ulimit = 2048
cpu_core_available = 6
memory_available = 16
bitcoind_rpc_threads = 2
bitcoind_rpc_timeout = 15
"#,
network = network.to_lowercase(),
);
conf
}
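
This mirrors the CLI's `config new` path; a sketch of writing the generated file, with std::fs::write standing in for the File/Write sequence used in main.rs:

use config::generator::generate_toml_config;

fn write_default_config() -> std::io::Result<()> {
    let toml = generate_toml_config("mainnet");
    std::fs::write("Indexer.toml", toml)
}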


@@ -0,0 +1,8 @@
#[macro_use]
extern crate serde_derive;
pub mod toml;
pub mod generator;
mod config;
pub use config::*;


@@ -0,0 +1,199 @@
use std::fs::File;
use std::io::{BufReader, Read};
use bitcoin::Network;
use crate::{
BitcoindConfig, Config, MetricsConfig, OrdinalsBrc20Config, OrdinalsConfig,
OrdinalsMetaProtocolsConfig, PgDatabaseConfig, ResourcesConfig, RunesConfig, StorageConfig,
DEFAULT_BITCOIND_RPC_THREADS, DEFAULT_BITCOIND_RPC_TIMEOUT, DEFAULT_LRU_CACHE_SIZE,
DEFAULT_MEMORY_AVAILABLE, DEFAULT_ULIMIT, DEFAULT_WORKING_DIR,
};
#[derive(Deserialize, Clone, Debug)]
pub struct PgDatabaseConfigToml {
pub database: String,
pub host: String,
pub port: u16,
pub username: String,
pub password: Option<String>,
pub search_path: Option<String>,
pub pool_max_size: Option<usize>,
}
impl PgDatabaseConfigToml {
fn to_config(self) -> PgDatabaseConfig {
PgDatabaseConfig {
dbname: self.database,
host: self.host,
port: self.port,
user: self.username,
password: self.password,
search_path: self.search_path,
pool_max_size: self.pool_max_size,
}
}
}
#[derive(Deserialize, Clone, Debug)]
pub struct OrdinalsConfigToml {
pub db: PgDatabaseConfigToml,
pub meta_protocols: Option<OrdinalsMetaProtocolsConfigToml>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct OrdinalsMetaProtocolsConfigToml {
pub brc20: Option<OrdinalsBrc20ConfigToml>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct OrdinalsBrc20ConfigToml {
pub enabled: bool,
pub lru_cache_size: Option<usize>,
pub db: PgDatabaseConfigToml,
}
#[derive(Deserialize, Clone, Debug)]
pub struct RunesConfigToml {
pub lru_cache_size: Option<usize>,
pub db: PgDatabaseConfigToml,
}
#[derive(Deserialize, Debug, Clone)]
pub struct StorageConfigToml {
pub working_dir: Option<String>,
}
#[derive(Deserialize, Debug, Clone)]
pub struct ResourcesConfigToml {
pub ulimit: Option<usize>,
pub cpu_core_available: Option<usize>,
pub memory_available: Option<usize>,
pub bitcoind_rpc_threads: Option<usize>,
pub bitcoind_rpc_timeout: Option<u32>,
}
#[derive(Deserialize, Debug, Clone)]
pub struct BitcoindConfigToml {
pub network: String,
pub rpc_url: String,
pub rpc_username: String,
pub rpc_password: String,
pub zmq_url: String,
}
#[derive(Deserialize, Debug, Clone)]
pub struct MetricsConfigToml {
pub enabled: bool,
pub prometheus_port: u16,
}
#[derive(Deserialize, Debug, Clone)]
pub struct ConfigToml {
pub storage: StorageConfigToml,
pub ordinals: Option<OrdinalsConfigToml>,
pub runes: Option<RunesConfigToml>,
pub bitcoind: BitcoindConfigToml,
pub resources: ResourcesConfigToml,
pub metrics: Option<MetricsConfigToml>,
}
impl ConfigToml {
pub fn config_from_file_path(file_path: &str) -> Result<Config, String> {
let file = File::open(file_path)
.map_err(|e| format!("unable to read file {}\n{:?}", file_path, e))?;
let mut file_reader = BufReader::new(file);
let mut file_buffer = vec![];
file_reader
.read_to_end(&mut file_buffer)
.map_err(|e| format!("unable to read file {}\n{:?}", file_path, e))?;
let config_file: ConfigToml = match toml::from_slice(&file_buffer) {
Ok(s) => s,
Err(e) => {
return Err(format!("Config file malformatted {}", e));
}
};
ConfigToml::config_from_toml(config_file)
}
fn config_from_toml(toml: ConfigToml) -> Result<Config, String> {
let bitcoin_network = match toml.bitcoind.network.as_str() {
"devnet" => Network::Regtest,
"testnet" => Network::Testnet,
"mainnet" => Network::Bitcoin,
"signet" => Network::Signet,
_ => return Err("bitcoind.network not supported".to_string()),
};
let ordinals = match toml.ordinals {
Some(ordinals) => Some(OrdinalsConfig {
db: ordinals.db.to_config(),
meta_protocols: match ordinals.meta_protocols {
Some(meta_protocols) => Some(OrdinalsMetaProtocolsConfig {
brc20: match meta_protocols.brc20 {
Some(brc20) => Some(OrdinalsBrc20Config {
enabled: brc20.enabled,
lru_cache_size: brc20
.lru_cache_size
.unwrap_or(DEFAULT_LRU_CACHE_SIZE),
db: brc20.db.to_config(),
}),
None => None,
},
}),
None => None,
},
}),
None => None,
};
let runes = match toml.runes {
Some(runes) => Some(RunesConfig {
lru_cache_size: runes.lru_cache_size.unwrap_or(DEFAULT_LRU_CACHE_SIZE),
db: runes.db.to_config(),
}),
None => None,
};
let metrics = match toml.metrics {
Some(metrics) => Some(MetricsConfig {
enabled: metrics.enabled,
prometheus_port: metrics.prometheus_port,
}),
None => None,
};
let config = Config {
storage: StorageConfig {
working_dir: toml
.storage
.working_dir
.unwrap_or(DEFAULT_WORKING_DIR.into()),
},
ordinals,
runes,
resources: ResourcesConfig {
ulimit: toml.resources.ulimit.unwrap_or(DEFAULT_ULIMIT),
cpu_core_available: toml.resources.cpu_core_available.unwrap_or(num_cpus::get()),
memory_available: toml
.resources
.memory_available
.unwrap_or(DEFAULT_MEMORY_AVAILABLE),
bitcoind_rpc_threads: toml
.resources
.bitcoind_rpc_threads
.unwrap_or(DEFAULT_BITCOIND_RPC_THREADS),
bitcoind_rpc_timeout: toml
.resources
.bitcoind_rpc_timeout
.unwrap_or(DEFAULT_BITCOIND_RPC_TIMEOUT),
},
bitcoind: BitcoindConfig {
rpc_url: toml.bitcoind.rpc_url.to_string(),
rpc_username: toml.bitcoind.rpc_username.to_string(),
rpc_password: toml.bitcoind.rpc_password.to_string(),
network: bitcoin_network,
zmq_url: toml.bitcoind.zmq_url,
},
metrics,
};
Ok(config)
}
}
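
The network-string mapping above, pulled out as a hedged standalone sketch (it mirrors config_from_toml, including the regtest/devnet aliasing):

use bitcoin::Network;

fn parse_network(s: &str) -> Result<Network, String> {
    match s {
        "regtest" | "devnet" => Ok(Network::Regtest),
        "testnet" => Ok(Network::Testnet),
        "mainnet" => Ok(Network::Bitcoin),
        "signet" => Ok(Network::Signet),
        _ => Err("bitcoind.network not supported".to_string()),
    }
}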


@@ -1,433 +0,0 @@
use crate::config::file::ConfigFile;
use crate::config::generator::generate_config;
use chainhook_sdk::utils::{BlockHeights, Context};
use clap::{Parser, Subcommand};
use hiro_system_kit;
use ordhook::core::first_inscription_height;
use ordhook::core::pipeline::bitcoind_download_blocks;
use ordhook::core::pipeline::processors::block_archiving::start_block_archiving_processor;
use ordhook::db::blocks::{
find_block_bytes_at_block_height, find_last_block_inserted, find_missing_blocks,
open_blocks_db_with_retry, open_readonly_blocks_db,
};
use ordhook::db::cursor::BlockBytesCursor;
use ordhook::db::{migrate_dbs, reset_dbs};
use ordhook::service::Service;
use ordhook::try_info;
use std::path::PathBuf;
use std::thread::sleep;
use std::time::Duration;
use std::{process, u64};
#[derive(Parser, Debug)]
#[clap(name = "ordhook", author, version, about, long_about = None)]
struct Opts {
#[clap(subcommand)]
command: Command,
}
#[derive(Subcommand, PartialEq, Clone, Debug)]
enum Command {
/// Generate a new configuration file
#[clap(subcommand)]
Config(ConfigCommand),
/// Stream Bitcoin blocks and index ordinals inscriptions and transfers
#[clap(subcommand)]
Service(ServiceCommand),
/// Perform maintenance operations on local index
#[clap(subcommand)]
Index(IndexCommand),
/// Database operations
#[clap(subcommand)]
Database(DatabaseCommand),
}
#[derive(Subcommand, PartialEq, Clone, Debug)]
enum DatabaseCommand {
/// Migrates database
#[clap(name = "migrate", bin_name = "migrate")]
Migrate(DatabaseMigrateCommand),
/// Resets database to an empty state
#[clap(name = "reset", bin_name = "reset")]
Reset(DatabaseMigrateCommand),
}
#[derive(Parser, PartialEq, Clone, Debug)]
struct DatabaseMigrateCommand {
/// Load config file path
#[clap(long = "config-path")]
pub config_path: Option<String>,
}
#[derive(Subcommand, PartialEq, Clone, Debug)]
enum RepairCommand {
/// Rewrite blocks data in hord.rocksdb
#[clap(name = "blocks", bin_name = "blocks")]
Blocks(RepairStorageCommand),
}
#[derive(Parser, PartialEq, Clone, Debug)]
struct RepairStorageCommand {
/// Interval of blocks (--interval 767430:800000)
#[clap(long = "interval", conflicts_with = "blocks")]
pub blocks_interval: Option<String>,
/// List of blocks (--blocks 767430,767431,767433,800000)
#[clap(long = "blocks", conflicts_with = "interval")]
pub blocks: Option<String>,
/// Network threads
#[clap(long = "network-threads")]
pub network_threads: Option<usize>,
/// Load config file path
#[clap(long = "config-path")]
pub config_path: Option<String>,
/// Cascade to observers
#[clap(short, long, action = clap::ArgAction::SetTrue)]
pub repair_observers: Option<bool>,
/// Display debug logs
#[clap(short, long, action = clap::ArgAction::SetTrue)]
pub debug: Option<bool>,
}
impl RepairStorageCommand {
pub fn get_blocks(&self) -> Vec<u64> {
let blocks = match (&self.blocks_interval, &self.blocks) {
(Some(interval), None) => {
let blocks = interval.split(':').collect::<Vec<_>>();
let start_block: u64 = blocks
.first()
.expect("unable to get start_block")
.parse::<u64>()
.expect("unable to parse start_block");
let end_block: u64 = blocks
.get(1)
.expect("unable to get end_block")
.parse::<u64>()
.expect("unable to parse end_block");
BlockHeights::BlockRange(start_block, end_block).get_sorted_entries()
}
(None, Some(blocks)) => {
let blocks = blocks
.split(',')
.map(|b| b.parse::<u64>().expect("unable to parse block"))
.collect::<Vec<_>>();
BlockHeights::Blocks(blocks).get_sorted_entries()
}
_ => unreachable!(),
};
blocks.unwrap().into()
}
}
#[derive(Subcommand, PartialEq, Clone, Debug)]
#[clap(bin_name = "config", aliases = &["config"])]
enum ConfigCommand {
/// Generate new config
#[clap(name = "new", bin_name = "new", aliases = &["generate"])]
New(NewConfig),
}
#[derive(Parser, PartialEq, Clone, Debug)]
struct NewConfig {
/// Target Regtest network
#[clap(
long = "regtest",
conflicts_with = "testnet",
conflicts_with = "mainnet"
)]
pub regtest: bool,
/// Target Testnet network
#[clap(
long = "testnet",
conflicts_with = "regtest",
conflicts_with = "mainnet"
)]
pub testnet: bool,
/// Target Mainnet network
#[clap(
long = "mainnet",
conflicts_with = "testnet",
conflicts_with = "regtest"
)]
pub mainnet: bool,
}
#[derive(Subcommand, PartialEq, Clone, Debug)]
enum ServiceCommand {
/// Start chainhook-cli
#[clap(name = "start", bin_name = "start")]
Start(StartCommand),
}
#[derive(Parser, PartialEq, Clone, Debug)]
struct StartCommand {
/// Target Regtest network
#[clap(
long = "regtest",
conflicts_with = "testnet",
conflicts_with = "mainnet"
)]
pub regtest: bool,
/// Target Testnet network
#[clap(
long = "testnet",
conflicts_with = "regtest",
conflicts_with = "mainnet"
)]
pub testnet: bool,
/// Target Mainnet network
#[clap(
long = "mainnet",
conflicts_with = "testnet",
conflicts_with = "regtest"
)]
pub mainnet: bool,
/// Load config file path
#[clap(
long = "config-path",
conflicts_with = "mainnet",
conflicts_with = "testnet",
conflicts_with = "regtest"
)]
pub config_path: Option<String>,
/// Check blocks integrity
#[clap(long = "check-blocks-integrity")]
pub block_integrity_check: bool,
}
#[derive(Subcommand, PartialEq, Clone, Debug)]
enum IndexCommand {
/// Initialize a new ordhook db
#[clap(name = "new", bin_name = "new")]
New(SyncOrdhookDbCommand),
/// Catch-up ordhook db
#[clap(name = "sync", bin_name = "sync")]
Sync(SyncOrdhookDbCommand),
/// Rebuild inscriptions entries for a given block
#[clap(name = "drop", bin_name = "drop")]
Drop(DropOrdhookDbCommand),
/// Check integrity
#[clap(name = "check", bin_name = "check")]
Check(CheckDbCommand),
/// Db maintenance related commands
#[clap(subcommand)]
Repair(RepairCommand),
}
#[derive(Parser, PartialEq, Clone, Debug)]
struct UpdateOrdhookDbCommand {
/// Starting block
pub start_block: u64,
/// Ending block
pub end_block: u64,
/// Load config file path
#[clap(long = "config-path")]
pub config_path: Option<String>,
/// Transfers only
pub transfers_only: Option<bool>,
}
#[derive(Parser, PartialEq, Clone, Debug)]
struct SyncOrdhookDbCommand {
/// Load config file path
#[clap(long = "config-path")]
pub config_path: Option<String>,
}
#[derive(Parser, PartialEq, Clone, Debug)]
struct DropOrdhookDbCommand {
pub blocks: u32,
/// Load config file path
#[clap(long = "config-path")]
pub config_path: Option<String>,
}
#[derive(Parser, PartialEq, Clone, Debug)]
struct PatchOrdhookDbCommand {
/// Load config file path
#[clap(long = "config-path")]
pub config_path: Option<String>,
}
#[derive(Parser, PartialEq, Clone, Debug)]
struct MigrateOrdhookDbCommand {
/// Load config file path
#[clap(long = "config-path")]
pub config_path: Option<String>,
}
#[derive(Parser, PartialEq, Clone, Debug)]
struct CheckDbCommand {
/// Starting block
pub start_block: u64,
/// Ending block
pub end_block: u64,
/// Load config file path
#[clap(long = "config-path")]
pub config_path: Option<String>,
}
pub fn main() {
let logger = hiro_system_kit::log::setup_logger();
let _guard = hiro_system_kit::log::setup_global_logger(logger.clone());
let ctx = Context {
logger: Some(logger),
tracer: false,
};
let opts: Opts = match Opts::try_parse() {
Ok(opts) => opts,
Err(e) => {
println!("{}", e);
process::exit(1);
}
};
if let Err(e) = hiro_system_kit::nestable_block_on(handle_command(opts, &ctx)) {
error!(ctx.expect_logger(), "{e}");
std::thread::sleep(std::time::Duration::from_millis(500));
process::exit(1);
}
}
async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
match opts.command {
Command::Service(subcmd) => match subcmd {
ServiceCommand::Start(cmd) => {
let maintenance_enabled =
std::env::var("ORDHOOK_MAINTENANCE").unwrap_or("0".into());
if maintenance_enabled.eq("1") {
try_info!(ctx, "Entering maintenance mode. Unset ORDHOOK_MAINTENANCE and reboot to resume operations");
sleep(Duration::from_secs(u64::MAX))
}
let config = ConfigFile::default(
cmd.regtest,
cmd.testnet,
cmd.mainnet,
&cmd.config_path,
&None,
)?;
migrate_dbs(&config, ctx).await?;
let mut service = Service::new(&config, ctx);
// TODO(rafaelcr): This only works if there's a rocksdb file already containing blocks previous to the first
// inscription height.
let start_block = service.get_index_chain_tip().await?;
try_info!(ctx, "Index chain tip is at #{start_block}");
return service.run(cmd.block_integrity_check).await;
}
},
Command::Config(subcmd) => match subcmd {
ConfigCommand::New(cmd) => {
use std::fs::File;
use std::io::Write;
let config =
ConfigFile::default(cmd.regtest, cmd.testnet, cmd.mainnet, &None, &None)?;
let config_content = generate_config(&config.network.bitcoin_network);
let mut file_path = PathBuf::new();
file_path.push("Ordhook.toml");
let mut file = File::create(&file_path)
.map_err(|e| format!("unable to open file {}\n{}", file_path.display(), e))?;
file.write_all(config_content.as_bytes())
.map_err(|e| format!("unable to write file {}\n{}", file_path.display(), e))?;
println!("Created file Ordhook.toml");
}
},
Command::Index(IndexCommand::New(cmd)) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
migrate_dbs(&config, ctx).await?;
open_blocks_db_with_retry(true, &config, ctx);
}
Command::Index(IndexCommand::Sync(cmd)) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
migrate_dbs(&config, ctx).await?;
let service = Service::new(&config, ctx);
service.catch_up_to_bitcoin_chain_tip().await?;
}
Command::Index(IndexCommand::Repair(subcmd)) => match subcmd {
RepairCommand::Blocks(cmd) => {
let mut config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
if let Some(network_threads) = cmd.network_threads {
config.resources.bitcoind_rpc_threads = network_threads;
}
let blocks = cmd.get_blocks();
let block_ingestion_processor =
start_block_archiving_processor(&config, ctx, false, None);
bitcoind_download_blocks(
&config,
blocks,
first_inscription_height(&config),
&block_ingestion_processor,
10_000,
ctx,
)
.await?;
if let Some(true) = cmd.debug {
let blocks_db = open_blocks_db_with_retry(false, &config, ctx);
for i in cmd.get_blocks().into_iter() {
let block_bytes =
find_block_bytes_at_block_height(i as u32, 10, &blocks_db, ctx)
.expect("unable to retrieve block {i}");
let block = BlockBytesCursor::new(&block_bytes);
info!(ctx.expect_logger(), "--------------------");
info!(ctx.expect_logger(), "Block: {i}");
for tx in block.iter_tx() {
info!(ctx.expect_logger(), "Tx: {}", hex::encode(tx.txid));
}
}
}
}
},
Command::Index(IndexCommand::Check(cmd)) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
{
let blocks_db = open_readonly_blocks_db(&config, ctx)?;
let tip = find_last_block_inserted(&blocks_db);
println!("Tip: {}", tip);
let missing_blocks = find_missing_blocks(&blocks_db, 1, tip, ctx);
println!("{:?}", missing_blocks);
}
}
Command::Index(IndexCommand::Drop(cmd)) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
let service = Service::new(&config, ctx);
let chain_tip = service.get_index_chain_tip().await?;
println!("Index chain tip is at #{chain_tip}");
println!(
"{} blocks will be dropped. New index chain tip will be at #{}. Confirm? [Y/n]",
cmd.blocks,
chain_tip - cmd.blocks as u64
);
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).unwrap();
if buffer.starts_with('n') {
return Err("Deletion aborted".to_string());
}
let service = Service::new(&config, ctx);
let block_heights: Vec<u64> = ((chain_tip - cmd.blocks as u64)..=chain_tip).collect();
service.rollback(&block_heights).await?;
println!("{} blocks dropped", cmd.blocks);
}
Command::Database(DatabaseCommand::Migrate(cmd)) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
migrate_dbs(&config, ctx).await?;
}
Command::Database(DatabaseCommand::Reset(cmd)) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
println!(
"WARNING: This operation will delete ALL index data and cannot be undone. Confirm? [Y/n]"
);
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).unwrap();
if buffer.to_lowercase().starts_with('n') {
return Err("Aborted".to_string());
}
reset_dbs(&config, ctx).await?;
}
}
Ok(())
}
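// Illustrative invocations for the subcommands matched above (binary name,
// subcommand spelling, and flags are inferred from the clap fields used in
// this file and are assumptions, not the authoritative CLI surface):
//
//     bitcoin-indexer config new --mainnet
//     bitcoin-indexer database migrate --config-path Indexer.toml
//     bitcoin-indexer index sync --config-path Indexer.toml
//     bitcoin-indexer index drop --config-path Indexer.toml --blocks 10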

View File

@@ -1,240 +0,0 @@
use chainhook_types::{BitcoinBlockSignaling, BitcoinNetwork};
use chainhook_sdk::indexer::IndexerConfig;
use ordhook::config::{
Config, LogConfig, MetaProtocolsConfig, ResourcesConfig, SnapshotConfig,
SnapshotConfigDownloadUrls, StorageConfig, DEFAULT_BITCOIND_RPC_THREADS,
DEFAULT_BITCOIND_RPC_TIMEOUT, DEFAULT_BRC20_LRU_CACHE_SIZE, DEFAULT_MEMORY_AVAILABLE,
DEFAULT_ULIMIT,
};
use std::fs::File;
use std::io::{BufReader, Read};
#[derive(Deserialize, Debug, Clone)]
pub struct ConfigFile {
pub storage: StorageConfigFile,
pub ordinals_db: PostgresConfigFile,
pub brc20_db: Option<PostgresConfigFile>,
pub http_api: Option<PredicatesApiConfigFile>,
pub resources: ResourcesConfigFile,
pub network: NetworkConfigFile,
pub logs: Option<LogConfigFile>,
pub snapshot: Option<SnapshotConfigFile>,
pub meta_protocols: Option<MetaProtocolsConfigFile>,
}
impl ConfigFile {
pub fn from_file_path(file_path: &str) -> Result<Config, String> {
let file = File::open(file_path)
.map_err(|e| format!("unable to read file {}\n{:?}", file_path, e))?;
let mut file_reader = BufReader::new(file);
let mut file_buffer = vec![];
file_reader
.read_to_end(&mut file_buffer)
.map_err(|e| format!("unable to read file {}\n{:?}", file_path, e))?;
let config_file: ConfigFile = match toml::from_slice(&file_buffer) {
Ok(s) => s,
Err(e) => {
                return Err(format!("Config file malformed: {}", e));
}
};
ConfigFile::from_config_file(config_file)
}
pub fn from_config_file(config_file: ConfigFile) -> Result<Config, String> {
let bitcoin_network = match config_file.network.mode.as_str() {
"devnet" => BitcoinNetwork::Regtest,
"testnet" => BitcoinNetwork::Testnet,
"mainnet" => BitcoinNetwork::Mainnet,
"signet" => BitcoinNetwork::Signet,
_ => return Err("network.mode not supported".to_string()),
};
let snapshot = match config_file.snapshot {
Some(bootstrap) => match bootstrap.ordinals_url {
Some(ref url) => SnapshotConfig::Download(SnapshotConfigDownloadUrls {
ordinals: url.to_string(),
brc20: bootstrap.brc20_url,
}),
None => SnapshotConfig::Build,
},
None => SnapshotConfig::Build,
};
let config = Config {
storage: StorageConfig {
working_dir: config_file.storage.working_dir.unwrap_or("ordhook".into()),
observers_working_dir: config_file
.storage
.observers_working_dir
.unwrap_or("observers".into()),
},
ordinals_db: ordhook::config::PgConnectionConfig {
dbname: config_file.ordinals_db.database,
host: config_file.ordinals_db.host,
port: config_file.ordinals_db.port,
user: config_file.ordinals_db.username,
password: config_file.ordinals_db.password,
search_path: config_file.ordinals_db.search_path,
pool_max_size: config_file.ordinals_db.pool_max_size,
},
brc20_db: match config_file.brc20_db {
Some(brc20_db) => Some(ordhook::config::PgConnectionConfig {
dbname: brc20_db.database,
host: brc20_db.host,
port: brc20_db.port,
user: brc20_db.username,
password: brc20_db.password,
search_path: brc20_db.search_path,
pool_max_size: brc20_db.pool_max_size,
}),
None => None,
},
snapshot,
resources: ResourcesConfig {
ulimit: config_file.resources.ulimit.unwrap_or(DEFAULT_ULIMIT),
cpu_core_available: config_file
.resources
.cpu_core_available
.unwrap_or(num_cpus::get()),
memory_available: config_file
.resources
.memory_available
.unwrap_or(DEFAULT_MEMORY_AVAILABLE),
bitcoind_rpc_threads: config_file
.resources
.bitcoind_rpc_threads
.unwrap_or(DEFAULT_BITCOIND_RPC_THREADS),
bitcoind_rpc_timeout: config_file
.resources
.bitcoind_rpc_timeout
.unwrap_or(DEFAULT_BITCOIND_RPC_TIMEOUT),
expected_observers_count: config_file
.resources
.expected_observers_count
.unwrap_or(1),
brc20_lru_cache_size: config_file
.resources
.brc20_lru_cache_size
.unwrap_or(DEFAULT_BRC20_LRU_CACHE_SIZE),
},
network: IndexerConfig {
bitcoind_rpc_url: config_file.network.bitcoind_rpc_url.to_string(),
bitcoind_rpc_username: config_file.network.bitcoind_rpc_username.to_string(),
bitcoind_rpc_password: config_file.network.bitcoind_rpc_password.to_string(),
bitcoin_block_signaling: match config_file.network.bitcoind_zmq_url {
Some(ref zmq_url) => BitcoinBlockSignaling::ZeroMQ(zmq_url.clone()),
None => BitcoinBlockSignaling::ZeroMQ("".to_string()),
},
bitcoin_network,
prometheus_monitoring_port: config_file.network.prometheus_monitoring_port,
},
logs: LogConfig {
ordinals_internals: config_file
.logs
.as_ref()
.and_then(|l| l.ordinals_internals)
.unwrap_or(true),
chainhook_internals: config_file
.logs
.as_ref()
.and_then(|l| l.chainhook_internals)
.unwrap_or(true),
},
meta_protocols: MetaProtocolsConfig {
brc20: config_file
.meta_protocols
.as_ref()
.and_then(|l| l.brc20)
.unwrap_or(false),
},
};
Ok(config)
}
pub fn default(
devnet: bool,
testnet: bool,
mainnet: bool,
config_path: &Option<String>,
meta_protocols: &Option<String>,
) -> Result<Config, String> {
let mut config = match (devnet, testnet, mainnet, config_path) {
(true, false, false, _) => Config::devnet_default(),
(false, true, false, _) => Config::testnet_default(),
(false, false, true, _) => Config::mainnet_default(),
(false, false, false, Some(config_path)) => ConfigFile::from_file_path(config_path)?,
_ => Err("Invalid combination of arguments".to_string())?,
};
if let Some(meta_protocols) = meta_protocols {
match meta_protocols.as_str() {
"brc20" => config.meta_protocols.brc20 = true,
_ => Err("Invalid meta protocol".to_string())?,
}
}
Ok(config)
}
}
#[derive(Deserialize, Debug, Clone)]
pub struct LogConfigFile {
pub ordinals_internals: Option<bool>,
pub chainhook_internals: Option<bool>,
}
#[derive(Deserialize, Clone, Debug)]
pub struct PostgresConfigFile {
pub database: String,
pub host: String,
pub port: u16,
pub username: String,
pub password: Option<String>,
pub search_path: Option<String>,
pub pool_max_size: Option<usize>,
}
#[derive(Deserialize, Debug, Clone)]
pub struct StorageConfigFile {
pub working_dir: Option<String>,
pub observers_working_dir: Option<String>,
}
#[derive(Deserialize, Debug, Clone)]
pub struct PredicatesApiConfigFile {
pub http_port: Option<u16>,
pub database_uri: Option<String>,
pub display_logs: Option<bool>,
pub disabled: Option<bool>,
}
#[derive(Deserialize, Debug, Clone)]
pub struct SnapshotConfigFile {
pub ordinals_url: Option<String>,
pub brc20_url: Option<String>,
}
#[derive(Deserialize, Debug, Clone)]
pub struct MetaProtocolsConfigFile {
pub brc20: Option<bool>,
}
#[derive(Deserialize, Debug, Clone)]
pub struct ResourcesConfigFile {
pub ulimit: Option<usize>,
pub cpu_core_available: Option<usize>,
pub memory_available: Option<usize>,
pub bitcoind_rpc_threads: Option<usize>,
pub bitcoind_rpc_timeout: Option<u32>,
pub expected_observers_count: Option<usize>,
pub brc20_lru_cache_size: Option<usize>,
}
#[derive(Deserialize, Debug, Clone)]
pub struct NetworkConfigFile {
pub mode: String,
pub bitcoind_rpc_url: String,
pub bitcoind_rpc_username: String,
pub bitcoind_rpc_password: String,
pub bitcoind_zmq_url: Option<String>,
pub prometheus_monitoring_port: Option<u16>,
}

View File

@@ -1,50 +0,0 @@
use chainhook_types::BitcoinNetwork;
pub fn generate_config(network: &BitcoinNetwork) -> String {
let network = format!("{:?}", network);
let conf = format!(
r#"[storage]
working_dir = "ordhook"
# The HTTP API allows you to register / deregister
# predicates dynamically.
# Disabled by default.
#
# [http_api]
# http_port = 20456
[network]
mode = "{network}"
bitcoind_rpc_url = "http://0.0.0.0:8332"
bitcoind_rpc_username = "devnet"
bitcoind_rpc_password = "devnet"
# Bitcoin block events can be received by Chainhook
# either through a Bitcoin node's ZeroMQ interface,
# or through the Stacks node. ZeroMQ is used
# by default:
bitcoind_zmq_url = "tcp://0.0.0.0:18543"
# but a Stacks node can also be used:
# stacks_node_rpc_url = "http://0.0.0.0:20443"
[resources]
ulimit = 2048
cpu_core_available = 16
memory_available = 32
bitcoind_rpc_threads = 4
bitcoind_rpc_timeout = 15
expected_observers_count = 1
# Disable the following section if the state
# must be built locally
[snapshot]
ordinals_url = "https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-sqlite-latest"
brc20_url = "https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-brc20-latest"
[logs]
ordinals_internals = true
chainhook_internals = true
"#,
network = network.to_lowercase(),
);
conf
}
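// Usage sketch (assumed caller, mirroring the `config new` command above):
// the generated TOML is written verbatim to disk.
//
//     let toml = generate_config(&BitcoinNetwork::Mainnet);
//     std::fs::write("Ordhook.toml", toml).expect("unable to write config");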

View File

@@ -1,2 +0,0 @@
pub mod file;
pub mod generator;

View File

@@ -11,6 +11,7 @@ serde_derive = "1"
hex = "0.4.3"
rand = "0.9.0"
lru = "0.13.0"
config = { path = "../config" }
bitcoin = { workspace = true }
chainhook-sdk = { path = "../chainhook-sdk" }
chainhook-types = { path = "../chainhook-types-rs" }
@@ -20,7 +21,7 @@ reqwest = { version = "0.11", default-features = false, features = [
"json",
"rustls-tls",
] }
tokio = { version = "1.35.1", features = ["full"] }
tokio = { workspace = true }
futures-util = "0.3.24"
flate2 = "1.0.24"
tar = "0.4.38"

View File

@@ -1,274 +0,0 @@
pub use chainhook_postgres::PgConnectionConfig;
use chainhook_sdk::{indexer::IndexerConfig, observer::EventObserverConfig};
use chainhook_types::{BitcoinBlockSignaling, BitcoinNetwork};
use std::path::PathBuf;
const DEFAULT_MAINNET_ORDINALS_SQLITE_ARCHIVE: &str =
"https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-sqlite-latest";
const DEFAULT_MAINNET_BRC20_SQLITE_ARCHIVE: &str =
"https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-brc20-latest";
pub const DEFAULT_INGESTION_PORT: u16 = 20455;
pub const DEFAULT_ULIMIT: usize = 2048;
pub const DEFAULT_MEMORY_AVAILABLE: usize = 8;
pub const DEFAULT_BITCOIND_RPC_THREADS: usize = 4;
pub const DEFAULT_BITCOIND_RPC_TIMEOUT: u32 = 15;
pub const DEFAULT_BRC20_LRU_CACHE_SIZE: usize = 50_000;
#[derive(Clone, Debug)]
pub struct Config {
pub storage: StorageConfig,
pub ordinals_db: PgConnectionConfig,
pub brc20_db: Option<PgConnectionConfig>,
pub resources: ResourcesConfig,
pub network: IndexerConfig,
pub snapshot: SnapshotConfig,
pub meta_protocols: MetaProtocolsConfig,
pub logs: LogConfig,
}
#[derive(Clone, Debug)]
pub struct MetaProtocolsConfig {
pub brc20: bool,
}
#[derive(Clone, Debug)]
pub struct LogConfig {
pub ordinals_internals: bool,
pub chainhook_internals: bool,
}
#[derive(Clone, Debug)]
pub struct StorageConfig {
pub working_dir: String,
pub observers_working_dir: String,
}
#[derive(Clone, Debug)]
pub struct SnapshotConfigDownloadUrls {
pub ordinals: String,
pub brc20: Option<String>,
}
#[derive(Clone, Debug)]
pub enum SnapshotConfig {
Build,
Download(SnapshotConfigDownloadUrls),
}
#[derive(Clone, Debug)]
pub struct PathConfig {
pub file_path: PathBuf,
}
#[derive(Clone, Debug)]
pub struct UrlConfig {
pub file_url: String,
}
#[derive(Deserialize, Debug, Clone)]
pub struct ResourcesConfig {
pub ulimit: usize,
pub cpu_core_available: usize,
pub memory_available: usize,
pub bitcoind_rpc_threads: usize,
pub bitcoind_rpc_timeout: u32,
pub expected_observers_count: usize,
pub brc20_lru_cache_size: usize,
}
impl ResourcesConfig {
pub fn get_optimal_thread_pool_capacity(&self) -> usize {
        // Generally speaking, when dealing with a thread pool we need one
        // thread for feeding the pool and eventually another thread for
        // handling the "reduce" step.
self.cpu_core_available.saturating_sub(2).max(1)
}
}
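// Worked example: with `cpu_core_available = 16` the pool is sized at
// 16 - 2 = 14 worker threads; with 1 or 2 cores the `max(1)` floor still
// yields a single worker so the pipeline can make progress.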
impl Config {
pub fn get_event_observer_config(&self) -> EventObserverConfig {
EventObserverConfig {
bitcoind_rpc_username: self.network.bitcoind_rpc_username.clone(),
bitcoind_rpc_password: self.network.bitcoind_rpc_password.clone(),
bitcoind_rpc_url: self.network.bitcoind_rpc_url.clone(),
bitcoin_block_signaling: self.network.bitcoin_block_signaling.clone(),
bitcoin_network: self.network.bitcoin_network.clone(),
}
}
pub fn should_bootstrap_through_download(&self) -> bool {
match &self.snapshot {
SnapshotConfig::Build => false,
SnapshotConfig::Download(_) => true,
}
}
pub fn expected_cache_path(&self) -> PathBuf {
let mut destination_path = PathBuf::new();
destination_path.push(&self.storage.working_dir);
destination_path
}
pub fn expected_observers_cache_path(&self) -> PathBuf {
let mut destination_path = PathBuf::new();
destination_path.push(&self.storage.observers_working_dir);
destination_path
}
pub fn devnet_default() -> Config {
Config {
storage: StorageConfig {
working_dir: default_cache_path(),
observers_working_dir: default_observers_cache_path(),
},
ordinals_db: PgConnectionConfig {
dbname: "ordinals".to_string(),
host: "localhost".to_string(),
port: 5432,
user: "postgres".to_string(),
password: Some("postgres".to_string()),
search_path: None,
pool_max_size: None,
},
brc20_db: None,
snapshot: SnapshotConfig::Build,
resources: ResourcesConfig {
cpu_core_available: num_cpus::get(),
memory_available: DEFAULT_MEMORY_AVAILABLE,
ulimit: DEFAULT_ULIMIT,
bitcoind_rpc_threads: DEFAULT_BITCOIND_RPC_THREADS,
bitcoind_rpc_timeout: DEFAULT_BITCOIND_RPC_TIMEOUT,
expected_observers_count: 1,
brc20_lru_cache_size: DEFAULT_BRC20_LRU_CACHE_SIZE,
},
network: IndexerConfig {
bitcoind_rpc_url: "http://0.0.0.0:18443".into(),
bitcoind_rpc_username: "devnet".into(),
bitcoind_rpc_password: "devnet".into(),
bitcoin_block_signaling: BitcoinBlockSignaling::ZeroMQ(
"http://0.0.0.0:18543".into(),
),
bitcoin_network: BitcoinNetwork::Regtest,
prometheus_monitoring_port: None,
},
logs: LogConfig {
ordinals_internals: true,
chainhook_internals: false,
},
meta_protocols: MetaProtocolsConfig { brc20: false },
}
}
pub fn testnet_default() -> Config {
Config {
storage: StorageConfig {
working_dir: default_cache_path(),
observers_working_dir: default_observers_cache_path(),
},
ordinals_db: PgConnectionConfig {
dbname: "ordinals".to_string(),
host: "localhost".to_string(),
port: 5432,
user: "postgres".to_string(),
password: Some("postgres".to_string()),
search_path: None,
pool_max_size: None,
},
brc20_db: None,
snapshot: SnapshotConfig::Build,
resources: ResourcesConfig {
cpu_core_available: num_cpus::get(),
memory_available: DEFAULT_MEMORY_AVAILABLE,
ulimit: DEFAULT_ULIMIT,
bitcoind_rpc_threads: DEFAULT_BITCOIND_RPC_THREADS,
bitcoind_rpc_timeout: DEFAULT_BITCOIND_RPC_TIMEOUT,
expected_observers_count: 1,
brc20_lru_cache_size: DEFAULT_BRC20_LRU_CACHE_SIZE,
},
network: IndexerConfig {
bitcoind_rpc_url: "http://0.0.0.0:18332".into(),
bitcoind_rpc_username: "devnet".into(),
bitcoind_rpc_password: "devnet".into(),
bitcoin_block_signaling: BitcoinBlockSignaling::ZeroMQ(
"http://0.0.0.0:18543".into(),
),
bitcoin_network: BitcoinNetwork::Testnet,
prometheus_monitoring_port: Some(9153),
},
logs: LogConfig {
ordinals_internals: true,
chainhook_internals: false,
},
meta_protocols: MetaProtocolsConfig { brc20: false },
}
}
pub fn mainnet_default() -> Config {
Config {
storage: StorageConfig {
working_dir: default_cache_path(),
observers_working_dir: default_observers_cache_path(),
},
ordinals_db: PgConnectionConfig {
dbname: "ordinals".to_string(),
host: "localhost".to_string(),
port: 5432,
user: "postgres".to_string(),
password: Some("postgres".to_string()),
search_path: None,
pool_max_size: None,
},
brc20_db: None,
snapshot: SnapshotConfig::Download(SnapshotConfigDownloadUrls {
ordinals: DEFAULT_MAINNET_ORDINALS_SQLITE_ARCHIVE.to_string(),
brc20: Some(DEFAULT_MAINNET_BRC20_SQLITE_ARCHIVE.to_string()),
}),
resources: ResourcesConfig {
cpu_core_available: num_cpus::get(),
memory_available: DEFAULT_MEMORY_AVAILABLE,
ulimit: DEFAULT_ULIMIT,
bitcoind_rpc_threads: DEFAULT_BITCOIND_RPC_THREADS,
bitcoind_rpc_timeout: DEFAULT_BITCOIND_RPC_TIMEOUT,
expected_observers_count: 1,
brc20_lru_cache_size: DEFAULT_BRC20_LRU_CACHE_SIZE,
},
network: IndexerConfig {
bitcoind_rpc_url: "http://0.0.0.0:8332".into(),
bitcoind_rpc_username: "devnet".into(),
bitcoind_rpc_password: "devnet".into(),
bitcoin_block_signaling: BitcoinBlockSignaling::ZeroMQ(
"http://0.0.0.0:18543".into(),
),
bitcoin_network: BitcoinNetwork::Mainnet,
prometheus_monitoring_port: Some(9153),
},
logs: LogConfig {
ordinals_internals: true,
chainhook_internals: false,
},
meta_protocols: MetaProtocolsConfig { brc20: false },
}
}
#[cfg(test)]
pub fn test_default() -> Config {
let mut config = Self::mainnet_default();
config.storage.working_dir = "tmp".to_string();
config.resources.bitcoind_rpc_threads = 1;
config.resources.cpu_core_available = 1;
config
}
}
pub fn default_cache_path() -> String {
let mut cache_path = std::env::current_dir().expect("unable to get current dir");
cache_path.push("ordhook");
format!("{}", cache_path.display())
}
pub fn default_observers_cache_path() -> String {
let mut cache_path = std::env::current_dir().expect("unable to get current dir");
cache_path.push("observers");
format!("{}", cache_path.display())
}

View File

@@ -8,13 +8,12 @@ use chainhook_types::{
BlockIdentifier, OrdinalInscriptionRevealData, OrdinalInscriptionTransferData,
TransactionIdentifier,
};
use config::Config;
use deadpool_postgres::GenericClient;
use lru::LruCache;
use maplit::hashmap;
use crate::{
config::Config, core::protocol::satoshi_tracking::parse_output_and_offset_from_satpoint,
};
use crate::core::protocol::satoshi_tracking::parse_output_and_offset_from_satpoint;
use super::{
brc20_pg,
@@ -24,11 +23,13 @@ use super::{
/// If the given `config` has BRC-20 enabled, returns a BRC-20 memory cache.
pub fn brc20_new_cache(config: &Config) -> Option<Brc20MemoryCache> {
if config.meta_protocols.brc20 {
Some(Brc20MemoryCache::new(config.resources.brc20_lru_cache_size))
} else {
None
let Some(brc20) = config.ordinals_brc20_config() else {
return None;
};
if !brc20.enabled {
return None;
}
Some(Brc20MemoryCache::new(brc20.lru_cache_size))
}
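// Call-site sketch (assumed usage): with the new config layout the cache is
// only built when `[ordinals.meta_protocols.brc20]` is present and enabled,
// so callers can treat `None` as "BRC-20 indexing disabled".
//
//     let mut brc20_cache = brc20_new_cache(&config);
//     if let Some(cache) = brc20_cache.as_mut() {
//         // index BRC-20 operations through the LRU-backed cache
//     }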
/// Keeps BRC20 DB rows before they're inserted into Postgres. Use `flush` to insert.

View File

@@ -4,17 +4,17 @@ pub mod protocol;
#[cfg(test)]
pub mod test_builders;
use bitcoin::Network;
use chainhook_postgres::pg_pool_client;
use config::Config;
use dashmap::DashMap;
use fxhash::{FxBuildHasher, FxHasher};
use std::hash::BuildHasherDefault;
use std::ops::Div;
use chainhook_sdk::utils::Context;
use chainhook_types::BitcoinNetwork;
use crate::{
config::Config,
db::{
blocks::{
find_last_block_inserted, find_pinned_block_bytes_at_block_height,
@@ -23,16 +23,18 @@ use crate::{
cursor::TransactionBytesCursor,
ordinals_pg,
},
service::PgConnectionPools
service::PgConnectionPools,
};
use chainhook_sdk::utils::bitcoind::bitcoind_get_block_height;
pub fn first_inscription_height(config: &Config) -> u64 {
match config.network.bitcoin_network {
BitcoinNetwork::Mainnet => 767430,
BitcoinNetwork::Regtest => 1,
BitcoinNetwork::Testnet => 2413343,
BitcoinNetwork::Signet => 112402,
match config.bitcoind.network {
Network::Bitcoin => 767430,
Network::Regtest => 1,
Network::Testnet => 2413343,
Network::Testnet4 => 0,
Network::Signet => 112402,
_ => unreachable!(),
}
}
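// For example, a mainnet sync can skip straight to block 767430 (the height
// of the first inscription) before it starts sequencing:
//
//     let start_height = first_inscription_height(&config); // 767430 on mainnet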
@@ -164,7 +166,7 @@ pub async fn should_sync_ordinals_db(
};
// TODO: Gracefully handle Regtest, Testnet and Signet
let end_block = bitcoind_get_block_height(&config.network, ctx);
let end_block = bitcoind_get_block_height(&config.bitcoind, ctx);
let (mut end_block, speed) = if start_block < 200_000 {
(end_block.min(200_000), 10_000)
} else if start_block < 550_000 {

View File

@@ -1,15 +1,14 @@
pub mod processors;
use chainhook_sdk::observer::BitcoinConfig;
use chainhook_sdk::utils::Context;
use chainhook_types::BitcoinBlockData;
use chainhook_types::{BitcoinBlockData, BitcoinNetwork};
use config::Config;
use crossbeam_channel::bounded;
use std::collections::{HashMap, VecDeque};
use std::thread::{sleep, JoinHandle};
use std::time::Duration;
use tokio::task::JoinSet;
use crate::config::Config;
use crate::db::cursor::BlockBytesCursor;
use crate::{try_debug, try_info};
@@ -44,20 +43,12 @@ pub async fn bitcoind_download_blocks(
speed: usize,
ctx: &Context,
) -> Result<(), String> {
let bitcoin_config = BitcoinConfig {
username: config.network.bitcoind_rpc_username.clone(),
password: config.network.bitcoind_rpc_password.clone(),
rpc_url: config.network.bitcoind_rpc_url.clone(),
network: config.network.bitcoin_network.clone(),
bitcoin_block_signaling: config.network.bitcoin_block_signaling.clone(),
};
let number_of_blocks_to_process = blocks.len() as u64;
let (block_compressed_tx, block_compressed_rx) = crossbeam_channel::bounded(speed);
let http_client = build_http_client();
let moved_config = bitcoin_config.clone();
let moved_config = config.bitcoind.clone();
let moved_ctx = ctx.clone();
let moved_http_client = http_client.clone();
@@ -99,7 +90,7 @@ pub async fn bitcoind_download_blocks(
}
let moved_ctx: Context = ctx.clone();
let moved_bitcoin_network = bitcoin_config.network.clone();
let moved_bitcoin_network = config.bitcoind.network.clone();
let mut tx_thread_pool = vec![];
let mut rx_thread_pool = vec![];
@@ -127,7 +118,7 @@ pub async fn bitcoind_download_blocks(
let block_data = if block_height >= start_sequencing_blocks_at_height {
let block = standardize_bitcoin_block(
raw_block_data,
&moved_bitcoin_network,
&BitcoinNetwork::from_network(moved_bitcoin_network),
&moved_ctx,
)
.expect("unable to deserialize block");

View File

@@ -1,5 +1,6 @@
use chainhook_sdk::utils::Context;
use chainhook_types::BitcoinBlockData;
use config::Config;
use crossbeam_channel::{Sender, TryRecvError};
use rocksdb::DB;
use std::{
@@ -8,7 +9,6 @@ use std::{
};
use crate::{
config::Config,
core::pipeline::{PostProcessorCommand, PostProcessorController, PostProcessorEvent},
db::blocks::{insert_entry_in_blocks, open_blocks_db_with_retry},
try_error, try_info,

View File

@@ -8,6 +8,7 @@ use std::{
use chainhook_postgres::{pg_begin, pg_pool_client};
use chainhook_sdk::utils::Context;
use chainhook_types::{BitcoinBlockData, TransactionIdentifier};
use config::Config;
use crossbeam_channel::TryRecvError;
use dashmap::DashMap;
@@ -25,8 +26,9 @@ use crate::{
protocol::{
inscription_parsing::parse_inscriptions_in_standardized_block,
inscription_sequencing::{
update_block_inscriptions_with_consensus_sequence_data, get_bitcoin_network, get_jubilee_block_height,
get_bitcoin_network, get_jubilee_block_height,
parallelize_inscription_data_computations,
update_block_inscriptions_with_consensus_sequence_data,
},
satoshi_numbering::TraversalResult,
satoshi_tracking::augment_block_with_transfers,
@@ -39,12 +41,9 @@ use crate::{
utils::monitoring::PrometheusMonitoring,
};
use crate::{
config::Config,
core::{
use crate::core::{
new_traversals_lazy_cache,
pipeline::{PostProcessorCommand, PostProcessorController, PostProcessorEvent},
},
};
pub fn start_inscription_indexing_processor(
@@ -224,7 +223,13 @@ pub async fn index_block(
ctx,
)?;
if has_inscription_reveals {
update_block_inscriptions_with_consensus_sequence_data(block, sequence_cursor, cache_l1, &ord_tx, ctx)
update_block_inscriptions_with_consensus_sequence_data(
block,
sequence_cursor,
cache_l1,
&ord_tx,
ctx,
)
.await?;
}
augment_block_with_transfers(block, &ord_tx, ctx).await?;
@@ -274,7 +279,7 @@ pub async fn index_block(
pub async fn rollback_block(
block_height: u64,
config: &Config,
_config: &Config,
pg_pools: &PgConnectionPools,
ctx: &Context,
) -> Result<(), String> {
@@ -286,7 +291,7 @@ pub async fn rollback_block(
ordinals_pg::rollback_block(block_height, &ord_tx).await?;
// BRC-20
if let (true, Some(brc20_pool)) = (config.meta_protocols.brc20, &pg_pools.brc20) {
if let Some(brc20_pool) = &pg_pools.brc20 {
let mut brc20_client = pg_pool_client(brc20_pool).await?;
let brc20_tx = pg_begin(&mut brc20_client).await?;

View File

@@ -6,11 +6,11 @@ use chainhook_types::{
OrdinalInscriptionCurseType, OrdinalInscriptionNumber, OrdinalInscriptionRevealData,
OrdinalOperation,
};
use config::Config;
use serde_json::json;
use std::collections::HashMap;
use std::str::FromStr;
use crate::config::Config;
use crate::core::meta_protocols::brc20::brc20_activation_height;
use crate::core::meta_protocols::brc20::parser::{parse_brc20_operation, ParsedBrc20Operation};
use crate::try_warn;
@@ -132,8 +132,8 @@ pub fn parse_inscriptions_from_standardized_tx(
tx.transaction_identifier.get_hash_bytes_str(),
) {
for (reveal, inscription) in inscriptions.into_iter() {
if config.meta_protocols.brc20
&& block_identifier.index >= brc20_activation_height(&network)
if let Some(brc20) = config.ordinals_brc20_config() {
if brc20.enabled && block_identifier.index >= brc20_activation_height(&network)
{
match parse_brc20_operation(&inscription) {
Ok(Some(op)) => {
@@ -145,6 +145,7 @@ pub fn parse_inscriptions_from_standardized_tx(
}
};
}
}
operations.push(OrdinalOperation::InscriptionRevealed(reveal));
}
}
@@ -176,11 +177,9 @@ mod test {
use chainhook_sdk::utils::Context;
use chainhook_types::OrdinalOperation;
use config::Config;
use crate::{
config::Config,
core::test_builders::{TestBlockBuilder, TestTransactionBuilder, TestTxInBuilder},
};
use crate::core::test_builders::{TestBlockBuilder, TestTransactionBuilder, TestTxInBuilder};
use super::parse_inscriptions_in_standardized_block;

View File

@@ -11,6 +11,7 @@ use chainhook_types::{
OrdinalInscriptionCurseType, OrdinalInscriptionTransferDestination, OrdinalOperation,
TransactionIdentifier,
};
use config::Config;
use crossbeam_channel::unbounded;
use dashmap::DashMap;
use deadpool_postgres::Transaction;
@@ -18,7 +19,6 @@ use fxhash::FxHasher;
use crate::core::protocol::satoshi_tracking::UNBOUND_INSCRIPTION_SATPOINT;
use crate::{
config::Config,
core::resolve_absolute_pointer,
db::{self, cursor::TransactionBytesCursor, ordinals_pg},
try_debug, try_error, try_info,
@@ -65,11 +65,7 @@ pub fn parallelize_inscription_data_computations(
config: &Config,
ctx: &Context,
) -> Result<bool, String> {
let inner_ctx = if config.logs.ordinals_internals {
ctx.clone()
} else {
Context::empty()
};
let inner_ctx = ctx.clone();
try_debug!(
inner_ctx,

View File

@@ -5,8 +5,8 @@ use fxhash::FxHasher;
use std::hash::BuildHasherDefault;
use std::sync::Arc;
use crate::config::Config;
use crate::db::blocks::find_pinned_block_bytes_at_block_height;
use config::Config;
use crate::db::cursor::{BlockBytesCursor, TransactionBytesCursor};
use crate::try_error;
@@ -319,7 +319,6 @@ mod test {
use fxhash::FxHasher;
use crate::{
config::Config,
core::{
new_traversals_lazy_cache,
test_builders::{TestBlockBuilder, TestTransactionBuilder, TestTxInBuilder},
@@ -330,6 +329,7 @@ mod test {
drop_all_dbs,
},
};
use config::Config;
use super::compute_satoshi_number;

View File

@@ -1,10 +1,11 @@
use std::{path::PathBuf, thread::sleep, time::Duration};
use chainhook_sdk::utils::Context;
use config::Config;
use rand::{rng, Rng};
use rocksdb::{DBPinnableSlice, Options, DB};
use crate::{config::Config, try_error, try_warn};
use crate::{try_error, try_warn};
fn get_default_blocks_db_path(base_dir: &PathBuf) -> PathBuf {
let mut destination_path = base_dir.clone();

View File

@@ -6,32 +6,39 @@ pub mod ordinals_pg;
use chainhook_postgres::pg_connect_with_retry;
use chainhook_sdk::utils::Context;
use config::Config;
use crate::{config::Config, core::meta_protocols::brc20::brc20_pg, try_info, try_warn};
use crate::{core::meta_protocols::brc20::brc20_pg, try_info, try_warn};
pub async fn migrate_dbs(config: &Config, ctx: &Context) -> Result<(), String> {
let Some(ordinals) = &config.ordinals else {
unreachable!()
};
{
try_info!(ctx, "Running ordinals DB migrations");
let mut pg_client = pg_connect_with_retry(&config.ordinals_db).await;
let mut pg_client = pg_connect_with_retry(&ordinals.db).await;
ordinals_pg::migrate(&mut pg_client).await?;
}
if let (Some(brc20_db), true) = (&config.brc20_db, config.meta_protocols.brc20) {
if let Some(brc20) = config.ordinals_brc20_config() {
try_info!(ctx, "Running brc20 DB migrations");
let mut pg_client = pg_connect_with_retry(&brc20_db).await;
let mut pg_client = pg_connect_with_retry(&brc20.db).await;
brc20_pg::migrate(&mut pg_client).await?;
}
Ok(())
}
pub async fn reset_dbs(config: &Config, ctx: &Context) -> Result<(), String> {
let Some(ordinals) = &config.ordinals else {
unreachable!()
};
{
try_warn!(ctx, "Resetting ordinals DB");
let mut pg_client = pg_connect_with_retry(&config.ordinals_db).await;
let mut pg_client = pg_connect_with_retry(&ordinals.db).await;
pg_reset_db(&mut pg_client).await?;
}
if let (Some(brc20_db), true) = (&config.brc20_db, config.meta_protocols.brc20) {
if let Some(brc20) = config.ordinals_brc20_config() {
try_warn!(ctx, "Resetting brc20 DB");
let mut pg_client = pg_connect_with_retry(&brc20_db).await;
let mut pg_client = pg_connect_with_retry(&brc20.db).await;
pg_reset_db(&mut pg_client).await?;
}
Ok(())
@@ -62,8 +69,8 @@ pub async fn pg_reset_db(pg_client: &mut tokio_postgres::Client) -> Result<(), S
}
#[cfg(test)]
pub fn pg_test_config() -> chainhook_postgres::PgConnectionConfig {
chainhook_postgres::PgConnectionConfig {
pub fn pg_test_config() -> config::PgDatabaseConfig {
config::PgDatabaseConfig {
dbname: "postgres".to_string(),
host: "localhost".to_string(),
port: 5432,

View File

@@ -1,229 +0,0 @@
use crate::config::{Config, SnapshotConfig};
use crate::utils::read_file_content_at_path;
use crate::{try_error, try_info, try_warn};
use chainhook_sdk::utils::Context;
use flate2::read::GzDecoder;
use futures_util::StreamExt;
use progressing::mapping::Bar as MappingBar;
use progressing::Baring;
use std::fs::{self, File};
use std::io::{self, Cursor};
use std::io::{Read, Write};
use std::path::PathBuf;
use tar::Archive;
/// Downloads and decompresses a remote `tar.gz` file.
pub async fn download_and_decompress_archive_file(
file_url: String,
file_name: &str,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
let destination_dir_path = config.expected_cache_path();
std::fs::create_dir_all(&destination_dir_path).unwrap_or_else(|e| {
try_error!(ctx, "{e}");
});
try_info!(ctx, "=> {file_url}");
let res = reqwest::get(&file_url)
.await
.or(Err(format!("Failed to GET from '{}'", &file_url)))?;
// Download chunks
let (tx, rx) = flume::bounded(0);
if res.status() == reqwest::StatusCode::OK {
let limit = res.content_length().unwrap_or(10_000_000_000) as i64;
let archive_tmp_file = PathBuf::from(format!("{file_name}.tar.gz"));
let decoder_thread = std::thread::spawn(move || {
{
let input = ChannelRead::new(rx);
let mut decoder = GzDecoder::new(input);
let mut tmp = File::create(&archive_tmp_file).unwrap();
let mut buffer = [0; 512_000];
loop {
match decoder.read(&mut buffer) {
Ok(0) => break,
Ok(n) => {
if let Err(e) = tmp.write_all(&buffer[..n]) {
let err = format!(
"unable to update compressed archive: {}",
e.to_string()
);
return Err(err);
}
}
Err(e) => {
let err =
format!("unable to write compressed archive: {}", e.to_string());
return Err(err);
}
}
}
let _ = tmp.flush();
}
let archive_file = File::open(&archive_tmp_file).unwrap();
let mut archive = Archive::new(archive_file);
if let Err(e) = archive.unpack(&destination_dir_path) {
let err = format!("unable to decompress file: {}", e.to_string());
return Err(err);
}
let _ = fs::remove_file(archive_tmp_file);
Ok(())
});
let mut progress_bar = MappingBar::with_range(0i64, limit);
progress_bar.set_len(60);
let mut stdout = std::io::stdout();
if ctx.logger.is_some() {
print!("{}", progress_bar);
let _ = stdout.flush();
}
let mut stream = res.bytes_stream();
let mut progress = 0;
let mut steps = 0;
let mut tx_err = None;
while let Some(item) = stream.next().await {
                let chunk = item.or(Err("Error while downloading file".to_string()))?;
if chunk.is_empty() {
continue;
}
progress += chunk.len() as i64;
steps += chunk.len() as i64;
if steps > 5_000_000 {
steps = 0;
}
progress_bar.set(progress);
if steps == 0 {
if ctx.logger.is_some() {
print!("\r{}", progress_bar);
let _ = stdout.flush();
}
}
if let Err(e) = tx.send_async(chunk.to_vec()).await {
let err = format!("unable to download archive: {}", e.to_string());
tx_err = Some(err);
break;
}
}
progress_bar.set(limit);
if ctx.logger.is_some() {
print!("\r{}", progress_bar);
let _ = stdout.flush();
println!();
}
drop(tx);
decoder_thread.join().unwrap()?;
            if let Some(e) = tx_err.take() {
                return Err(e);
            }
}
Ok(())
}
// Wrap a channel into something that impls `io::Read`
struct ChannelRead {
rx: flume::Receiver<Vec<u8>>,
current: Cursor<Vec<u8>>,
}
impl ChannelRead {
fn new(rx: flume::Receiver<Vec<u8>>) -> ChannelRead {
ChannelRead {
rx,
current: Cursor::new(vec![]),
}
}
}
impl Read for ChannelRead {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.current.position() == self.current.get_ref().len() as u64 {
// We've exhausted the previous chunk, get a new one.
if let Ok(vec) = self.rx.recv() {
self.current = io::Cursor::new(vec);
}
// If recv() "fails", it means the sender closed its part of
// the channel, which means EOF. Propagate EOF by allowing
// a read from the exhausted cursor.
}
self.current.read(buf)
}
}
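// Minimal sketch of the pattern above (assumed usage): the async download
// loop feeds raw chunks into a bounded channel while `GzDecoder` pulls from
// `ChannelRead` on a separate thread, overlapping decompression with the
// network transfer.
//
//     let (tx, rx) = flume::bounded(0);
//     let mut decoder = GzDecoder::new(ChannelRead::new(rx));
//     // download side: tx.send_async(chunk.to_vec()).await
//     // decoder side: decoder.read(&mut buffer) until Ok(0), i.e. EOF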
/// Compares the SHA256 of a previous local archive to the latest remote archive and downloads if required.
async fn validate_or_download_archive_file(
snapshot_url: &String,
file_name: &str,
config: &Config,
ctx: &Context,
) {
let remote_archive_url = format!("{snapshot_url}.tar.gz");
let remote_sha_url = format!("{snapshot_url}.sha256");
let mut local_sqlite_file_path = config.expected_cache_path();
local_sqlite_file_path.push(format!("{file_name}.sqlite"));
let mut local_sha_file_path = config.expected_cache_path();
local_sha_file_path.push(format!("{file_name}.sqlite.sha256"));
// Compare local SHA256 to remote to see if there's a new one available.
let local_sha_file = read_file_content_at_path(&local_sha_file_path);
let remote_sha_file = match reqwest::get(&remote_sha_url).await {
Ok(response) => response.bytes().await,
Err(e) => Err(e),
};
let should_download = match (local_sha_file, remote_sha_file) {
        (Ok(local), Ok(remote_response)) => {
            let cache_expired = !remote_response.starts_with(&local[0..32]);
            if cache_expired {
                try_info!(ctx, "More recent {file_name}.sqlite file detected");
            }
            cache_expired
        }
(_, _) => match std::fs::metadata(&local_sqlite_file_path) {
Ok(_) => false,
_ => {
try_info!(ctx, "Unable to retrieve {file_name}.sqlite file locally");
true
}
},
};
if should_download {
try_info!(ctx, "Downloading {remote_archive_url}");
match download_and_decompress_archive_file(remote_archive_url, file_name, &config, &ctx)
.await
{
Ok(_) => {}
Err(e) => {
try_error!(ctx, "{e}");
std::process::exit(1);
}
}
} else {
try_info!(
ctx,
"Basing ordinals evaluation on database {}",
local_sqlite_file_path.display()
);
}
}
/// Downloads remote SQLite archive datasets.
pub async fn download_archive_datasets_if_required(config: &Config, ctx: &Context) {
if !config.should_bootstrap_through_download() {
return;
}
let snapshot_urls = match &config.snapshot {
SnapshotConfig::Build => unreachable!(),
SnapshotConfig::Download(url) => url,
};
validate_or_download_archive_file(&snapshot_urls.ordinals, "hord", config, ctx).await;
if config.meta_protocols.brc20 {
match &snapshot_urls.brc20 {
Some(url) => validate_or_download_archive_file(url, "brc20", config, ctx).await,
None => {
try_warn!(ctx, "No brc20 snapshot url configured");
}
}
}
}

View File

@@ -9,9 +9,7 @@ extern crate lazy_static;
extern crate serde;
pub mod config;
pub mod core;
pub mod db;
pub mod download;
pub mod service;
pub mod utils;

View File

@@ -1,4 +1,3 @@
use crate::config::Config;
use crate::core::meta_protocols::brc20::cache::{brc20_new_cache, Brc20MemoryCache};
use crate::core::pipeline::bitcoind_download_blocks;
use crate::core::pipeline::processors::block_archiving::start_block_archiving_processor;
@@ -10,9 +9,7 @@ use crate::core::{
first_inscription_height, new_traversals_lazy_cache, should_sync_ordinals_db,
should_sync_rocks_db,
};
use crate::db::blocks::{
self, find_missing_blocks, open_blocks_db_with_retry, run_compaction,
};
use crate::db::blocks::{self, find_missing_blocks, open_blocks_db_with_retry, run_compaction};
use crate::db::cursor::{BlockBytesCursor, TransactionBytesCursor};
use crate::db::ordinals_pg;
use crate::utils::monitoring::{start_serving_prometheus_metrics, PrometheusMonitoring};
@@ -24,6 +21,7 @@ use chainhook_sdk::observer::{
use chainhook_sdk::utils::bitcoind::bitcoind_wait_for_chain_tip;
use chainhook_sdk::utils::{BlockHeights, Context};
use chainhook_types::BlockIdentifier;
use config::{Config, OrdinalsMetaProtocolsConfig};
use crossbeam_channel::select;
use dashmap::DashMap;
use deadpool_postgres::Pool;
@@ -49,14 +47,22 @@ pub struct Service {
impl Service {
pub fn new(config: &Config, ctx: &Context) -> Self {
let Some(ordinals_config) = &config.ordinals else {
unreachable!();
};
Self {
prometheus: PrometheusMonitoring::new(),
config: config.clone(),
ctx: ctx.clone(),
pg_pools: PgConnectionPools {
ordinals: pg_pool(&config.ordinals_db).unwrap(),
brc20: match (config.meta_protocols.brc20, &config.brc20_db) {
(true, Some(brc20_db)) => Some(pg_pool(&brc20_db).unwrap()),
ordinals: pg_pool(&ordinals_config.db).unwrap(),
brc20: match &ordinals_config.meta_protocols {
Some(OrdinalsMetaProtocolsConfig {
brc20: Some(brc20), ..
}) => match brc20.enabled {
true => Some(pg_pool(&brc20.db).unwrap()),
false => None,
},
_ => None,
},
},
@@ -85,9 +91,11 @@ impl Service {
pub async fn run(&mut self, check_blocks_integrity: bool) -> Result<(), String> {
// 1: Initialize Prometheus monitoring server.
if let Some(port) = self.config.network.prometheus_monitoring_port {
if let Some(metrics) = &self.config.metrics {
if metrics.enabled {
let registry_moved = self.prometheus.registry.clone();
let ctx_cloned = self.ctx.clone();
let port = metrics.prometheus_port;
let _ = std::thread::spawn(move || {
let _ = hiro_system_kit::nestable_block_on(start_serving_prometheus_metrics(
port,
@@ -96,6 +104,7 @@ impl Service {
));
});
}
}
let (max_inscription_number, chain_tip) = {
let ord_client = pg_pool_client(&self.pg_pools.ordinals).await?;
@@ -122,15 +131,10 @@ impl Service {
let zmq_observer_sidecar = self.set_up_bitcoin_zmq_observer_sidecar()?;
let (observer_command_tx, observer_command_rx) = channel();
let (observer_event_tx, observer_event_rx) = crossbeam_channel::unbounded();
let inner_ctx = if self.config.logs.chainhook_internals {
self.ctx.clone()
} else {
Context::empty()
};
let inner_ctx = self.ctx.clone();
let event_observer_config = self.config.get_event_observer_config();
let _ = start_event_observer(
event_observer_config,
self.config.bitcoind.clone(),
observer_command_tx.clone(),
observer_command_rx,
Some(observer_event_tx),
@@ -224,7 +228,7 @@ impl Service {
}
pub async fn check_blocks_db_integrity(&mut self) -> Result<(), String> {
bitcoind_wait_for_chain_tip(&self.config.network, &self.ctx);
bitcoind_wait_for_chain_tip(&self.config.bitcoind, &self.ctx);
let (tip, missing_blocks) = {
let blocks_db = open_blocks_db_with_retry(false, &self.config, &self.ctx);
let ord_client = pg_pool_client(&self.pg_pools.ordinals).await?;
@@ -263,7 +267,7 @@ impl Service {
/// Synchronizes and indexes all databases until their block height matches bitcoind's block height.
pub async fn catch_up_to_bitcoin_chain_tip(&self) -> Result<(), String> {
// 0: Make sure bitcoind is synchronized.
bitcoind_wait_for_chain_tip(&self.config.network, &self.ctx);
bitcoind_wait_for_chain_tip(&self.config.bitcoind, &self.ctx);
// 1: Catch up blocks DB so it is at least at the same height as the ordinals DB.
if let Some((start_block, end_block)) =

View File

@@ -0,0 +1,37 @@
[package]
name = "runes"
version.workspace = true
edition = "2021"
[dependencies]
chainhook-sdk = { path = "../chainhook-sdk" }
chainhook-types = { path = "../chainhook-types-rs" }
bitcoin = { workspace = true }
lru = "0.12.3"
ordinals = "0.0.15"
bytes = "1.3"
config = { path = "../config" }
serde = "1"
serde_derive = "1"
hex = "0.4.3"
rand = "0.8.5"
hiro-system-kit = { workspace = true }
ctrlc = { version = "3.2.2", optional = true }
# reqwest = { version = "0.11", features = ["stream", "json"] }
crossbeam-channel = "0.5.8"
clap = { version = "4.3.2", features = ["derive"] }
clap_generate = { version = "3.0.3" }
chainhook-postgres = { path = "../chainhook-postgres" }
tokio = { workspace = true }
tokio-postgres = { workspace = true }
deadpool-postgres = { workspace = true }
refinery = { workspace = true }
num-traits = "0.2.14"
maplit = "1.0.2"
[dev-dependencies]
test-case = "3.1.0"
[features]
debug = ["hiro-system-kit/debug"]
release = ["hiro-system-kit/release"]

View File

@@ -0,0 +1,92 @@
use std::collections::HashMap;
use chainhook_sdk::utils::Context;
use tokio_postgres::Transaction;
use crate::{
db::{
models::{
db_balance_change::DbBalanceChange, db_ledger_entry::DbLedgerEntry, db_rune::DbRune,
db_supply_change::DbSupplyChange,
},
pg_insert_balance_changes, pg_insert_ledger_entries, pg_insert_runes,
pg_insert_supply_changes,
},
try_debug, try_info,
};
/// Holds rows that have yet to be inserted into the database.
pub struct DbCache {
pub runes: Vec<DbRune>,
pub ledger_entries: Vec<DbLedgerEntry>,
pub supply_changes: HashMap<String, DbSupplyChange>,
pub balance_increases: HashMap<(String, String), DbBalanceChange>,
pub balance_deductions: HashMap<(String, String), DbBalanceChange>,
}
impl DbCache {
pub fn new() -> Self {
DbCache {
runes: Vec::new(),
ledger_entries: Vec::new(),
supply_changes: HashMap::new(),
balance_increases: HashMap::new(),
balance_deductions: HashMap::new(),
}
}
/// Insert all data into the DB and clear cache.
pub async fn flush(&mut self, db_tx: &mut Transaction<'_>, ctx: &Context) {
try_info!(ctx, "Flushing DB cache...");
        if !self.runes.is_empty() {
try_debug!(ctx, "Flushing {} runes", self.runes.len());
let _ = pg_insert_runes(&self.runes, db_tx, ctx).await;
self.runes.clear();
}
        if !self.supply_changes.is_empty() {
try_debug!(ctx, "Flushing {} supply changes", self.supply_changes.len());
let _ = pg_insert_supply_changes(
&self.supply_changes.values().cloned().collect(),
db_tx,
ctx,
)
.await;
self.supply_changes.clear();
}
        if !self.ledger_entries.is_empty() {
try_debug!(ctx, "Flushing {} ledger entries", self.ledger_entries.len());
let _ = pg_insert_ledger_entries(&self.ledger_entries, db_tx, ctx).await;
self.ledger_entries.clear();
}
        if !self.balance_increases.is_empty() {
try_debug!(
ctx,
"Flushing {} balance increases",
self.balance_increases.len()
);
let _ = pg_insert_balance_changes(
&self.balance_increases.values().cloned().collect(),
true,
db_tx,
ctx,
)
.await;
self.balance_increases.clear();
}
        if !self.balance_deductions.is_empty() {
try_debug!(
ctx,
"Flushing {} balance deductions",
self.balance_deductions.len()
);
let _ = pg_insert_balance_changes(
&self.balance_deductions.values().cloned().collect(),
false,
db_tx,
ctx,
)
.await;
self.balance_deductions.clear();
}
}
}
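// Usage sketch (assumed call site): rows accumulate across transactions and
// blocks, then get batch-inserted inside a single Postgres transaction.
//
//     let mut db_cache = DbCache::new();
//     db_cache.runes.push(db_rune);
//     db_cache.flush(&mut db_tx, &ctx).await; // inserts, then clears buffers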

View File

@@ -0,0 +1,448 @@
use std::{collections::HashMap, num::NonZeroUsize, str::FromStr};
use bitcoin::{Network, ScriptBuf};
use chainhook_sdk::utils::Context;
use chainhook_types::bitcoin::TxIn;
use config::Config;
use lru::LruCache;
use ordinals::{Cenotaph, Edict, Etching, Rune, RuneId, Runestone};
use tokio_postgres::{Client, Transaction};
use crate::{
db::{
cache::utils::input_rune_balances_from_tx_inputs,
models::{
db_balance_change::DbBalanceChange, db_ledger_entry::DbLedgerEntry,
db_ledger_operation::DbLedgerOperation, db_rune::DbRune,
db_supply_change::DbSupplyChange,
},
pg_get_max_rune_number, pg_get_rune_by_id, pg_get_rune_total_mints,
},
try_debug, try_info, try_warn,
};
use super::{
db_cache::DbCache, input_rune_balance::InputRuneBalance, transaction_cache::TransactionCache,
transaction_location::TransactionLocation, utils::move_block_output_cache_to_output_cache,
};
/// Holds rune data across multiple blocks for faster computations. Processes rune events as they happen during transactions and
/// generates database rows for later insertion.
pub struct IndexCache {
pub network: Network,
/// Number to be assigned to the next rune etching.
next_rune_number: u32,
/// LRU cache for runes.
rune_cache: LruCache<RuneId, DbRune>,
/// LRU cache for total mints for runes.
rune_total_mints_cache: LruCache<RuneId, u128>,
/// LRU cache for outputs with rune balances.
output_cache: LruCache<(String, u32), HashMap<RuneId, Vec<InputRuneBalance>>>,
/// Same as above but only for the current block. We use a `HashMap` instead of an LRU cache to make sure we keep all outputs
/// in memory while we index this block. Must be cleared every time a new block is processed.
block_output_cache: HashMap<(String, u32), HashMap<RuneId, Vec<InputRuneBalance>>>,
/// Holds a single transaction's rune cache. Must be cleared every time a new transaction is processed.
tx_cache: TransactionCache,
/// Keeps rows that have not yet been inserted in the DB.
pub db_cache: DbCache,
}
impl IndexCache {
pub async fn new(config: &Config, pg_client: &mut Client, ctx: &Context) -> Self {
let network = config.bitcoind.network;
let cap = NonZeroUsize::new(config.runes.as_ref().unwrap().lru_cache_size).unwrap();
IndexCache {
network,
next_rune_number: pg_get_max_rune_number(pg_client, ctx).await + 1,
rune_cache: LruCache::new(cap),
rune_total_mints_cache: LruCache::new(cap),
output_cache: LruCache::new(cap),
block_output_cache: HashMap::new(),
tx_cache: TransactionCache::new(
TransactionLocation {
network,
block_hash: "".to_string(),
block_height: 1,
timestamp: 0,
tx_index: 0,
tx_id: "".to_string(),
},
HashMap::new(),
HashMap::new(),
None,
0,
),
db_cache: DbCache::new(),
}
}
pub async fn reset_max_rune_number(&mut self, db_tx: &mut Transaction<'_>, ctx: &Context) {
self.next_rune_number = pg_get_max_rune_number(db_tx, ctx).await + 1;
}
/// Creates a fresh transaction index cache.
pub async fn begin_transaction(
&mut self,
location: TransactionLocation,
tx_inputs: &Vec<TxIn>,
eligible_outputs: HashMap<u32, ScriptBuf>,
first_eligible_output: Option<u32>,
total_outputs: u32,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) {
let input_runes = input_rune_balances_from_tx_inputs(
tx_inputs,
&self.block_output_cache,
&mut self.output_cache,
db_tx,
ctx,
)
.await;
#[cfg(not(feature = "release"))]
{
for (rune_id, balances) in input_runes.iter() {
try_debug!(ctx, "INPUT {rune_id} {balances:?} {location}");
}
            if !input_runes.is_empty() {
try_debug!(
ctx,
"First output: {first_eligible_output:?}, total_outputs: {total_outputs}"
);
}
}
self.tx_cache = TransactionCache::new(
location,
input_runes,
eligible_outputs,
first_eligible_output,
total_outputs,
);
}
/// Finalizes the current transaction index cache by moving all unallocated balances to the correct output.
pub fn end_transaction(&mut self, _db_tx: &mut Transaction<'_>, ctx: &Context) {
let entries = self.tx_cache.allocate_remaining_balances(ctx);
self.add_ledger_entries_to_db_cache(&entries);
}
pub fn end_block(&mut self) {
move_block_output_cache_to_output_cache(
&mut self.block_output_cache,
&mut self.output_cache,
);
}
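// Per-block driver sketch (assumed caller): one begin/apply/end cycle per
// transaction, then `end_block` promotes this block's outputs into the
// long-lived LRU output cache.
//
//     for tx in block_transactions {
//         cache.begin_transaction(/* location, inputs, outputs, ... */).await;
//         // apply_runestone / apply_etching / apply_mint / apply_edict ...
//         cache.end_transaction(&mut db_tx, &ctx);
//     }
//     cache.end_block();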
pub async fn apply_runestone(
&mut self,
runestone: &Runestone,
_db_tx: &mut Transaction<'_>,
ctx: &Context,
) {
try_debug!(ctx, "{:?} {}", runestone, self.tx_cache.location);
if let Some(new_pointer) = runestone.pointer {
self.tx_cache.output_pointer = Some(new_pointer);
}
}
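// Per the Runes spec, the pointer handled above redirects unallocated
// balances: instead of defaulting to the first non-OP_RETURN output, they
// are assigned to the pointed-at output once `allocate_remaining_balances`
// runs at the end of the transaction.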
pub async fn apply_cenotaph(
&mut self,
cenotaph: &Cenotaph,
_db_tx: &mut Transaction<'_>,
ctx: &Context,
) {
try_debug!(ctx, "{:?} {}", cenotaph, self.tx_cache.location);
let entries = self.tx_cache.apply_cenotaph_input_burn(cenotaph);
self.add_ledger_entries_to_db_cache(&entries);
}
pub async fn apply_etching(
&mut self,
etching: &Etching,
_db_tx: &mut Transaction<'_>,
ctx: &Context,
) {
let (rune_id, db_rune, entry) = self.tx_cache.apply_etching(etching, self.next_rune_number);
try_info!(
ctx,
"Etching {} ({}) {}",
db_rune.spaced_name,
db_rune.id,
self.tx_cache.location
);
self.db_cache.runes.push(db_rune.clone());
self.rune_cache.put(rune_id, db_rune);
self.add_ledger_entries_to_db_cache(&vec![entry]);
self.next_rune_number += 1;
}
pub async fn apply_cenotaph_etching(
&mut self,
rune: &Rune,
_db_tx: &mut Transaction<'_>,
ctx: &Context,
) {
let (rune_id, db_rune, entry) = self
.tx_cache
.apply_cenotaph_etching(rune, self.next_rune_number);
try_info!(
ctx,
"Etching cenotaph {} ({}) {}",
db_rune.spaced_name,
db_rune.id,
self.tx_cache.location
);
self.db_cache.runes.push(db_rune.clone());
self.rune_cache.put(rune_id, db_rune);
self.add_ledger_entries_to_db_cache(&vec![entry]);
self.next_rune_number += 1;
}
pub async fn apply_mint(
&mut self,
rune_id: &RuneId,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) {
let Some(db_rune) = self.get_cached_rune_by_rune_id(rune_id, db_tx, ctx).await else {
try_warn!(
ctx,
"Rune {} not found for mint {}",
rune_id,
self.tx_cache.location
);
return;
};
let total_mints = self
.get_cached_rune_total_mints(rune_id, db_tx, ctx)
.await
.unwrap_or(0);
if let Some(ledger_entry) = self
.tx_cache
.apply_mint(&rune_id, total_mints, &db_rune, ctx)
{
self.add_ledger_entries_to_db_cache(&vec![ledger_entry.clone()]);
if let Some(total) = self.rune_total_mints_cache.get_mut(rune_id) {
*total += 1;
} else {
self.rune_total_mints_cache.put(rune_id.clone(), 1);
}
}
}
pub async fn apply_cenotaph_mint(
&mut self,
rune_id: &RuneId,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) {
let Some(db_rune) = self.get_cached_rune_by_rune_id(rune_id, db_tx, ctx).await else {
try_warn!(
ctx,
"Rune {} not found for cenotaph mint {}",
rune_id,
self.tx_cache.location
);
return;
};
let total_mints = self
.get_cached_rune_total_mints(rune_id, db_tx, ctx)
.await
.unwrap_or(0);
if let Some(ledger_entry) =
self.tx_cache
.apply_cenotaph_mint(&rune_id, total_mints, &db_rune, ctx)
{
self.add_ledger_entries_to_db_cache(&vec![ledger_entry]);
if let Some(total) = self.rune_total_mints_cache.get_mut(rune_id) {
*total += 1;
} else {
self.rune_total_mints_cache.put(rune_id.clone(), 1);
}
}
}
pub async fn apply_edict(&mut self, edict: &Edict, db_tx: &mut Transaction<'_>, ctx: &Context) {
let Some(db_rune) = self.get_cached_rune_by_rune_id(&edict.id, db_tx, ctx).await else {
try_warn!(
ctx,
"Rune {} not found for edict {}",
edict.id,
self.tx_cache.location
);
return;
};
let entries = self.tx_cache.apply_edict(edict, ctx);
for entry in entries.iter() {
try_info!(
ctx,
"Edict {} {} {}",
db_rune.spaced_name,
entry.amount.unwrap().0,
self.tx_cache.location
);
}
self.add_ledger_entries_to_db_cache(&entries);
}
async fn get_cached_rune_by_rune_id(
&mut self,
rune_id: &RuneId,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) -> Option<DbRune> {
// Id 0:0 is used to mean the rune being etched in this transaction, if any.
if rune_id.block == 0 && rune_id.tx == 0 {
return self.tx_cache.etching.clone();
}
if let Some(cached_rune) = self.rune_cache.get(&rune_id) {
return Some(cached_rune.clone());
}
// Cache miss, look in DB.
self.db_cache.flush(db_tx, ctx).await;
let Some(db_rune) = pg_get_rune_by_id(rune_id, db_tx, ctx).await else {
return None;
};
self.rune_cache.put(rune_id.clone(), db_rune.clone());
return Some(db_rune);
}
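// Note the flush-before-read above: rows still buffered in `DbCache` may
// include the rune being looked up (e.g. one etched earlier in this block),
// so the write buffer is drained before falling back to Postgres.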
async fn get_cached_rune_total_mints(
&mut self,
rune_id: &RuneId,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) -> Option<u128> {
let real_rune_id = if rune_id.block == 0 && rune_id.tx == 0 {
let Some(etching) = self.tx_cache.etching.as_ref() else {
return None;
};
RuneId::from_str(etching.id.as_str()).unwrap()
} else {
rune_id.clone()
};
if let Some(total) = self.rune_total_mints_cache.get(&real_rune_id) {
return Some(*total);
}
// Cache miss, look in DB.
self.db_cache.flush(db_tx, ctx).await;
let Some(total) = pg_get_rune_total_mints(rune_id, db_tx, ctx).await else {
return None;
};
self.rune_total_mints_cache.put(rune_id.clone(), total);
return Some(total);
}
/// Take ledger entries returned by the `TransactionCache` and add them to the `DbCache`. Update global balances and counters
/// as well.
fn add_ledger_entries_to_db_cache(&mut self, entries: &Vec<DbLedgerEntry>) {
self.db_cache.ledger_entries.extend(entries.clone());
for entry in entries.iter() {
match entry.operation {
DbLedgerOperation::Etching => {
self.db_cache
.supply_changes
.entry(entry.rune_id.clone())
.and_modify(|i| {
i.total_operations += 1;
})
.or_insert(DbSupplyChange::from_operation(
entry.rune_id.clone(),
entry.block_height.clone(),
));
}
DbLedgerOperation::Mint => {
self.db_cache
.supply_changes
.entry(entry.rune_id.clone())
.and_modify(|i| {
i.minted += entry.amount.unwrap();
i.total_mints += 1;
i.total_operations += 1;
})
.or_insert(DbSupplyChange::from_mint(
entry.rune_id.clone(),
entry.block_height.clone(),
entry.amount.unwrap(),
));
}
DbLedgerOperation::Burn => {
self.db_cache
.supply_changes
.entry(entry.rune_id.clone())
.and_modify(|i| {
i.burned += entry.amount.unwrap();
i.total_burns += 1;
i.total_operations += 1;
})
.or_insert(DbSupplyChange::from_burn(
entry.rune_id.clone(),
entry.block_height.clone(),
entry.amount.unwrap(),
));
}
DbLedgerOperation::Send => {
self.db_cache
.supply_changes
.entry(entry.rune_id.clone())
.and_modify(|i| i.total_operations += 1)
.or_insert(DbSupplyChange::from_operation(
entry.rune_id.clone(),
entry.block_height.clone(),
));
if let Some(address) = entry.address.clone() {
self.db_cache
.balance_deductions
.entry((entry.rune_id.clone(), address.clone()))
.and_modify(|i| i.balance += entry.amount.unwrap())
.or_insert(DbBalanceChange::from_operation(
entry.rune_id.clone(),
entry.block_height.clone(),
address,
entry.amount.unwrap(),
));
}
}
DbLedgerOperation::Receive => {
self.db_cache
.supply_changes
.entry(entry.rune_id.clone())
.and_modify(|i| i.total_operations += 1)
.or_insert(DbSupplyChange::from_operation(
entry.rune_id.clone(),
entry.block_height.clone(),
));
if let Some(address) = entry.address.clone() {
self.db_cache
.balance_increases
.entry((entry.rune_id.clone(), address.clone()))
.and_modify(|i| i.balance += entry.amount.unwrap())
.or_insert(DbBalanceChange::from_operation(
entry.rune_id.clone(),
entry.block_height.clone(),
address,
entry.amount.unwrap(),
));
                        // Add to the current block's output cache, since this entry receives balance.
let k = (entry.tx_id.clone(), entry.output.unwrap().0);
let rune_id = RuneId::from_str(entry.rune_id.as_str()).unwrap();
let balance = InputRuneBalance {
address: entry.address.clone(),
amount: entry.amount.unwrap().0,
};
let mut default = HashMap::new();
default.insert(rune_id, vec![balance.clone()]);
self.block_output_cache
.entry(k)
.and_modify(|i| {
i.entry(rune_id)
.and_modify(|v| v.push(balance.clone()))
.or_insert(vec![balance]);
})
.or_insert(default);
}
}
}
}
}
}

View File

@@ -0,0 +1,29 @@
#[derive(Debug, Clone)]
pub struct InputRuneBalance {
/// Previous owner of this balance. If this is `None`, it means the balance was just minted or premined.
pub address: Option<String>,
/// How much balance was input to this transaction.
pub amount: u128,
}
#[cfg(test)]
impl InputRuneBalance {
pub fn dummy() -> Self {
InputRuneBalance {
address: Some(
"bc1p8zxlhgdsq6dmkzk4ammzcx55c3hfrg69ftx0gzlnfwq0wh38prds0nzqwf".to_string(),
),
amount: 1000,
}
}
pub fn amount(&mut self, amount: u128) -> &mut Self {
self.amount = amount;
return self;
}
pub fn address(&mut self, address: Option<String>) -> &mut Self {
self.address = address;
return self;
}
}
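// Test-builder usage sketch (cfg(test) only):
//
//     let mut balance = InputRuneBalance::dummy();
//     balance.amount(2000).address(None); // chained in-place mutation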

6 components/runes/src/db/cache/mod.rs vendored Normal file
View File

@@ -0,0 +1,6 @@
pub mod db_cache;
pub mod index_cache;
pub mod input_rune_balance;
pub mod transaction_cache;
pub mod transaction_location;
pub mod utils;

View File

@@ -0,0 +1,631 @@
use bitcoin::ScriptBuf;
use chainhook_sdk::utils::Context;
use ordinals::{Cenotaph, Edict, Etching, Rune, RuneId};
use std::{
collections::{HashMap, VecDeque},
vec,
};
use crate::{
db::{
cache::utils::{is_rune_mintable, new_sequential_ledger_entry},
models::{
db_ledger_entry::DbLedgerEntry, db_ledger_operation::DbLedgerOperation, db_rune::DbRune,
},
},
try_debug, try_info, try_warn,
};
use super::{
input_rune_balance::InputRuneBalance, transaction_location::TransactionLocation,
utils::move_rune_balance_to_output,
};
/// Holds cached data relevant to a single transaction during indexing.
pub struct TransactionCache {
pub location: TransactionLocation,
/// Sequential index of the ledger entry we're inserting next for this transaction. Will be increased with each generated
/// entry.
next_event_index: u32,
/// Rune etched during this transaction, if any.
pub etching: Option<DbRune>,
/// The output where all unallocated runes will be transferred to. Set to the first eligible output by default but can be
/// overridden by a Runestone.
pub output_pointer: Option<u32>,
/// Holds input runes for the current transaction (input to this tx, premined or minted). Balances in the vector are in the
/// order in which they were input to this transaction.
pub input_runes: HashMap<RuneId, VecDeque<InputRuneBalance>>,
/// Non-OP_RETURN outputs in this transaction
eligible_outputs: HashMap<u32, ScriptBuf>,
/// Total outputs contained in this transaction, including non-eligible outputs.
total_outputs: u32,
}
impl TransactionCache {
pub fn new(
location: TransactionLocation,
input_runes: HashMap<RuneId, VecDeque<InputRuneBalance>>,
eligible_outputs: HashMap<u32, ScriptBuf>,
first_eligible_output: Option<u32>,
total_outputs: u32,
) -> Self {
TransactionCache {
location,
next_event_index: 0,
etching: None,
output_pointer: first_eligible_output,
input_runes,
eligible_outputs,
total_outputs,
}
}
#[cfg(test)]
pub fn empty(location: TransactionLocation) -> Self {
TransactionCache {
location,
next_event_index: 0,
etching: None,
output_pointer: None,
input_runes: maplit::hashmap! {},
eligible_outputs: maplit::hashmap! {},
total_outputs: 0,
}
}
/// Burns the rune balances input to this transaction.
pub fn apply_cenotaph_input_burn(&mut self, _cenotaph: &Cenotaph) -> Vec<DbLedgerEntry> {
let mut results = vec![];
for (rune_id, unallocated) in self.input_runes.iter() {
for balance in unallocated {
results.push(new_sequential_ledger_entry(
&self.location,
Some(balance.amount),
*rune_id,
None,
balance.address.as_ref(),
None,
DbLedgerOperation::Burn,
&mut self.next_event_index,
));
}
}
self.input_runes.clear();
results
}
/// Moves remaining input runes to the correct output depending on runestone configuration. Must be called once the processing
/// for a transaction is complete.
pub fn allocate_remaining_balances(&mut self, ctx: &Context) -> Vec<DbLedgerEntry> {
let mut results = vec![];
for (rune_id, unallocated) in self.input_runes.iter_mut() {
#[cfg(not(feature = "release"))]
for input in unallocated.iter() {
try_debug!(
ctx,
"Assign unallocated {} to pointer {:?} {:?} ({}) {}",
rune_id,
self.output_pointer,
input.address,
input.amount,
self.location
);
}
results.extend(move_rune_balance_to_output(
&self.location,
self.output_pointer,
rune_id,
unallocated,
&self.eligible_outputs,
0, // All of it
&mut self.next_event_index,
ctx,
));
}
self.input_runes.clear();
results
}
pub fn apply_etching(
&mut self,
etching: &Etching,
number: u32,
) -> (RuneId, DbRune, DbLedgerEntry) {
let rune_id = self.location.rune_id();
let db_rune = DbRune::from_etching(etching, number, &self.location);
self.etching = Some(db_rune.clone());
// Move pre-mined balance to input runes.
if let Some(premine) = etching.premine {
self.add_input_runes(
&rune_id,
InputRuneBalance {
address: None,
amount: premine,
},
);
}
let entry = new_sequential_ledger_entry(
&self.location,
None,
rune_id,
None,
None,
None,
DbLedgerOperation::Etching,
&mut self.next_event_index,
);
(rune_id, db_rune, entry)
}
pub fn apply_cenotaph_etching(
&mut self,
rune: &Rune,
number: u32,
) -> (RuneId, DbRune, DbLedgerEntry) {
let rune_id = self.location.rune_id();
// If the runestone that produced the cenotaph contained an etching, the etched rune has supply zero and is unmintable.
let db_rune = DbRune::from_cenotaph_etching(rune, number, &self.location);
self.etching = Some(db_rune.clone());
let entry = new_sequential_ledger_entry(
&self.location,
None,
rune_id,
None,
None,
None,
DbLedgerOperation::Etching,
&mut self.next_event_index,
);
(rune_id, db_rune, entry)
}
pub fn apply_mint(
&mut self,
rune_id: &RuneId,
total_mints: u128,
db_rune: &DbRune,
ctx: &Context,
) -> Option<DbLedgerEntry> {
if !is_rune_mintable(db_rune, total_mints, &self.location) {
try_debug!(ctx, "Invalid mint {} {}", rune_id, self.location);
return None;
}
let terms_amount = db_rune.terms_amount.unwrap();
try_info!(
ctx,
"MINT {} ({}) {} {}",
rune_id,
db_rune.spaced_name,
terms_amount.0,
self.location
);
self.add_input_runes(
rune_id,
InputRuneBalance {
address: None,
amount: terms_amount.0,
},
);
Some(new_sequential_ledger_entry(
&self.location,
Some(terms_amount.0),
rune_id.clone(),
None,
None,
None,
DbLedgerOperation::Mint,
&mut self.next_event_index,
))
}
pub fn apply_cenotaph_mint(
&mut self,
rune_id: &RuneId,
total_mints: u128,
db_rune: &DbRune,
ctx: &Context,
) -> Option<DbLedgerEntry> {
if !is_rune_mintable(db_rune, total_mints, &self.location) {
try_debug!(ctx, "Invalid mint {} {}", rune_id, self.location);
return None;
}
let terms_amount = db_rune.terms_amount.unwrap();
try_info!(
ctx,
"CENOTAPH MINT {} {} {}",
db_rune.spaced_name,
terms_amount.0,
self.location
);
// This entry does not go into the input runes; it is burned immediately.
Some(new_sequential_ledger_entry(
&self.location,
Some(terms_amount.0),
rune_id.clone(),
None,
None,
None,
DbLedgerOperation::Burn,
&mut self.next_event_index,
))
}
pub fn apply_edict(&mut self, edict: &Edict, ctx: &Context) -> Vec<DbLedgerEntry> {
// Find this rune.
let rune_id = if edict.id.block == 0 && edict.id.tx == 0 {
let Some(etching) = self.etching.as_ref() else {
try_warn!(
ctx,
"Attempted edict for nonexistent rune 0:0 {}",
self.location
);
return vec![];
};
etching.rune_id()
} else {
edict.id
};
// Take all the available inputs for the rune we're trying to move.
let Some(available_inputs) = self.input_runes.get_mut(&rune_id) else {
try_info!(
ctx,
"No unallocated runes {} remain for edict {}",
edict.id,
self.location
);
return vec![];
};
// Calculate the maximum unallocated balance we can move.
let unallocated = available_inputs.iter().map(|b| b.amount).sum::<u128>();
// Perform movements.
let mut results = vec![];
if self.eligible_outputs.is_empty() {
// No eligible outputs means burn.
try_info!(
ctx,
"No eligible outputs for edict on rune {} {}",
edict.id,
self.location
);
results.extend(move_rune_balance_to_output(
&self.location,
None, // This will force a burn.
&rune_id,
available_inputs,
&self.eligible_outputs,
edict.amount,
&mut self.next_event_index,
ctx,
));
} else {
match edict.output {
// An edict with output equal to the number of transaction outputs allocates `amount` runes to each non-OP_RETURN
// output in order.
output if output == self.total_outputs => {
let mut output_keys: Vec<u32> = self.eligible_outputs.keys().cloned().collect();
output_keys.sort();
if edict.amount == 0 {
// Divide equally. If the number of unallocated runes is not divisible by the number of non-OP_RETURN outputs,
// 1 additional rune is assigned to the first R non-OP_RETURN outputs, where R is the remainder after dividing
// the balance of unallocated units of rune id by the number of non-OP_RETURN outputs.
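// Worked example: 100 unallocated runes across 3 eligible outputs gives
// per_output = 33 and remainder = 1, so the split is 34/33/33.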
let len = self.eligible_outputs.len() as u128;
let per_output = unallocated / len;
let mut remainder = unallocated % len;
for output in output_keys {
let mut extra = 0;
if remainder > 0 {
extra = 1;
remainder -= 1;
}
results.extend(move_rune_balance_to_output(
&self.location,
Some(output),
&rune_id,
available_inputs,
&self.eligible_outputs,
per_output + extra,
&mut self.next_event_index,
ctx,
));
}
} else {
// Give `amount` to all outputs or until unallocated runs out.
for output in output_keys {
let amount = edict.amount.min(unallocated);
results.extend(move_rune_balance_to_output(
&self.location,
Some(output),
&rune_id,
available_inputs,
&self.eligible_outputs,
amount,
&mut self.next_event_index,
ctx,
));
}
}
}
// Send balance to the output specified by the edict.
output if output < self.total_outputs => {
let mut amount = edict.amount;
if edict.amount == 0 {
amount = unallocated;
}
results.extend(move_rune_balance_to_output(
&self.location,
Some(edict.output),
&rune_id,
available_inputs,
&self.eligible_outputs,
amount,
&mut self.next_event_index,
ctx,
));
}
_ => {
try_info!(
ctx,
"Edict for {} attempted move to nonexistent output {}, amount will be burnt {}",
edict.id,
edict.output,
self.location
);
results.extend(move_rune_balance_to_output(
&self.location,
None, // Burn.
&rune_id,
available_inputs,
&self.eligible_outputs,
edict.amount,
&mut self.next_event_index,
ctx,
));
}
}
}
results
}
fn add_input_runes(&mut self, rune_id: &RuneId, entry: InputRuneBalance) {
if let Some(balance) = self.input_runes.get_mut(rune_id) {
balance.push_back(entry);
} else {
let mut vec = VecDeque::new();
vec.push_back(entry);
// `RuneId` is `Copy`, so dereference instead of cloning.
self.input_runes.insert(*rune_id, vec);
}
}
}
#[cfg(test)]
mod test {
use std::collections::VecDeque;
use bitcoin::ScriptBuf;
use chainhook_sdk::utils::Context;
use maplit::hashmap;
use ordinals::{Edict, Etching, Rune, Terms};
use crate::db::{
cache::{
input_rune_balance::InputRuneBalance, transaction_location::TransactionLocation,
utils::is_rune_mintable,
},
models::{db_ledger_operation::DbLedgerOperation, db_rune::DbRune},
};
use super::TransactionCache;
#[test]
fn etches_rune() {
let location = TransactionLocation::dummy();
let mut cache = TransactionCache::empty(location.clone());
let etching = Etching {
divisibility: Some(2),
premine: Some(1000),
rune: Some(Rune::reserved(location.block_height, location.tx_index)),
spacers: None,
symbol: Some('x'),
terms: Some(Terms {
amount: Some(1000),
cap: None,
height: (None, None),
offset: (None, None),
}),
turbo: true,
};
let (rune_id, db_rune, db_ledger_entry) = cache.apply_etching(&etching, 1);
assert_eq!(rune_id.block, 840000);
assert_eq!(rune_id.tx, 0);
assert_eq!(db_rune.id, "840000:0");
assert_eq!(db_rune.name, "AAAAAAAAAAAAAAAAZOMJMODBYFG");
assert_eq!(db_rune.number.0, 1);
assert_eq!(db_ledger_entry.operation, DbLedgerOperation::Etching);
assert_eq!(db_ledger_entry.rune_id, "840000:0");
}
// TODO: add cenotaph field to DbRune before filling this in
#[test]
fn etches_cenotaph_rune() {
let location = TransactionLocation::dummy();
let mut cache = TransactionCache::empty(location.clone());
// Create a cenotaph rune
let rune = Rune::reserved(location.block_height, location.tx_index);
let number = 2;
let (_rune_id, db_rune, db_ledger_entry) = cache.apply_cenotaph_etching(&rune, number);
// The etched rune has supply zero and is unmintable.
assert_eq!(is_rune_mintable(&db_rune, 0, &location), false);
assert_eq!(db_ledger_entry.amount, None);
assert_eq!(db_rune.id, "840000:0");
assert_eq!(db_ledger_entry.operation, DbLedgerOperation::Etching);
assert_eq!(db_ledger_entry.rune_id, "840000:0");
}
#[test]
fn mints_rune() {
let location = TransactionLocation::dummy();
let mut cache = TransactionCache::empty(location.clone());
let db_rune = &DbRune::factory();
let rune_id = &db_rune.rune_id();
let ledger_entry = cache.apply_mint(&rune_id, 0, &db_rune, &Context::empty());
assert!(ledger_entry.is_some());
let ledger_entry = ledger_entry.unwrap();
assert_eq!(ledger_entry.operation, DbLedgerOperation::Mint);
assert_eq!(ledger_entry.rune_id, rune_id.to_string());
// ledger entry is minted with the correct amount
assert_eq!(ledger_entry.amount, Some(db_rune.terms_amount.unwrap()));
// minted amount is added to the input runes (`cache.input_runes`)
assert!(cache.input_runes.contains_key(rune_id));
}
#[test]
fn does_not_mint_fully_minted_rune() {
let location = TransactionLocation::dummy();
let mut cache = TransactionCache::empty(location.clone());
let etching = Etching {
divisibility: Some(2),
premine: Some(1000),
rune: Some(Rune::reserved(location.block_height, location.tx_index)),
spacers: None,
symbol: Some('x'),
terms: Some(Terms {
amount: Some(1000),
cap: Some(1000),
height: (None, None),
offset: (None, None),
}),
turbo: true,
};
let (rune_id, db_rune, _db_ledger_entry) = cache.apply_etching(&etching, 1);
let ledger_entry = cache.apply_mint(&rune_id, 1000, &db_rune, &Context::empty());
assert!(ledger_entry.is_none());
}
#[test]
fn burns_cenotaph_mint() {
let location = TransactionLocation::dummy();
let mut cache = TransactionCache::empty(location.clone());
let db_rune = DbRune::factory();
let rune_id = db_rune.rune_id();
let ledger_entry = cache.apply_cenotaph_mint(&rune_id, 0, &db_rune, &Context::empty());
assert!(ledger_entry.is_some());
let ledger_entry = ledger_entry.unwrap();
assert_eq!(ledger_entry.operation, DbLedgerOperation::Burn);
assert_eq!(
ledger_entry.amount.unwrap().0,
db_rune.terms_amount.unwrap().0
);
}
#[test]
fn moves_runes_with_edict() {
let location = TransactionLocation::dummy();
let db_rune = &DbRune::factory();
let rune_id = &db_rune.rune_id();
let mut balances = VecDeque::new();
let sender_address =
"bc1p3v7r3n4hv63z4s7jkhdzxsay9xem98hxul057w2mwur406zhw8xqrpwp9w".to_string();
let receiver_address =
"bc1p8zxlhgdsq6dmkzk4ammzcx55c3hfrg69ftx0gzlnfwq0wh38prds0nzqwf".to_string();
balances.push_back(InputRuneBalance {
address: Some(sender_address.clone()),
amount: 1000,
});
let input_runes = hashmap! {
rune_id.clone() => balances
};
let eligible_outputs = hashmap! {0=> ScriptBuf::from_hex("5120388dfba1b0069bbb0ad5eef62c1a94c46e91a3454accf40bf34b80f75e2708db").unwrap()};
let mut cache = TransactionCache::new(location, input_runes, eligible_outputs, Some(0), 1);
let edict = Edict {
id: rune_id.clone(),
amount: 1000,
output: 0,
};
let ledger_entry = cache.apply_edict(&edict, &Context::empty());
assert_eq!(ledger_entry.len(), 2);
let receive = ledger_entry.first().unwrap();
assert_eq!(receive.operation, DbLedgerOperation::Receive);
assert_eq!(receive.address, Some(receiver_address.clone()));
let send = ledger_entry.last().unwrap();
assert_eq!(send.operation, DbLedgerOperation::Send);
assert_eq!(send.address, Some(sender_address.clone()));
assert_eq!(send.receiver_address, Some(receiver_address.clone()));
}
#[test]
fn allocates_remaining_runes_to_first_eligible_output() {
let location = TransactionLocation::dummy();
let db_rune = &DbRune::factory();
let rune_id = &db_rune.rune_id();
let mut balances = VecDeque::new();
let sender_address =
"bc1p3v7r3n4hv63z4s7jkhdzxsay9xem98hxul057w2mwur406zhw8xqrpwp9w".to_string();
let receiver_address =
"bc1p8zxlhgdsq6dmkzk4ammzcx55c3hfrg69ftx0gzlnfwq0wh38prds0nzqwf".to_string();
balances.push_back(InputRuneBalance {
address: Some(sender_address.clone()),
amount: 1000,
});
let input_runes = hashmap! {
rune_id.clone() => balances
};
let eligible_outputs = hashmap! {0=> ScriptBuf::from_hex("5120388dfba1b0069bbb0ad5eef62c1a94c46e91a3454accf40bf34b80f75e2708db").unwrap()};
let mut cache = TransactionCache::new(location, input_runes, eligible_outputs, Some(0), 1);
let ledger_entry = cache.allocate_remaining_balances(&Context::empty());
assert_eq!(ledger_entry.len(), 2);
let receive = ledger_entry.first().unwrap();
assert_eq!(receive.operation, DbLedgerOperation::Receive);
assert_eq!(receive.address, Some(receiver_address.clone()));
let send = ledger_entry.last().unwrap();
assert_eq!(send.operation, DbLedgerOperation::Send);
assert_eq!(send.address, Some(sender_address.clone()));
assert_eq!(send.receiver_address, Some(receiver_address.clone()));
}
#[test]
fn allocates_remaining_runes_to_runestone_pointer_output() {
let location = TransactionLocation::dummy();
let db_rune = &DbRune::factory();
let rune_id = &db_rune.rune_id();
let mut balances = VecDeque::new();
let sender_address =
"bc1p3v7r3n4hv63z4s7jkhdzxsay9xem98hxul057w2mwur406zhw8xqrpwp9w".to_string();
let receiver_address =
"bc1p8zxlhgdsq6dmkzk4ammzcx55c3hfrg69ftx0gzlnfwq0wh38prds0nzqwf".to_string();
balances.push_back(InputRuneBalance {
address: Some(sender_address.clone()),
amount: 1000,
});
let input_runes = hashmap! {
rune_id.clone() => balances
};
let eligible_outputs = hashmap! {1=> ScriptBuf::from_hex("5120388dfba1b0069bbb0ad5eef62c1a94c46e91a3454accf40bf34b80f75e2708db").unwrap()};
let mut cache = TransactionCache::new(location, input_runes, eligible_outputs, Some(0), 2);
cache.output_pointer = Some(1);
let ledger_entry = cache.allocate_remaining_balances(&Context::empty());
assert_eq!(ledger_entry.len(), 2);
let receive = ledger_entry.first().unwrap();
assert_eq!(receive.operation, DbLedgerOperation::Receive);
assert_eq!(receive.address, Some(receiver_address.clone()));
let send = ledger_entry.last().unwrap();
assert_eq!(send.operation, DbLedgerOperation::Send);
assert_eq!(send.address, Some(sender_address.clone()));
assert_eq!(send.receiver_address, Some(receiver_address.clone()));
}
}

53
components/runes/src/db/cache/transaction_location.rs vendored Normal file
View File

@@ -0,0 +1,53 @@
use std::fmt;
use bitcoin::Network;
use ordinals::RuneId;
#[derive(Debug, Clone)]
pub struct TransactionLocation {
pub network: Network,
pub block_hash: String,
pub block_height: u64,
pub timestamp: u32,
pub tx_index: u32,
pub tx_id: String,
}
impl TransactionLocation {
pub fn rune_id(&self) -> RuneId {
RuneId {
block: self.block_height,
tx: self.tx_index,
}
}
}
impl fmt::Display for TransactionLocation {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"tx: {} ({}) @{}",
self.tx_id, self.tx_index, self.block_height
)
}
}
#[cfg(test)]
impl TransactionLocation {
pub fn dummy() -> Self {
TransactionLocation {
network: Network::Bitcoin,
block_hash: "0000000000000000000320283a032748cef8227873ff4872689bf23f1cda83a5"
.to_string(),
block_height: 840000,
timestamp: 1713571767,
tx_index: 0,
tx_id: "2bb85f4b004be6da54f766c17c1e855187327112c231ef2ff35ebad0ea67c69e".to_string(),
}
}
pub fn block_height(&mut self, val: u64) -> &mut Self {
self.block_height = val;
self
}
}

937
components/runes/src/db/cache/utils.rs vendored Normal file
View File

@@ -0,0 +1,937 @@
use std::collections::{HashMap, VecDeque};
use bitcoin::{Address, ScriptBuf};
use chainhook_sdk::utils::Context;
use chainhook_types::bitcoin::TxIn;
use lru::LruCache;
use ordinals::RuneId;
use tokio_postgres::Transaction;
use crate::{
db::{
models::{
db_ledger_entry::DbLedgerEntry, db_ledger_operation::DbLedgerOperation, db_rune::DbRune,
},
pg_get_input_rune_balances,
},
try_info, try_warn,
};
use super::{input_rune_balance::InputRuneBalance, transaction_location::TransactionLocation};
/// Takes all transaction inputs and transforms them into rune balances to be allocated for operations. Looks in the current
/// block's output cache first, then in the long-term LRU cache, and finally in the DB on cache misses.
///
/// # Arguments
///
/// * `inputs` - Raw transaction inputs
/// * `block_output_cache` - Cache with output balances produced by the current block
/// * `output_cache` - LRU cache with output balances
/// * `db_tx` - DB transaction
/// * `ctx` - Context
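///
/// # Example
///
/// A sketch following the `input_balances` tests below (variable names are illustrative):
///
/// ```ignore
/// let balances = input_rune_balances_from_tx_inputs(
///     &tx.metadata.inputs, &block_output_cache, &mut output_cache, &mut db_tx, &ctx,
/// ).await;
/// ```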
pub async fn input_rune_balances_from_tx_inputs(
inputs: &Vec<TxIn>,
block_output_cache: &HashMap<(String, u32), HashMap<RuneId, Vec<InputRuneBalance>>>,
output_cache: &mut LruCache<(String, u32), HashMap<RuneId, Vec<InputRuneBalance>>>,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) -> HashMap<RuneId, VecDeque<InputRuneBalance>> {
// Maps input index to all of its rune balances, keeping rune inputs in their original order.
let mut indexed_input_runes = HashMap::new();
let mut cache_misses = vec![];
// Look first in the current block's output cache, then in the long-term LRU cache.
for (i, input) in inputs.iter().enumerate() {
let tx_id = input.previous_output.txid.hash[2..].to_string();
let vout = input.previous_output.vout;
let k = (tx_id.clone(), vout);
if let Some(map) = block_output_cache.get(&k) {
indexed_input_runes.insert(i as u32, map.clone());
} else if let Some(map) = output_cache.get(&k) {
indexed_input_runes.insert(i as u32, map.clone());
} else {
cache_misses.push((i as u32, tx_id, vout));
}
}
// Look for cache misses in the database. We don't need to `flush` the DB cache here because we've already looked in the
// current block's output cache.
if !cache_misses.is_empty() {
let output_balances = pg_get_input_rune_balances(cache_misses, db_tx, ctx).await;
indexed_input_runes.extend(output_balances);
}
let mut final_input_runes: HashMap<RuneId, VecDeque<InputRuneBalance>> = HashMap::new();
let mut input_keys: Vec<u32> = indexed_input_runes.keys().copied().collect();
input_keys.sort();
for key in input_keys.iter() {
let input_value = indexed_input_runes.get(key).unwrap();
for (rune_id, vec) in input_value.iter() {
if let Some(rune) = final_input_runes.get_mut(rune_id) {
rune.extend(vec.clone());
} else {
final_input_runes.insert(*rune_id, VecDeque::from(vec.clone()));
}
}
}
final_input_runes
}
/// Moves data from the current block's output cache to the long-term LRU output cache. Clears the block output cache when done.
///
/// # Arguments
///
/// * `block_output_cache` - Block output cache
/// * `output_cache` - Output LRU cache
pub fn move_block_output_cache_to_output_cache(
block_output_cache: &mut HashMap<(String, u32), HashMap<RuneId, Vec<InputRuneBalance>>>,
output_cache: &mut LruCache<(String, u32), HashMap<RuneId, Vec<InputRuneBalance>>>,
) {
for (k, block_output_map) in block_output_cache.iter() {
if let Some(v) = output_cache.get_mut(k) {
for (rune_id, balances) in block_output_map.iter() {
if let Some(rune_balance) = v.get_mut(rune_id) {
rune_balance.extend(balances.clone());
} else {
v.insert(*rune_id, balances.clone());
}
}
} else {
output_cache.push(k.clone(), block_output_map.clone());
}
}
block_output_cache.clear();
}
/// Creates a new ledger entry while incrementing the `next_event_index`.
pub fn new_sequential_ledger_entry(
location: &TransactionLocation,
amount: Option<u128>,
rune_id: RuneId,
output: Option<u32>,
address: Option<&String>,
receiver_address: Option<&String>,
operation: DbLedgerOperation,
next_event_index: &mut u32,
) -> DbLedgerEntry {
let entry = DbLedgerEntry::from_values(
amount,
rune_id,
&location.block_hash,
location.block_height,
location.tx_index,
*next_event_index,
&location.tx_id,
output,
address,
receiver_address,
operation,
location.timestamp,
);
*next_event_index += 1;
entry
}
/// Moves rune balance from transaction inputs into a transaction output.
///
/// # Arguments
///
/// * `location` - Transaction location.
/// * `output` - Output where runes will be moved to. If `None`, runes are burned.
/// * `rune_id` - Rune that is being moved.
/// * `input_balances` - Balances input to this transaction for this rune. This value will be modified by the moves happening in
/// this function.
/// * `outputs` - Transaction outputs eligible to receive runes.
/// * `amount` - Amount of balance to move. If value is zero, all inputs will be moved to the output.
/// * `next_event_index` - Next sequential event index to create. This value will be modified.
/// * `ctx` - Context.
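///
/// # Example
///
/// A sketch mirroring the `move_balance` tests below, assuming a dummy location, a rune id, and
/// one eligible output script:
///
/// ```ignore
/// let mut inputs = VecDeque::from([InputRuneBalance { address: None, amount: 1000 }]);
/// let entries = move_rune_balance_to_output(
///     &location, Some(0), &rune_id, &mut inputs, &outputs, 0, &mut 0, &ctx,
/// );
/// // One `Receive` entry is generated; `Send` entries appear only for address-backed
/// // inputs (minted or premined balances have no sender).
/// ```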
pub fn move_rune_balance_to_output(
location: &TransactionLocation,
output: Option<u32>,
rune_id: &RuneId,
input_balances: &mut VecDeque<InputRuneBalance>,
outputs: &HashMap<u32, ScriptBuf>,
amount: u128,
next_event_index: &mut u32,
ctx: &Context,
) -> Vec<DbLedgerEntry> {
let mut results = vec![];
// Who is this balance going to?
let receiver_address = if let Some(output) = output {
match outputs.get(&output) {
Some(script) => match Address::from_script(script, location.network) {
Ok(address) => Some(address.to_string()),
Err(e) => {
try_warn!(
ctx,
"Unable to decode address for output {}, {} {}",
output,
e,
location
);
None
}
},
None => {
try_info!(
ctx,
"Attempted move to non-eligible output {}, runes will be burnt {}",
output,
location
);
None
}
}
} else {
None
};
let operation = if receiver_address.is_some() {
DbLedgerOperation::Send
} else {
DbLedgerOperation::Burn
};
// Gather balance to be received by taking it from the available inputs until the amount to move is satisfied.
let mut total_sent = 0;
let mut senders = vec![];
loop {
// Do we still have input balance left to move?
let Some(input_bal) = input_balances.pop_front() else {
break;
};
// Select the correct move amount.
let balance_taken = if amount == 0 {
input_bal.amount
} else {
input_bal.amount.min(amount - total_sent)
};
total_sent += balance_taken;
// If the input balance came from an address, add to `Send` operations.
if let Some(sender_address) = input_bal.address.clone() {
senders.push((balance_taken, sender_address));
}
// Is there still some balance left on this input? If so, keep it for later but break the loop because we've satisfied the
// move amount.
if balance_taken < input_bal.amount {
input_balances.push_front(InputRuneBalance {
address: input_bal.address,
amount: input_bal.amount - balance_taken,
});
break;
}
// Have we finished moving balance?
if total_sent == amount {
break;
}
}
// Add the "receive" entry, if applicable.
if receiver_address.is_some() && total_sent > 0 {
results.push(new_sequential_ledger_entry(
location,
Some(total_sent),
*rune_id,
output,
receiver_address.as_ref(),
None,
DbLedgerOperation::Receive,
next_event_index,
));
try_info!(
ctx,
"{} {} ({}) {} {}",
DbLedgerOperation::Receive,
rune_id,
total_sent,
receiver_address.as_ref().unwrap(),
location
);
}
// Add the "send"/"burn" entries.
for (balance_taken, sender_address) in senders.iter() {
results.push(new_sequential_ledger_entry(
location,
Some(*balance_taken),
*rune_id,
output,
Some(sender_address),
receiver_address.as_ref(),
operation.clone(),
next_event_index,
));
try_info!(
ctx,
"{} {} ({}) {} -> {:?} {}",
operation,
rune_id,
balance_taken,
sender_address,
receiver_address,
location
);
}
results
}
/// Determines if a mint is valid depending on the rune's mint terms.
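///
/// # Example
///
/// A sketch mirroring the `mint_validation` tests below: a rune capped at 50 total mints accepts
/// mint number 49 but rejects number 50.
///
/// ```ignore
/// let mut rune = DbRune::factory();
/// rune.terms_cap(Some(PgNumericU128(50)));
/// assert!(is_rune_mintable(&rune, 49, &TransactionLocation::dummy()));
/// assert!(!is_rune_mintable(&rune, 50, &TransactionLocation::dummy()));
/// ```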
pub fn is_rune_mintable(
db_rune: &DbRune,
total_mints: u128,
location: &TransactionLocation,
) -> bool {
if db_rune.cenotaph {
return false;
}
if db_rune.terms_amount.is_none() {
return false;
}
if let Some(terms_cap) = db_rune.terms_cap {
if total_mints >= terms_cap.0 {
return false;
}
}
if let Some(terms_height_start) = db_rune.terms_height_start {
if location.block_height < terms_height_start.0 {
return false;
}
}
if let Some(terms_height_end) = db_rune.terms_height_end {
if location.block_height > terms_height_end.0 {
return false;
}
}
if let Some(terms_offset_start) = db_rune.terms_offset_start {
if location.block_height < db_rune.block_height.0 + terms_offset_start.0 {
return false;
}
}
if let Some(terms_offset_end) = db_rune.terms_offset_end {
if location.block_height > db_rune.block_height.0 + terms_offset_end.0 {
return false;
}
}
true
}
#[cfg(test)]
mod test {
mod move_balance {
use std::collections::{HashMap, VecDeque};
use bitcoin::ScriptBuf;
use chainhook_sdk::utils::Context;
use maplit::hashmap;
use ordinals::RuneId;
use crate::db::{
cache::{
input_rune_balance::InputRuneBalance, transaction_location::TransactionLocation,
utils::move_rune_balance_to_output,
},
models::db_ledger_operation::DbLedgerOperation,
};
fn dummy_eligible_output() -> HashMap<u32, ScriptBuf> {
hashmap! {
0u32 => ScriptBuf::from_hex(
"5120388dfba1b0069bbb0ad5eef62c1a94c46e91a3454accf40bf34b80f75e2708db",
)
.unwrap()
}
}
#[test]
fn ledger_writes_receive_before_send() {
let address =
Some("bc1p8zxlhgdsq6dmkzk4ammzcx55c3hfrg69ftx0gzlnfwq0wh38prds0nzqwf".to_string());
let mut available_inputs = VecDeque::new();
let mut input1 = InputRuneBalance::dummy();
input1.address(address.clone()).amount(1000);
available_inputs.push_back(input1);
let mut input2 = InputRuneBalance::dummy();
input2.address(None).amount(1000);
available_inputs.push_back(input2);
let eligible_outputs = dummy_eligible_output();
let mut next_event_index = 0;
let results = move_rune_balance_to_output(
&TransactionLocation::dummy(),
Some(0),
&RuneId::new(840000, 25).unwrap(),
&mut available_inputs,
&eligible_outputs,
0,
&mut next_event_index,
&Context::empty(),
);
let receive = results.get(0).unwrap();
assert_eq!(receive.event_index.0, 0u32);
assert_eq!(receive.operation, DbLedgerOperation::Receive);
assert_eq!(receive.amount.unwrap().0, 2000u128);
let send = results.get(1).unwrap();
assert_eq!(send.event_index.0, 1u32);
assert_eq!(send.operation, DbLedgerOperation::Send);
assert_eq!(send.amount.unwrap().0, 1000u128);
assert_eq!(results.len(), 2);
assert_eq!(available_inputs.len(), 0);
}
#[test]
fn move_to_empty_output_is_burned() {
let address =
Some("bc1p8zxlhgdsq6dmkzk4ammzcx55c3hfrg69ftx0gzlnfwq0wh38prds0nzqwf".to_string());
let mut available_inputs = VecDeque::new();
let mut input1 = InputRuneBalance::dummy();
input1.address(address.clone()).amount(1000);
available_inputs.push_back(input1);
let results = move_rune_balance_to_output(
&TransactionLocation::dummy(),
None, // Burn
&RuneId::new(840000, 25).unwrap(),
&mut available_inputs,
&HashMap::new(),
0,
&mut 0,
&Context::empty(),
);
assert_eq!(results.len(), 1);
let entry1 = results.get(0).unwrap();
assert_eq!(entry1.operation, DbLedgerOperation::Burn);
assert_eq!(entry1.address, address);
assert_eq!(entry1.amount.unwrap().0, 1000);
assert_eq!(available_inputs.len(), 0);
}
#[test]
fn moves_partial_input_balance() {
let mut available_inputs = VecDeque::new();
let mut input1 = InputRuneBalance::dummy();
input1.amount(5000); // More than required in this move.
available_inputs.push_back(input1);
let eligible_outputs = dummy_eligible_output();
let results = move_rune_balance_to_output(
&TransactionLocation::dummy(),
Some(0),
&RuneId::new(840000, 25).unwrap(),
&mut available_inputs,
&eligible_outputs,
1000, // Less than total available in first input.
&mut 0,
&Context::empty(),
);
assert_eq!(results.len(), 2);
let entry1 = results.get(0).unwrap();
assert_eq!(entry1.operation, DbLedgerOperation::Receive);
assert_eq!(entry1.amount.unwrap().0, 1000);
let entry2 = results.get(1).unwrap();
assert_eq!(entry2.operation, DbLedgerOperation::Send);
assert_eq!(entry2.amount.unwrap().0, 1000);
// Remainder is still in available inputs.
let remaining = available_inputs.get(0).unwrap();
assert_eq!(remaining.amount, 4000);
}
#[test]
fn moves_insufficient_input_balance() {
let mut available_inputs = VecDeque::new();
let mut input1 = InputRuneBalance::dummy();
input1.amount(1000); // Insufficient.
available_inputs.push_back(input1);
let eligible_outputs = dummy_eligible_output();
let results = move_rune_balance_to_output(
&TransactionLocation::dummy(),
Some(0),
&RuneId::new(840000, 25).unwrap(),
&mut available_inputs,
&eligible_outputs,
3000, // More than total available in input.
&mut 0,
&Context::empty(),
);
assert_eq!(results.len(), 2);
let entry1 = results.get(0).unwrap();
assert_eq!(entry1.operation, DbLedgerOperation::Receive);
assert_eq!(entry1.amount.unwrap().0, 1000);
let entry2 = results.get(1).unwrap();
assert_eq!(entry2.operation, DbLedgerOperation::Send);
assert_eq!(entry2.amount.unwrap().0, 1000);
assert_eq!(available_inputs.len(), 0);
}
#[test]
fn moves_all_remaining_balance() {
let mut available_inputs = VecDeque::new();
let mut input1 = InputRuneBalance::dummy();
input1.amount(6000);
available_inputs.push_back(input1);
let mut input2 = InputRuneBalance::dummy();
input2.amount(2000);
available_inputs.push_back(input2);
let mut input3 = InputRuneBalance::dummy();
input3.amount(2000);
available_inputs.push_back(input3);
let eligible_outputs = dummy_eligible_output();
let results = move_rune_balance_to_output(
&TransactionLocation::dummy(),
Some(0),
&RuneId::new(840000, 25).unwrap(),
&mut available_inputs,
&eligible_outputs,
0, // Move all.
&mut 0,
&Context::empty(),
);
assert_eq!(results.len(), 4);
let entry1 = results.get(0).unwrap();
assert_eq!(entry1.operation, DbLedgerOperation::Receive);
assert_eq!(entry1.amount.unwrap().0, 10000);
let entry2 = results.get(1).unwrap();
assert_eq!(entry2.operation, DbLedgerOperation::Send);
assert_eq!(entry2.amount.unwrap().0, 6000);
let entry3 = results.get(2).unwrap();
assert_eq!(entry3.operation, DbLedgerOperation::Send);
assert_eq!(entry3.amount.unwrap().0, 2000);
let entry4 = results.get(3).unwrap();
assert_eq!(entry4.operation, DbLedgerOperation::Send);
assert_eq!(entry4.amount.unwrap().0, 2000);
assert_eq!(available_inputs.len(), 0);
}
#[test]
fn move_to_output_with_address_failure_is_burned() {
let mut available_inputs = VecDeque::new();
let mut input1 = InputRuneBalance::dummy();
input1.amount(1000);
available_inputs.push_back(input1);
let mut eligible_outputs = HashMap::new();
// Broken script buf that yields no address.
eligible_outputs.insert(0u32, ScriptBuf::from_hex("0101010101").unwrap());
let results = move_rune_balance_to_output(
&TransactionLocation::dummy(),
Some(0),
&RuneId::new(840000, 25).unwrap(),
&mut available_inputs,
&eligible_outputs,
1000,
&mut 0,
&Context::empty(),
);
assert_eq!(results.len(), 1);
let entry1 = results.get(0).unwrap();
assert_eq!(entry1.operation, DbLedgerOperation::Burn);
assert_eq!(entry1.amount.unwrap().0, 1000);
assert_eq!(available_inputs.len(), 0);
}
#[test]
fn move_to_nonexistent_output_is_burned() {
let mut available_inputs = VecDeque::new();
let mut input1 = InputRuneBalance::dummy();
input1.amount(1000);
available_inputs.push_back(input1);
let eligible_outputs = dummy_eligible_output();
let results = move_rune_balance_to_output(
&TransactionLocation::dummy(),
Some(5), // Output does not exist.
&RuneId::new(840000, 25).unwrap(),
&mut available_inputs,
&eligible_outputs,
1000,
&mut 0,
&Context::empty(),
);
assert_eq!(results.len(), 1);
let entry1 = results.get(0).unwrap();
assert_eq!(entry1.operation, DbLedgerOperation::Burn);
assert_eq!(entry1.amount.unwrap().0, 1000);
assert_eq!(available_inputs.len(), 0);
}
#[test]
fn send_not_generated_on_minted_balance() {
let mut available_inputs = VecDeque::new();
let mut input1 = InputRuneBalance::dummy();
input1.amount(1000).address(None); // No address because it's a mint.
available_inputs.push_back(input1);
let eligible_outputs = dummy_eligible_output();
let results = move_rune_balance_to_output(
&TransactionLocation::dummy(),
Some(0),
&RuneId::new(840000, 25).unwrap(),
&mut available_inputs,
&eligible_outputs,
1000,
&mut 0,
&Context::empty(),
);
assert_eq!(results.len(), 1);
let entry1 = results.get(0).unwrap();
assert_eq!(entry1.operation, DbLedgerOperation::Receive);
assert_eq!(entry1.amount.unwrap().0, 1000);
assert_eq!(available_inputs.len(), 0);
}
}
mod mint_validation {
use chainhook_postgres::types::{PgNumericU128, PgNumericU64};
use test_case::test_case;
use crate::db::{
cache::{transaction_location::TransactionLocation, utils::is_rune_mintable},
models::db_rune::DbRune,
};
#[test_case(840000 => false; "early block")]
#[test_case(840500 => false; "late block")]
#[test_case(840150 => true; "block in window")]
#[test_case(840100 => true; "first block")]
#[test_case(840200 => true; "last block")]
fn mint_block_height_terms_are_validated(block_height: u64) -> bool {
let mut rune = DbRune::factory();
rune.terms_height_start(Some(PgNumericU64(840100)));
rune.terms_height_end(Some(PgNumericU64(840200)));
let mut location = TransactionLocation::dummy();
location.block_height(block_height);
is_rune_mintable(&rune, 0, &location)
}
#[test_case(840000 => false; "early block")]
#[test_case(840500 => false; "late block")]
#[test_case(840150 => true; "block in window")]
#[test_case(840100 => true; "first block")]
#[test_case(840200 => true; "last block")]
fn mint_block_offset_terms_are_validated(block_height: u64) -> bool {
let mut rune = DbRune::factory();
rune.terms_offset_start(Some(PgNumericU64(100)));
rune.terms_offset_end(Some(PgNumericU64(200)));
let mut location = TransactionLocation::dummy();
location.block_height(block_height);
is_rune_mintable(&rune, 0, &location)
}
#[test_case(0 => true; "first mint")]
#[test_case(49 => true; "last mint")]
#[test_case(50 => false; "out of range")]
fn mint_cap_is_validated(cap: u128) -> bool {
let mut rune = DbRune::factory();
rune.terms_cap(Some(PgNumericU128(50)));
is_rune_mintable(&rune, cap, &TransactionLocation::dummy())
}
}
mod sequential_ledger_entry {
use ordinals::RuneId;
use crate::db::{
cache::{
transaction_location::TransactionLocation, utils::new_sequential_ledger_entry,
},
models::db_ledger_operation::DbLedgerOperation,
};
#[test]
fn increments_event_index() {
let location = TransactionLocation::dummy();
let rune_id = RuneId::new(840000, 25).unwrap();
let address =
Some("bc1p8zxlhgdsq6dmkzk4ammzcx55c3hfrg69ftx0gzlnfwq0wh38prds0nzqwf".to_string());
let mut event_index = 0u32;
let event0 = new_sequential_ledger_entry(
&location,
Some(100),
rune_id,
Some(0),
address.as_ref(),
None,
DbLedgerOperation::Receive,
&mut event_index,
);
assert_eq!(event0.event_index.0, 0);
assert_eq!(event0.amount.unwrap().0, 100);
assert_eq!(event0.address, address);
let event1 = new_sequential_ledger_entry(
&location,
Some(300),
rune_id,
Some(0),
None,
None,
DbLedgerOperation::Receive,
&mut event_index,
);
assert_eq!(event1.event_index.0, 1);
assert_eq!(event1.amount.unwrap().0, 300);
assert_eq!(event1.address, None);
assert_eq!(event_index, 2);
}
}
mod input_balances {
use std::num::NonZeroUsize;
use chainhook_sdk::utils::Context;
use chainhook_types::{bitcoin::{OutPoint, TxIn}, TransactionIdentifier};
use lru::LruCache;
use maplit::hashmap;
use ordinals::RuneId;
use crate::db::{
cache::{
input_rune_balance::InputRuneBalance, utils::input_rune_balances_from_tx_inputs,
},
models::{db_ledger_entry::DbLedgerEntry, db_ledger_operation::DbLedgerOperation},
pg_insert_ledger_entries, pg_test_client, pg_test_roll_back_migrations,
};
#[tokio::test]
async fn from_block_cache() {
let inputs = vec![TxIn {
previous_output: OutPoint {
txid: TransactionIdentifier {
hash: "0x045fe33f1174d6a72084e751735a89746a259c6d3e418b65c03ec0740f924c7b"
.to_string(),
},
vout: 1,
value: 100,
block_height: 840000,
},
script_sig: "".to_string(),
sequence: 0,
witness: vec![],
}];
let rune_id = RuneId::new(840000, 25).unwrap();
let block_output_cache = hashmap! {
("045fe33f1174d6a72084e751735a89746a259c6d3e418b65c03ec0740f924c7b"
.to_string(), 1) => hashmap! {
rune_id => vec![InputRuneBalance { address: None, amount: 2000 }]
}
};
let mut output_cache = LruCache::new(NonZeroUsize::new(1).unwrap());
let ctx = Context::empty();
let mut pg_client = pg_test_client(true, &ctx).await;
let mut db_tx = pg_client.transaction().await.unwrap();
let results = input_rune_balances_from_tx_inputs(
&inputs,
&block_output_cache,
&mut output_cache,
&mut db_tx,
&ctx,
)
.await;
let _ = db_tx.rollback().await;
pg_test_roll_back_migrations(&mut pg_client, &ctx).await;
assert_eq!(results.len(), 1);
let rune_results = results.get(&rune_id).unwrap();
assert_eq!(rune_results.len(), 1);
let input_bal = rune_results.get(0).unwrap();
assert_eq!(input_bal.address, None);
assert_eq!(input_bal.amount, 2000);
}
#[tokio::test]
async fn from_lru_cache() {
let inputs = vec![TxIn {
previous_output: OutPoint {
txid: TransactionIdentifier {
hash: "0x045fe33f1174d6a72084e751735a89746a259c6d3e418b65c03ec0740f924c7b"
.to_string(),
},
vout: 1,
value: 100,
block_height: 840000,
},
script_sig: "".to_string(),
sequence: 0,
witness: vec![],
}];
let rune_id = RuneId::new(840000, 25).unwrap();
let block_output_cache = hashmap! {};
let mut output_cache = LruCache::new(NonZeroUsize::new(1).unwrap());
output_cache.put(
(
"045fe33f1174d6a72084e751735a89746a259c6d3e418b65c03ec0740f924c7b".to_string(),
1,
),
hashmap! {
rune_id => vec![InputRuneBalance { address: None, amount: 2000 }]
},
);
let ctx = Context::empty();
let mut pg_client = pg_test_client(true, &ctx).await;
let mut db_tx = pg_client.transaction().await.unwrap();
let results = input_rune_balances_from_tx_inputs(
&inputs,
&block_output_cache,
&mut output_cache,
&mut db_tx,
&ctx,
)
.await;
let _ = db_tx.rollback().await;
pg_test_roll_back_migrations(&mut pg_client, &ctx).await;
assert_eq!(results.len(), 1);
let rune_results = results.get(&rune_id).unwrap();
assert_eq!(rune_results.len(), 1);
let input_bal = rune_results.get(0).unwrap();
assert_eq!(input_bal.address, None);
assert_eq!(input_bal.amount, 2000);
}
#[tokio::test]
async fn from_db() {
let inputs = vec![TxIn {
previous_output: OutPoint {
txid: TransactionIdentifier {
hash: "0x045fe33f1174d6a72084e751735a89746a259c6d3e418b65c03ec0740f924c7b"
.to_string(),
},
vout: 1,
value: 100,
block_height: 840000,
},
script_sig: "".to_string(),
sequence: 0,
witness: vec![],
}];
let rune_id = RuneId::new(840000, 25).unwrap();
let block_output_cache = hashmap! {};
let mut output_cache = LruCache::new(NonZeroUsize::new(1).unwrap());
let ctx = Context::empty();
let mut pg_client = pg_test_client(true, &ctx).await;
let mut db_tx = pg_client.transaction().await.unwrap();
let entry = DbLedgerEntry::from_values(
Some(2000),
rune_id,
&"0x0000000000000000000044642cc1f64c22579d46a2a149ef2a51f9c98cb622e1".to_string(),
840000,
0,
0,
&"0x045fe33f1174d6a72084e751735a89746a259c6d3e418b65c03ec0740f924c7b".to_string(),
Some(1),
None,
None,
DbLedgerOperation::Receive,
0,
);
let _ = pg_insert_ledger_entries(&vec![entry], &mut db_tx, &ctx).await;
let results = input_rune_balances_from_tx_inputs(
&inputs,
&block_output_cache,
&mut output_cache,
&mut db_tx,
&ctx,
)
.await;
let _ = db_tx.rollback().await;
pg_test_roll_back_migrations(&mut pg_client, &ctx).await;
assert_eq!(results.len(), 1);
let rune_results = results.get(&rune_id).unwrap();
assert_eq!(rune_results.len(), 1);
let input_bal = rune_results.get(0).unwrap();
assert_eq!(input_bal.address, None);
assert_eq!(input_bal.amount, 2000);
}
#[tokio::test]
async fn inputs_without_balances() {
let inputs = vec![TxIn {
previous_output: OutPoint {
txid: TransactionIdentifier {
hash: "0x045fe33f1174d6a72084e751735a89746a259c6d3e418b65c03ec0740f924c7b"
.to_string(),
},
vout: 1,
value: 100,
block_height: 840000,
},
script_sig: "".to_string(),
sequence: 0,
witness: vec![],
}];
let block_output_cache = hashmap! {};
let mut output_cache = LruCache::new(NonZeroUsize::new(1).unwrap());
let ctx = Context::empty();
let mut pg_client = pg_test_client(true, &ctx).await;
let mut db_tx = pg_client.transaction().await.unwrap();
let results = input_rune_balances_from_tx_inputs(
&inputs,
&block_output_cache,
&mut output_cache,
&mut db_tx,
&ctx,
)
.await;
let _ = db_tx.rollback().await;
pg_test_roll_back_migrations(&mut pg_client, &ctx).await;
assert_eq!(results.len(), 0);
}
}
mod cache_move {
use std::num::NonZeroUsize;
use lru::LruCache;
use maplit::hashmap;
use ordinals::RuneId;
use crate::db::cache::{
input_rune_balance::InputRuneBalance, utils::move_block_output_cache_to_output_cache,
};
#[test]
fn moves_to_lru_output_cache_and_clears() {
let rune_id = RuneId::new(840000, 25).unwrap();
let mut block_output_cache = hashmap! {
("045fe33f1174d6a72084e751735a89746a259c6d3e418b65c03ec0740f924c7b"
.to_string(), 1) => hashmap! {
rune_id => vec![InputRuneBalance { address: None, amount: 2000 }]
}
};
let mut output_cache = LruCache::new(NonZeroUsize::new(1).unwrap());
move_block_output_cache_to_output_cache(&mut block_output_cache, &mut output_cache);
let moved_val = output_cache
.get(&(
"045fe33f1174d6a72084e751735a89746a259c6d3e418b65c03ec0740f924c7b".to_string(),
1,
))
.unwrap();
assert_eq!(moved_val.len(), 1);
let balances = moved_val.get(&rune_id).unwrap();
assert_eq!(balances.len(), 1);
let balance = balances.get(0).unwrap();
assert_eq!(balance.address, None);
assert_eq!(balance.amount, 2000);
assert_eq!(block_output_cache.len(), 0);
}
}
}

176
components/runes/src/db/index.rs vendored Normal file
View File

@@ -0,0 +1,176 @@
use std::collections::HashMap;
use bitcoin::absolute::LockTime;
use bitcoin::transaction::TxOut;
use bitcoin::transaction::Version;
use bitcoin::Amount;
use bitcoin::Network;
use bitcoin::ScriptBuf;
use bitcoin::Transaction;
use chainhook_sdk::utils::Context;
use chainhook_types::BitcoinBlockData;
use chainhook_types::BitcoinTransactionData;
use ordinals::Artifact;
use ordinals::Runestone;
use tokio_postgres::Client;
use crate::db::cache::transaction_location::TransactionLocation;
use crate::db::pg_roll_back_block;
use crate::try_info;
use super::cache::index_cache::IndexCache;
pub fn get_rune_genesis_block_height(network: Network) -> u64 {
match network {
Network::Bitcoin => 840_000,
Network::Testnet => todo!(),
Network::Signet => todo!(),
Network::Regtest => todo!(),
_ => todo!(),
}
}
/// Transforms a Bitcoin transaction from the Chainhook format into the `bitcoin` crate's format so the `ord` crate can parse it
/// for `Artifact`s. Also collects all non-OP_RETURN outputs and returns them, along with the first eligible output index and the
/// total output count, so they can later receive runes.
fn bitcoin_tx_from_chainhook_tx(
block: &BitcoinBlockData,
tx: &BitcoinTransactionData,
) -> (Transaction, HashMap<u32, ScriptBuf>, Option<u32>, u32) {
let mut outputs = vec![];
let mut eligible_outputs = HashMap::new();
let mut first_eligible_output: Option<u32> = None;
for (i, output) in tx.metadata.outputs.iter().enumerate() {
let script = ScriptBuf::from_bytes(output.get_script_pubkey_bytes());
if !script.is_op_return() {
eligible_outputs.insert(i as u32, script.clone());
if first_eligible_output.is_none() {
first_eligible_output = Some(i as u32);
}
}
outputs.push(TxOut {
value: Amount::from_sat(output.value),
script_pubkey: script,
});
}
(
Transaction {
version: Version::TWO,
lock_time: LockTime::from_time(block.timestamp).unwrap(),
// Inputs don't matter for Runestone parsing.
input: vec![],
output: outputs,
},
eligible_outputs,
first_eligible_output,
tx.metadata.outputs.len() as u32,
)
}
/// Index a Bitcoin block for runes data.
pub async fn index_block(
pg_client: &mut Client,
index_cache: &mut IndexCache,
block: &mut BitcoinBlockData,
ctx: &Context,
) {
let stopwatch = std::time::Instant::now();
let block_hash = &block.block_identifier.hash;
let block_height = block.block_identifier.index;
try_info!(ctx, "Indexing block {}...", block_height);
let mut db_tx = pg_client
.transaction()
.await
.expect("Unable to begin block processing pg transaction");
index_cache.reset_max_rune_number(&mut db_tx, ctx).await;
for tx in block.transactions.iter() {
let (transaction, eligible_outputs, first_eligible_output, total_outputs) =
bitcoin_tx_from_chainhook_tx(block, tx);
let tx_index = tx.metadata.index;
let tx_id = &tx.transaction_identifier.hash;
let location = TransactionLocation {
network: index_cache.network,
block_hash: block_hash.clone(),
block_height,
tx_index,
tx_id: tx_id.clone(),
timestamp: block.timestamp,
};
index_cache
.begin_transaction(
location,
&tx.metadata.inputs,
eligible_outputs,
first_eligible_output,
total_outputs,
&mut db_tx,
ctx,
)
.await;
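// Decipher any runestone in this tx and apply its effects in order: the runestone
// itself, then its etching, mint, and edicts. Cenotaphs burn instead of transfer.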
if let Some(artifact) = Runestone::decipher(&transaction) {
match artifact {
Artifact::Runestone(runestone) => {
index_cache
.apply_runestone(&runestone, &mut db_tx, ctx)
.await;
if let Some(etching) = runestone.etching {
index_cache.apply_etching(&etching, &mut db_tx, ctx).await;
}
if let Some(mint_rune_id) = runestone.mint {
index_cache.apply_mint(&mint_rune_id, &mut db_tx, ctx).await;
}
for edict in runestone.edicts.iter() {
index_cache.apply_edict(edict, &mut db_tx, ctx).await;
}
}
Artifact::Cenotaph(cenotaph) => {
index_cache.apply_cenotaph(&cenotaph, &mut db_tx, ctx).await;
if let Some(etching) = cenotaph.etching {
index_cache
.apply_cenotaph_etching(&etching, &mut db_tx, ctx)
.await;
}
if let Some(mint_rune_id) = cenotaph.mint {
index_cache
.apply_cenotaph_mint(&mint_rune_id, &mut db_tx, ctx)
.await;
}
}
}
}
index_cache.end_transaction(&mut db_tx, ctx);
}
index_cache.end_block();
index_cache.db_cache.flush(&mut db_tx, ctx).await;
db_tx
.commit()
.await
.expect("Unable to commit pg transaction");
try_info!(
ctx,
"Block {} indexed in {}s",
block_height,
stopwatch.elapsed().as_millis() as f32 / 1000.0
);
}
/// Roll back a Bitcoin block because of a re-org.
pub async fn roll_back_block(pg_client: &mut Client, block_height: u64, ctx: &Context) {
let stopwatch = std::time::Instant::now();
try_info!(ctx, "Rolling back block {}...", block_height);
let mut db_tx = pg_client
.transaction()
.await
.expect("Unable to begin block roll back pg transaction");
pg_roll_back_block(block_height, &mut db_tx, ctx).await;
db_tx
.commit()
.await
.expect("Unable to commit pg transaction");
try_info!(
ctx,
"Block {} rolled back in {}s",
block_height,
stopwatch.elapsed().as_millis() as f32 / 1000.0
);
}

571
components/runes/src/db/mod.rs vendored Normal file
View File

@@ -0,0 +1,571 @@
use std::{collections::HashMap, process, str::FromStr};
use cache::input_rune_balance::InputRuneBalance;
use chainhook_postgres::types::{PgBigIntU32, PgNumericU128, PgNumericU64};
use chainhook_sdk::utils::Context;
use config::Config;
use models::{
db_balance_change::DbBalanceChange, db_ledger_entry::DbLedgerEntry, db_rune::DbRune,
db_supply_change::DbSupplyChange,
};
use ordinals::RuneId;
use refinery::embed_migrations;
use tokio_postgres::{types::ToSql, Client, Error, GenericClient, NoTls, Transaction};
use crate::{try_error, try_info};
pub mod cache;
pub mod index;
pub mod models;
embed_migrations!("../../migrations/runes");
async fn pg_run_migrations(pg_client: &mut Client, ctx: &Context) {
try_info!(ctx, "Running postgres migrations");
match migrations::runner()
.set_migration_table_name("pgmigrations")
.run_async(pg_client)
.await
{
Ok(_) => {}
Err(e) => {
try_error!(ctx, "Error running pg migrations: {}", e.to_string());
process::exit(1);
}
};
try_info!(ctx, "Postgres migrations complete");
}
pub async fn pg_connect(config: &Config, run_migrations: bool, ctx: &Context) -> Client {
let db_config = &config.runes.as_ref().unwrap().db;
let mut pg_config = tokio_postgres::Config::new();
pg_config
.dbname(&db_config.dbname)
.host(&db_config.host)
.port(db_config.port)
.user(&db_config.user);
if let Some(password) = db_config.password.as_ref() {
pg_config.password(password);
}
try_info!(
ctx,
"Connecting to postgres at {}:{}",
db_config.host,
db_config.port
);
let mut pg_client: Client;
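// Retry once per second until postgres accepts the connection.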
loop {
match pg_config.connect(NoTls).await {
Ok((client, connection)) => {
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("Postgres connection error: {}", e.to_string());
process::exit(1);
}
});
pg_client = client;
break;
}
Err(e) => {
try_error!(ctx, "Error connecting to postgres: {}", e.to_string());
std::thread::sleep(std::time::Duration::from_secs(1));
}
}
}
if run_migrations {
pg_run_migrations(&mut pg_client, ctx).await;
}
pg_client
}
pub async fn pg_insert_runes(
rows: &Vec<DbRune>,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) -> Result<bool, Error> {
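// Insert in chunks of 500 rows; at 19 bind parameters per row this stays well
// below postgres' 65535-parameter protocol limit.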
for chunk in rows.chunks(500) {
let mut arg_num = 1;
let mut arg_str = String::new();
let mut params: Vec<&(dyn ToSql + Sync)> = vec![];
for row in chunk.iter() {
arg_str.push_str("(");
for i in 0..19 {
arg_str.push_str(format!("${},", arg_num + i).as_str());
}
arg_str.pop();
arg_str.push_str("),");
arg_num += 19;
params.push(&row.id);
params.push(&row.number);
params.push(&row.name);
params.push(&row.spaced_name);
params.push(&row.block_hash);
params.push(&row.block_height);
params.push(&row.tx_index);
params.push(&row.tx_id);
params.push(&row.divisibility);
params.push(&row.premine);
params.push(&row.symbol);
params.push(&row.terms_amount);
params.push(&row.terms_cap);
params.push(&row.terms_height_start);
params.push(&row.terms_height_end);
params.push(&row.terms_offset_start);
params.push(&row.terms_offset_end);
params.push(&row.turbo);
params.push(&row.timestamp);
}
arg_str.pop();
match db_tx
.query(
&format!("INSERT INTO runes
(id, number, name, spaced_name, block_hash, block_height, tx_index, tx_id, divisibility, premine, symbol,
terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo,
timestamp) VALUES {}
ON CONFLICT (name) DO NOTHING", arg_str),
&params,
)
.await
{
Ok(_) => {}
Err(e) => {
try_error!(ctx, "Error inserting runes: {:?}", e);
process::exit(1);
}
};
}
Ok(true)
}
pub async fn pg_insert_supply_changes(
rows: &Vec<DbSupplyChange>,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) -> Result<bool, Error> {
for chunk in rows.chunks(500) {
let mut arg_num = 1;
let mut arg_str = String::new();
let mut params: Vec<&(dyn ToSql + Sync)> = vec![];
for row in chunk.iter() {
arg_str.push_str(
format!(
"(${},${}::numeric,${}::numeric,${}::numeric,${}::numeric,${}::numeric,${}::numeric),",
arg_num,
arg_num + 1,
arg_num + 2,
arg_num + 3,
arg_num + 4,
arg_num + 5,
arg_num + 6
)
.as_str(),
);
arg_num += 7;
params.push(&row.rune_id);
params.push(&row.block_height);
params.push(&row.minted);
params.push(&row.total_mints);
params.push(&row.burned);
params.push(&row.total_burns);
params.push(&row.total_operations);
}
arg_str.pop();
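// Supply rows are cumulative: take each rune's latest totals, add this block's
// deltas on top, and upsert the result keyed on (rune_id, block_height).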
match db_tx
.query(
&format!("
WITH changes (rune_id, block_height, minted, total_mints, burned, total_burns, total_operations) AS (VALUES {}),
previous AS (
SELECT DISTINCT ON (rune_id) *
FROM supply_changes
WHERE rune_id IN (SELECT rune_id FROM changes)
ORDER BY rune_id, block_height DESC
),
inserts AS (
SELECT c.rune_id,
c.block_height,
COALESCE(p.minted, 0) + c.minted AS minted,
COALESCE(p.total_mints, 0) + c.total_mints AS total_mints,
COALESCE(p.burned, 0) + c.burned AS burned,
COALESCE(p.total_burns, 0) + c.total_burns AS total_burns,
COALESCE(p.total_operations, 0) + c.total_operations AS total_operations
FROM changes AS c
LEFT JOIN previous AS p ON c.rune_id = p.rune_id
)
INSERT INTO supply_changes (rune_id, block_height, minted, total_mints, burned, total_burns, total_operations)
(SELECT * FROM inserts)
ON CONFLICT (rune_id, block_height) DO UPDATE SET
minted = EXCLUDED.minted,
total_mints = EXCLUDED.total_mints,
burned = EXCLUDED.burned,
total_burns = EXCLUDED.total_burns,
total_operations = EXCLUDED.total_operations
", arg_str),
&params,
)
.await
{
Ok(_) => {}
Err(e) => {
try_error!(ctx, "Error inserting supply changes: {:?}", e);
process::exit(1);
}
};
}
Ok(true)
}
pub async fn pg_insert_balance_changes(
rows: &Vec<DbBalanceChange>,
increase: bool,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) -> Result<bool, Error> {
let sign = if increase { "+" } else { "-" };
for chunk in rows.chunks(500) {
let mut arg_num = 1;
let mut arg_str = String::new();
let mut params: Vec<&(dyn ToSql + Sync)> = vec![];
for row in chunk.iter() {
arg_str.push_str(
format!(
"(${},${}::numeric,${},${}::numeric,${}::bigint),",
arg_num,
arg_num + 1,
arg_num + 2,
arg_num + 3,
arg_num + 4
)
.as_str(),
);
arg_num += 5;
params.push(&row.rune_id);
params.push(&row.block_height);
params.push(&row.address);
params.push(&row.balance);
params.push(&row.total_operations);
}
arg_str.pop();
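// Same cumulative pattern as supply changes, with the balance delta applied
// using the sign chosen above (increase or decrease).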
match db_tx
.query(
&format!("WITH changes (rune_id, block_height, address, balance, total_operations) AS (VALUES {}),
previous AS (
SELECT DISTINCT ON (rune_id, address) *
FROM balance_changes
WHERE (rune_id, address) IN (SELECT rune_id, address FROM changes)
ORDER BY rune_id, address, block_height DESC
),
inserts AS (
SELECT c.rune_id, c.block_height, c.address, COALESCE(p.balance, 0) {} c.balance AS balance,
COALESCE(p.total_operations, 0) + c.total_operations AS total_operations
FROM changes AS c
LEFT JOIN previous AS p ON c.rune_id = p.rune_id AND c.address = p.address
)
INSERT INTO balance_changes (rune_id, block_height, address, balance, total_operations)
(SELECT * FROM inserts)
ON CONFLICT (rune_id, block_height, address) DO UPDATE SET
balance = EXCLUDED.balance,
total_operations = EXCLUDED.total_operations", arg_str, sign),
&params,
)
.await
{
Ok(_) => {}
Err(e) => {
try_error!(ctx, "Error inserting balance changes: {:?}", e);
process::exit(1);
}
};
}
Ok(true)
}
pub async fn pg_insert_ledger_entries(
rows: &Vec<DbLedgerEntry>,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) -> Result<bool, Error> {
for chunk in rows.chunks(500) {
let mut arg_num = 1;
let mut arg_str = String::new();
let mut params: Vec<&(dyn ToSql + Sync)> = vec![];
for row in chunk.iter() {
arg_str.push_str("(");
for i in 0..12 {
arg_str.push_str(format!("${},", arg_num + i).as_str());
}
arg_str.pop();
arg_str.push_str("),");
arg_num += 12;
params.push(&row.rune_id);
params.push(&row.block_hash);
params.push(&row.block_height);
params.push(&row.tx_index);
params.push(&row.event_index);
params.push(&row.tx_id);
params.push(&row.output);
params.push(&row.address);
params.push(&row.receiver_address);
params.push(&row.amount);
params.push(&row.operation);
params.push(&row.timestamp);
}
arg_str.pop();
match db_tx
.query(
&format!("INSERT INTO ledger
(rune_id, block_hash, block_height, tx_index, event_index, tx_id, output, address, receiver_address, amount,
operation, timestamp)
VALUES {}", arg_str),
&params,
)
.await
{
Ok(_) => {}
Err(e) => {
try_error!(ctx, "Error inserting ledger entries: {:?}", e);
process::exit(1);
}
};
}
Ok(true)
}
pub async fn pg_roll_back_block(block_height: u64, db_tx: &mut Transaction<'_>, _ctx: &Context) {
db_tx
.execute(
"DELETE FROM balance_changes WHERE block_height = $1",
&[&PgNumericU64(block_height)],
)
.await
.expect("error rolling back balance_changes");
db_tx
.execute(
"DELETE FROM supply_changes WHERE block_height = $1",
&[&PgNumericU64(block_height)],
)
.await
.expect("error rolling back supply_changes");
db_tx
.execute(
"DELETE FROM ledger WHERE block_height = $1",
&[&PgNumericU64(block_height)],
)
.await
.expect("error rolling back ledger");
db_tx
.execute(
"DELETE FROM runes WHERE block_height = $1",
&[&PgNumericU64(block_height)],
)
.await
.expect("error rolling back runes");
}
pub async fn pg_get_max_rune_number<T: GenericClient>(client: &T, _ctx: &Context) -> u32 {
let row = client
.query_opt("SELECT MAX(number) AS max FROM runes", &[])
.await
.expect("error getting max rune number");
let Some(row) = row else {
return 0;
};
// `MAX()` returns NULL when the table is empty, so read it as an `Option` to avoid a panic.
let max: Option<PgBigIntU32> = row.get("max");
max.map(|m| m.0).unwrap_or(0)
}
pub async fn pg_get_block_height(client: &mut Client, _ctx: &Context) -> Option<u64> {
let row = client
.query_opt("SELECT MAX(block_height) AS max FROM ledger", &[])
.await
.expect("error getting max block height")?;
let max: Option<PgNumericU64> = row.get("max");
max.map(|m| m.0)
}
pub async fn pg_get_rune_by_id(
id: &RuneId,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) -> Option<DbRune> {
let row = match db_tx
.query_opt("SELECT * FROM runes WHERE id = $1", &[&id.to_string()])
.await
{
Ok(row) => row,
Err(e) => {
try_error!(ctx, "error retrieving rune: {}", e.to_string());
process::exit(1);
}
};
let Some(row) = row else {
return None;
};
Some(DbRune::from_pg_row(&row))
}
pub async fn pg_get_rune_total_mints(
id: &RuneId,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) -> Option<u128> {
let row = match db_tx
.query_opt(
"SELECT total_mints FROM supply_changes WHERE rune_id = $1 ORDER BY block_height DESC LIMIT 1",
&[&id.to_string()],
)
.await
{
Ok(row) => row,
Err(e) => {
try_error!(
ctx,
"error retrieving rune minted total: {}",
e.to_string()
);
process::exit(1);
}
};
let Some(row) = row else {
return None;
};
let minted: PgNumericU128 = row.get("total_mints");
Some(minted.0)
}
/// Retrieves the rune balances for a list of transaction inputs, each represented by `(vin, tx_id, vout)`, where `vin` is the
/// index of the input within the spending transaction, `tx_id` is the transaction that produced the output being spent, and
/// `vout` is that output's index within `tx_id`.
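///
/// # Example (illustrative sketch; assumes a `db_tx` transaction and `ctx` in scope)
/// ```ignore
/// // Input 0 spends output 1 of `prev_txid`, input 1 spends output 0 of `other_txid`.
/// let inputs = vec![(0u32, prev_txid, 1u32), (1u32, other_txid, 0u32)];
/// let balances = pg_get_input_rune_balances(inputs, &mut db_tx, &ctx).await;
/// // `balances[&0]` maps each rune received on that spent output to its balances.
/// ```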
pub async fn pg_get_input_rune_balances(
outputs: Vec<(u32, String, u32)>,
db_tx: &mut Transaction<'_>,
ctx: &Context,
) -> HashMap<u32, HashMap<RuneId, Vec<InputRuneBalance>>> {
    // Instead of preparing a statement and running it thousands of times, pull all rows with a single query.
let mut arg_num = 1;
let mut args = String::new();
let mut data = vec![];
for (input_index, tx_id, output) in outputs.iter() {
args.push_str(
format!(
"(${}::bigint,${},${}::bigint),",
arg_num,
arg_num + 1,
arg_num + 2
)
.as_str(),
);
arg_num += 3;
data.push((PgBigIntU32(*input_index), tx_id, PgBigIntU32(*output)));
}
args.pop();
let mut params: Vec<&(dyn ToSql + Sync)> = vec![];
for d in data.iter() {
params.push(&d.0);
params.push(d.1);
params.push(&d.2);
}
let rows = match db_tx
.query(
format!(
"WITH inputs (index, tx_id, output) AS (VALUES {})
SELECT i.index, l.rune_id, l.address, l.amount
FROM ledger AS l
INNER JOIN inputs AS i USING (tx_id, output)
WHERE l.operation = 'receive'",
args
)
.as_str(),
&params,
)
.await
{
Ok(rows) => rows,
Err(e) => {
try_error!(
ctx,
"error retrieving output rune balances: {}",
e.to_string()
);
process::exit(1);
}
};
let mut results: HashMap<u32, HashMap<RuneId, Vec<InputRuneBalance>>> = HashMap::new();
for row in rows.iter() {
let key: PgBigIntU32 = row.get("index");
let rune_str: String = row.get("rune_id");
let rune_id = RuneId::from_str(rune_str.as_str()).unwrap();
let address: Option<String> = row.get("address");
let amount: PgNumericU128 = row.get("amount");
let input_bal = InputRuneBalance {
address,
amount: amount.0,
};
        results
            .entry(key.0)
            .or_default()
            .entry(rune_id)
            .or_default()
            .push(input_bal);
}
results
}
#[cfg(test)]
pub async fn pg_test_client(run_migrations: bool, ctx: &Context) -> Client {
let (mut client, connection) =
tokio_postgres::connect("host=localhost user=postgres password=postgres", NoTls)
.await
.unwrap();
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("test connection error: {}", e);
}
});
if run_migrations {
pg_run_migrations(&mut client, ctx).await;
}
client
}
#[cfg(test)]
pub async fn pg_test_roll_back_migrations(pg_client: &mut Client, ctx: &Context) {
match pg_client
.batch_execute(
"
DO $$ DECLARE
r RECORD;
BEGIN
FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = current_schema()) LOOP
EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE';
END LOOP;
END $$;
DO $$ DECLARE
r RECORD;
BEGIN
FOR r IN (SELECT typname FROM pg_type WHERE typtype = 'e' AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = current_schema())) LOOP
EXECUTE 'DROP TYPE IF EXISTS ' || quote_ident(r.typname) || ' CASCADE';
END LOOP;
END $$;",
)
.await {
Ok(rows) => rows,
Err(e) => {
try_error!(
ctx,
"error rolling back test migrations: {}",
e.to_string()
);
process::exit(1);
}
};
}

View File

@@ -0,0 +1,27 @@
use chainhook_postgres::types::{PgBigIntU32, PgNumericU128, PgNumericU64};
#[derive(Debug, Clone)]
pub struct DbBalanceChange {
pub rune_id: String,
pub block_height: PgNumericU64,
pub address: String,
pub balance: PgNumericU128,
pub total_operations: PgBigIntU32,
}
impl DbBalanceChange {
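    /// Builds a single-operation balance delta; the `ON CONFLICT` upsert in
    /// the insert query folds these deltas into running per-address totals.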
pub fn from_operation(
rune_id: String,
block_height: PgNumericU64,
address: String,
balance: PgNumericU128,
) -> Self {
DbBalanceChange {
rune_id,
block_height,
address,
balance,
total_operations: PgBigIntU32(1),
}
}
}

View File

@@ -0,0 +1,71 @@
use chainhook_postgres::types::{PgBigIntU32, PgNumericU128, PgNumericU64};
use ordinals::RuneId;
use tokio_postgres::Row;
use super::db_ledger_operation::DbLedgerOperation;
/// A row in the `ledger` table.
#[derive(Debug, Clone)]
pub struct DbLedgerEntry {
pub rune_id: String,
pub block_hash: String,
pub block_height: PgNumericU64,
pub tx_index: PgBigIntU32,
pub event_index: PgBigIntU32,
pub tx_id: String,
pub output: Option<PgBigIntU32>,
pub address: Option<String>,
pub receiver_address: Option<String>,
pub amount: Option<PgNumericU128>,
pub operation: DbLedgerOperation,
pub timestamp: PgBigIntU32,
}
impl DbLedgerEntry {
pub fn from_values(
amount: Option<u128>,
rune_id: RuneId,
block_hash: &String,
block_height: u64,
tx_index: u32,
event_index: u32,
tx_id: &String,
output: Option<u32>,
address: Option<&String>,
receiver_address: Option<&String>,
operation: DbLedgerOperation,
timestamp: u32,
) -> Self {
DbLedgerEntry {
rune_id: rune_id.to_string(),
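            // Hashes and tx ids arrive `0x`-prefixed from chainhook-types;
            // the DB stores bare hex, hence the `[2..]` slices.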
block_hash: block_hash[2..].to_string(),
block_height: PgNumericU64(block_height),
tx_index: PgBigIntU32(tx_index),
event_index: PgBigIntU32(event_index),
tx_id: tx_id[2..].to_string(),
output: output.map(|i| PgBigIntU32(i)),
address: address.cloned(),
receiver_address: receiver_address.cloned(),
amount: amount.map(|i| PgNumericU128(i)),
operation,
timestamp: PgBigIntU32(timestamp),
}
}
pub fn from_pg_row(row: &Row) -> Self {
DbLedgerEntry {
rune_id: row.get("rune_id"),
block_hash: row.get("block_hash"),
block_height: row.get("block_height"),
tx_index: row.get("tx_index"),
event_index: row.get("event_index"),
tx_id: row.get("tx_id"),
output: row.get("output"),
address: row.get("address"),
receiver_address: row.get("receiver_address"),
amount: row.get("amount"),
operation: row.get("operation"),
timestamp: row.get("timestamp"),
}
}
}

View File

@@ -0,0 +1,79 @@
use std::{error::Error, fmt};
use bytes::BytesMut;
use tokio_postgres::types::{to_sql_checked, FromSql, IsNull, ToSql, Type};
/// A value from the `ledger_operation` enum type.
#[derive(Debug, Clone, PartialEq)]
pub enum DbLedgerOperation {
Etching,
Mint,
Burn,
Send,
Receive,
}
impl fmt::Display for DbLedgerOperation {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.as_str().to_uppercase())
}
}
impl DbLedgerOperation {
pub fn as_str(&self) -> &str {
match self {
Self::Etching => "etching",
Self::Mint => "mint",
Self::Burn => "burn",
Self::Send => "send",
Self::Receive => "receive",
}
}
}
impl std::str::FromStr for DbLedgerOperation {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"etching" => Ok(DbLedgerOperation::Etching),
"mint" => Ok(DbLedgerOperation::Mint),
"burn" => Ok(DbLedgerOperation::Burn),
"send" => Ok(DbLedgerOperation::Send),
"receive" => Ok(DbLedgerOperation::Receive),
_ => Err(()),
}
}
}
impl ToSql for DbLedgerOperation {
fn to_sql(
&self,
_ty: &Type,
out: &mut BytesMut,
) -> Result<IsNull, Box<dyn Error + Sync + Send>> {
out.extend_from_slice(self.as_str().as_bytes());
Ok(IsNull::No)
}
fn accepts(ty: &Type) -> bool {
ty.name() == "ledger_operation"
}
to_sql_checked!();
}
impl<'a> FromSql<'a> for DbLedgerOperation {
fn from_sql(
_ty: &Type,
raw: &'a [u8],
) -> Result<DbLedgerOperation, Box<dyn Error + Sync + Send>> {
let s = std::str::from_utf8(raw)?;
s.parse::<DbLedgerOperation>()
.map_err(|_| "failed to parse enum variant".into())
}
fn accepts(ty: &Type) -> bool {
ty.name() == "ledger_operation"
}
}
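// Illustrative round-trip sketch (not part of the original change): values
// parse from their lowercase SQL form, while `Display` upper-cases for logs.
#[cfg(test)]
mod test {
    use std::str::FromStr;

    use super::DbLedgerOperation;

    #[test]
    fn parses_and_displays_variants() {
        let op = DbLedgerOperation::from_str("mint").unwrap();
        assert_eq!(op, DbLedgerOperation::Mint);
        assert_eq!(op.as_str(), "mint");
        assert_eq!(op.to_string(), "MINT");
    }
}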

View File

@@ -0,0 +1,246 @@
use chainhook_postgres::types::{PgBigIntU32, PgNumericU128, PgNumericU64, PgSmallIntU8};
use ordinals::{Etching, Rune, RuneId, SpacedRune};
use tokio_postgres::Row;
use crate::db::cache::transaction_location::TransactionLocation;
/// A row in the `runes` table.
#[derive(Debug, Clone)]
pub struct DbRune {
pub id: String,
pub number: PgBigIntU32,
pub name: String,
pub spaced_name: String,
pub block_hash: String,
pub block_height: PgNumericU64,
pub tx_index: PgBigIntU32,
pub tx_id: String,
pub divisibility: PgSmallIntU8,
pub premine: PgNumericU128,
pub symbol: String,
pub terms_amount: Option<PgNumericU128>,
pub terms_cap: Option<PgNumericU128>,
pub terms_height_start: Option<PgNumericU64>,
pub terms_height_end: Option<PgNumericU64>,
pub terms_offset_start: Option<PgNumericU64>,
pub terms_offset_end: Option<PgNumericU64>,
pub turbo: bool,
pub cenotaph: bool,
pub timestamp: PgBigIntU32,
}
impl DbRune {
pub fn from_etching(etching: &Etching, number: u32, location: &TransactionLocation) -> Self {
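        // An etching may omit an explicit rune name; in that case a reserved
        // name is derived from the block height and tx index (see
        // `Rune::reserved` in the `ordinals` crate).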
let rune = etching
.rune
.unwrap_or(Rune::reserved(location.block_height, location.tx_index));
let spaced_name = if let Some(spacers) = etching.spacers {
let spaced_rune = SpacedRune::new(rune, spacers);
spaced_rune.to_string()
} else {
rune.to_string()
};
let name = rune.to_string();
let mut terms_amount = None;
let mut terms_cap = None;
let mut terms_height_start = None;
let mut terms_height_end = None;
let mut terms_offset_start = None;
let mut terms_offset_end = None;
if let Some(terms) = etching.terms {
terms_amount = terms.amount.map(|i| PgNumericU128(i));
terms_cap = terms.cap.map(|i| PgNumericU128(i));
terms_height_start = terms.height.0.map(|i| PgNumericU64(i));
terms_height_end = terms.height.1.map(|i| PgNumericU64(i));
terms_offset_start = terms.offset.0.map(|i| PgNumericU64(i));
terms_offset_end = terms.offset.1.map(|i| PgNumericU64(i));
}
DbRune {
id: format!("{}:{}", location.block_height, location.tx_index),
number: PgBigIntU32(number),
name,
spaced_name,
block_hash: location.block_hash[2..].to_string(),
block_height: PgNumericU64(location.block_height),
tx_index: PgBigIntU32(location.tx_index),
tx_id: location.tx_id[2..].to_string(),
divisibility: etching
.divisibility
.map(|i| PgSmallIntU8(i))
.unwrap_or(PgSmallIntU8(0)),
premine: etching
.premine
.map(|i| PgNumericU128(i))
.unwrap_or(PgNumericU128(0)),
symbol: etching
.symbol
.map(|i| i.to_string().replace('\0', ""))
.unwrap_or("¤".to_string()),
terms_amount,
terms_cap,
terms_height_start,
terms_height_end,
terms_offset_start,
terms_offset_end,
turbo: etching.turbo,
cenotaph: false,
timestamp: PgBigIntU32(location.timestamp),
}
}
pub fn from_cenotaph_etching(rune: &Rune, number: u32, location: &TransactionLocation) -> Self {
DbRune {
id: format!("{}:{}", location.block_height, location.tx_index),
name: rune.to_string(),
spaced_name: rune.to_string(),
number: PgBigIntU32(number),
block_hash: location.block_hash[2..].to_string(),
block_height: PgNumericU64(location.block_height),
tx_index: PgBigIntU32(location.tx_index),
tx_id: location.tx_id[2..].to_string(),
divisibility: PgSmallIntU8(0),
premine: PgNumericU128(0),
symbol: "".to_string(),
terms_amount: None,
terms_cap: None,
terms_height_start: None,
terms_height_end: None,
terms_offset_start: None,
terms_offset_end: None,
turbo: false,
cenotaph: true,
timestamp: PgBigIntU32(location.timestamp),
}
}
pub fn from_pg_row(row: &Row) -> Self {
DbRune {
id: row.get("id"),
number: row.get("number"),
name: row.get("name"),
spaced_name: row.get("spaced_name"),
block_hash: row.get("block_hash"),
block_height: row.get("block_height"),
tx_index: row.get("tx_index"),
tx_id: row.get("tx_id"),
divisibility: row.get("divisibility"),
premine: row.get("premine"),
symbol: row.get("symbol"),
terms_amount: row.get("terms_amount"),
terms_cap: row.get("terms_cap"),
terms_height_start: row.get("terms_height_start"),
terms_height_end: row.get("terms_height_end"),
terms_offset_start: row.get("terms_offset_start"),
terms_offset_end: row.get("terms_offset_end"),
turbo: row.get("turbo"),
cenotaph: row.get("cenotaph"),
timestamp: row.get("timestamp"),
}
}
pub fn rune_id(&self) -> RuneId {
RuneId {
block: self.block_height.0,
tx: self.tx_index.0,
}
}
}
#[cfg(test)]
impl DbRune {
pub fn factory() -> Self {
DbRune {
id: "840000:1".to_string(),
number: PgBigIntU32(1),
name: "ZZZZZFEHUZZZZZ".to_string(),
spaced_name: "Z•Z•Z•Z•Z•FEHU•Z•Z•Z•Z•Z".to_string(),
block_hash: "0000000000000000000320283a032748cef8227873ff4872689bf23f1cda83a5"
.to_string(),
block_height: PgNumericU64(840000),
tx_index: PgBigIntU32(1),
tx_id: "2bb85f4b004be6da54f766c17c1e855187327112c231ef2ff35ebad0ea67c69e".to_string(),
divisibility: PgSmallIntU8(2),
premine: PgNumericU128(11000000000),
symbol: "".to_string(),
terms_amount: Some(PgNumericU128(100)),
terms_cap: Some(PgNumericU128(1111111)),
terms_height_start: None,
terms_height_end: None,
terms_offset_start: None,
terms_offset_end: None,
turbo: true,
cenotaph: false,
timestamp: PgBigIntU32(1713571767),
}
}
pub fn terms_height_start(&mut self, val: Option<PgNumericU64>) -> &Self {
self.terms_height_start = val;
self
}
pub fn terms_height_end(&mut self, val: Option<PgNumericU64>) -> &Self {
self.terms_height_end = val;
self
}
pub fn terms_offset_start(&mut self, val: Option<PgNumericU64>) -> &Self {
self.terms_offset_start = val;
self
}
pub fn terms_offset_end(&mut self, val: Option<PgNumericU64>) -> &Self {
self.terms_offset_end = val;
self
}
pub fn terms_cap(&mut self, val: Option<PgNumericU128>) -> &Self {
self.terms_cap = val;
self
}
}
#[cfg(test)]
mod test {
use std::str::FromStr;
use ordinals::{Etching, SpacedRune, Terms};
use crate::db::cache::transaction_location::TransactionLocation;
use super::DbRune;
#[test]
fn test_from_etching() {
let rune = SpacedRune::from_str("UNCOMMON•GOODS").unwrap();
let db_rune = DbRune::from_etching(
&Etching {
divisibility: Some(0),
premine: Some(0),
rune: Some(rune.rune),
spacers: Some(rune.spacers),
symbol: Some('⧉'),
terms: Some(Terms {
amount: Some(1),
                    cap: Some(u128::MAX),
height: (Some(840000), Some(1050000)),
offset: (None, None),
}),
turbo: false,
},
0,
&TransactionLocation {
network: bitcoin::Network::Bitcoin,
block_hash: "00000000000000000000d2845e9e48d356e89fd3b2e1f3da668ffc04c7dfe298"
.to_string(),
block_height: 1,
tx_index: 0,
tx_id: "14e87956a6bb0f50df1515e85f1dcc4625a7e2ebeb08ab6db7d9211c7cf64fa3"
.to_string(),
timestamp: 0,
},
);
        assert_eq!(db_rune.name, "UNCOMMONGOODS");
        assert_eq!(db_rune.spaced_name, "UNCOMMON•GOODS");
}
}

View File

@@ -0,0 +1,51 @@
use chainhook_postgres::types::{PgNumericU128, PgNumericU64};
/// An update to a rune that affects its total counts.
#[derive(Debug, Clone)]
pub struct DbSupplyChange {
pub rune_id: String,
pub block_height: PgNumericU64,
pub minted: PgNumericU128,
pub total_mints: PgNumericU128,
pub burned: PgNumericU128,
pub total_burns: PgNumericU128,
pub total_operations: PgNumericU128,
}
impl DbSupplyChange {
pub fn from_mint(id: String, block_height: PgNumericU64, amount: PgNumericU128) -> Self {
DbSupplyChange {
rune_id: id,
block_height,
minted: amount,
total_mints: PgNumericU128(1),
burned: PgNumericU128(0),
total_burns: PgNumericU128(0),
total_operations: PgNumericU128(1),
}
}
pub fn from_burn(id: String, block_height: PgNumericU64, amount: PgNumericU128) -> Self {
DbSupplyChange {
rune_id: id,
block_height,
minted: PgNumericU128(0),
total_mints: PgNumericU128(0),
burned: amount,
total_burns: PgNumericU128(1),
total_operations: PgNumericU128(1),
}
}
pub fn from_operation(id: String, block_height: PgNumericU64) -> Self {
DbSupplyChange {
rune_id: id,
block_height,
minted: PgNumericU128(0),
total_mints: PgNumericU128(0),
burned: PgNumericU128(0),
total_burns: PgNumericU128(0),
total_operations: PgNumericU128(1),
}
}
}
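// Illustrative sketch (not part of the original change): a mint of 100 base
// units at height 840000 produces a one-row delta; the `ON CONFLICT` upserts
// in the db module fold deltas like this into per-block running totals.
//
//     let change = DbSupplyChange::from_mint(
//         "840000:1".to_string(),
//         PgNumericU64(840000),
//         PgNumericU128(100),
//     );
//     // `change.minted` is 100 and `change.total_mints` is 1.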

View File

@@ -0,0 +1,5 @@
pub mod db_balance_change;
pub mod db_ledger_entry;
pub mod db_ledger_operation;
pub mod db_rune;
pub mod db_supply_change;

View File

@@ -0,0 +1,48 @@
#[macro_use]
extern crate hiro_system_kit;
extern crate serde;
pub mod db;
pub mod scan;
pub mod service;
#[macro_export]
macro_rules! try_info {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| info!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| info!(l, $tag));
};
}
#[macro_export]
macro_rules! try_debug {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| debug!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| debug!(l, $tag));
};
}
#[macro_export]
macro_rules! try_warn {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| warn!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| warn!(l, $tag));
};
}
#[macro_export]
macro_rules! try_error {
($a:expr, $tag:expr, $($args:tt)*) => {
$a.try_log(|l| error!(l, $tag, $($args)*));
};
($a:expr, $tag:expr) => {
$a.try_log(|l| error!(l, $tag));
};
}
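// Example usage (illustrative): the first argument is any value exposing
// `try_log`, such as the `chainhook_sdk::utils::Context` threaded through
// this crate; the format arguments follow the underlying `slog` macros.
//
//     try_info!(ctx, "Scanning {} Bitcoin blocks", block_count);
//     try_error!(ctx, "Error inserting ledger entries: {:?}", e);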

View File

@@ -0,0 +1,82 @@
use crate::db::cache::index_cache::IndexCache;
use crate::db::index::{index_block, roll_back_block};
use crate::try_info;
use chainhook_sdk::indexer::bitcoin::{
build_http_client, download_and_parse_block_with_retry, retrieve_block_hash_with_retry,
standardize_bitcoin_block,
};
use chainhook_sdk::utils::bitcoind::bitcoind_get_block_height;
use chainhook_sdk::utils::{BlockHeights, Context};
use chainhook_types::BitcoinNetwork;
use config::Config;
use tokio_postgres::Client;
pub async fn drop_blocks(start_block: u64, end_block: u64, pg_client: &mut Client, ctx: &Context) {
for block in start_block..=end_block {
roll_back_block(pg_client, block, ctx).await;
}
}
pub async fn scan_blocks(
blocks: Vec<u64>,
config: &Config,
pg_client: &mut Client,
index_cache: &mut IndexCache,
ctx: &Context,
) -> Result<(), String> {
let block_heights_to_scan_res = BlockHeights::Blocks(blocks).get_sorted_entries();
    let mut block_heights_to_scan = block_heights_to_scan_res
        .map_err(|_e| "Invalid block start / end specification".to_string())?;
try_info!(
ctx,
"Scanning {} Bitcoin blocks",
block_heights_to_scan.len()
);
let bitcoin_config = config.bitcoind.clone();
let mut number_of_blocks_scanned = 0;
let http_client = build_http_client();
while let Some(current_block_height) = block_heights_to_scan.pop_front() {
number_of_blocks_scanned += 1;
let block_hash = retrieve_block_hash_with_retry(
&http_client,
&current_block_height,
&bitcoin_config,
ctx,
)
.await?;
let raw_block =
download_and_parse_block_with_retry(&http_client, &block_hash, &bitcoin_config, ctx)
.await?;
let mut block = standardize_bitcoin_block(
raw_block,
&BitcoinNetwork::from_network(bitcoin_config.network),
ctx,
)
.unwrap();
index_block(pg_client, index_cache, &mut block, ctx).await;
if block_heights_to_scan.is_empty() {
            // The queue is drained at this point; if bitcoind advanced while
            // we were scanning, extend the queue up to its new chain tip.
            let new_tip = bitcoind_get_block_height(&config.bitcoind, ctx);
            for entry in (current_block_height + 1)..new_tip {
                block_heights_to_scan.push_back(entry);
            }
}
}
try_info!(ctx, "{number_of_blocks_scanned} blocks scanned");
Ok(())
}

View File

@@ -0,0 +1 @@
pub mod bitcoin;

View File

@@ -0,0 +1,175 @@
use std::sync::mpsc::channel;
use crate::db::cache::index_cache::IndexCache;
use crate::db::index::{get_rune_genesis_block_height, index_block, roll_back_block};
use crate::db::{pg_connect, pg_get_block_height};
use crate::scan::bitcoin::scan_blocks;
use crate::{try_error, try_info};
use chainhook_sdk::observer::BitcoinBlockDataCached;
use chainhook_sdk::utils::bitcoind::bitcoind_get_block_height;
use chainhook_sdk::{
observer::{start_event_observer, ObserverEvent, ObserverSidecar},
utils::Context,
};
use chainhook_types::BlockIdentifier;
use config::Config;
use crossbeam_channel::select;
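/// Returns the highest indexed block height, falling back to the block just
/// before the runes genesis height when the ledger is still empty.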
pub async fn get_index_chain_tip(config: &Config, ctx: &Context) -> u64 {
let mut pg_client = pg_connect(&config, true, ctx).await;
pg_get_block_height(&mut pg_client, ctx)
.await
.unwrap_or(get_rune_genesis_block_height(config.bitcoind.network) - 1)
}
pub async fn catch_up_to_bitcoin_chain_tip(config: &Config, ctx: &Context) -> Result<(), String> {
let mut pg_client = pg_connect(&config, true, ctx).await;
let mut index_cache = IndexCache::new(config, &mut pg_client, ctx).await;
loop {
let chain_tip = pg_get_block_height(&mut pg_client, ctx)
.await
.unwrap_or(get_rune_genesis_block_height(config.bitcoind.network) - 1);
let bitcoind_chain_tip = bitcoind_get_block_height(&config.bitcoind, ctx);
if bitcoind_chain_tip < chain_tip {
try_info!(
ctx,
"Waiting for bitcoind to reach height {}, currently at {}",
chain_tip,
bitcoind_chain_tip
);
std::thread::sleep(std::time::Duration::from_secs(10));
} else if bitcoind_chain_tip > chain_tip {
try_info!(
ctx,
"Block height is behind bitcoind, scanning block range {} to {}",
chain_tip + 1,
bitcoind_chain_tip
);
scan_blocks(
((chain_tip + 1)..=bitcoind_chain_tip).collect(),
config,
&mut pg_client,
&mut index_cache,
ctx,
)
.await?;
} else {
try_info!(ctx, "Caught up to bitcoind chain tip at {}", chain_tip);
break;
}
}
Ok(())
}
pub async fn start_service(config: &Config, ctx: &Context) -> Result<(), String> {
catch_up_to_bitcoin_chain_tip(config, ctx).await?;
// Start chainhook event observer, we're at chain tip.
let (observer_cmd_tx, observer_cmd_rx) = channel();
let (observer_event_tx, observer_event_rx) = crossbeam_channel::unbounded();
let observer_sidecar = set_up_observer_sidecar_runloop(config, ctx)
.await
.expect("unable to set up observer sidecar");
let event_observer_config = config.bitcoind.clone();
let context = ctx.clone();
let observer_cmd_tx_moved = observer_cmd_tx.clone();
let _ = std::thread::spawn(move || {
start_event_observer(
event_observer_config,
observer_cmd_tx_moved,
observer_cmd_rx,
Some(observer_event_tx),
Some(observer_sidecar),
context,
)
.expect("unable to start Stacks chain observer");
});
try_info!(ctx, "Listening for new blocks via Chainhook SDK");
loop {
let event = match observer_event_rx.recv() {
Ok(cmd) => cmd,
Err(e) => {
try_error!(ctx, "Error: broken channel {}", e.to_string());
break;
}
};
match event {
ObserverEvent::Terminate => {
try_info!(ctx, "Received termination event from Chainhook SDK");
break;
}
_ => {}
}
}
Ok(())
}
pub async fn set_up_observer_sidecar_runloop(
config: &Config,
ctx: &Context,
) -> Result<ObserverSidecar, String> {
// Sidecar will be receiving blocks to mutate
let (block_mutator_in_tx, block_mutator_in_rx) = crossbeam_channel::unbounded();
// Sidecar will be sending mutated blocks back to chainhook-sdk
let (block_mutator_out_tx, block_mutator_out_rx) = crossbeam_channel::unbounded();
    // Chain event notifications (the received commands are ignored below;
    // indexing happens in the block mutator).
let (chain_event_notifier_tx, chain_event_notifier_rx) = crossbeam_channel::unbounded();
let observer_sidecar = ObserverSidecar {
bitcoin_blocks_mutator: Some((block_mutator_in_tx, block_mutator_out_rx)),
bitcoin_chain_event_notifier: Some(chain_event_notifier_tx),
};
let ctx = ctx.clone();
let config = config.clone();
let _ = hiro_system_kit::thread_named("Observer Sidecar Runloop").spawn(move || {
hiro_system_kit::nestable_block_on(async {
let mut index_cache =
IndexCache::new(&config, &mut pg_connect(&config, false, &ctx).await, &ctx).await;
loop {
select! {
recv(block_mutator_in_rx) -> msg => {
if let Ok((mut blocks_to_mutate, blocks_ids_to_rollback)) = msg {
chainhook_sidecar_mutate_blocks(
&mut index_cache,
&mut blocks_to_mutate,
&blocks_ids_to_rollback,
&config,
&ctx,
).await;
let _ = block_mutator_out_tx.send(blocks_to_mutate);
}
}
recv(chain_event_notifier_rx) -> msg => {
if let Ok(_command) = msg {
// We don't need to do anything here because we already indexed the block during the mutation above.
}
}
}
}
});
});
Ok(observer_sidecar)
}
pub async fn chainhook_sidecar_mutate_blocks(
index_cache: &mut IndexCache,
blocks_to_mutate: &mut Vec<BitcoinBlockDataCached>,
block_ids_to_rollback: &Vec<BlockIdentifier>,
config: &Config,
ctx: &Context,
) {
try_info!(ctx, "Received mutate blocks message from Chainhook SDK");
let mut pg_client = pg_connect(&config, false, &ctx).await;
for block_id in block_ids_to_rollback.iter() {
roll_back_block(&mut pg_client, block_id.index, ctx).await;
}
for cache in blocks_to_mutate.iter_mut() {
if !cache.processed_by_sidecar {
index_block(&mut pg_client, index_cache, &mut cache.block, ctx).await;
cache.processed_by_sidecar = true;
}
}
}

View File

@@ -16,13 +16,13 @@ COPY ./components /src/components
COPY ./migrations /src/migrations
RUN cargo build --features release --release
RUN cp /src/target/release/ordhook /out
RUN cp /src/target/release/bitcoin-indexer /out
FROM debian:bullseye-slim
RUN apt-get update && apt-get install -y ca-certificates libssl-dev libclang-11-dev libunwind-dev libunwind8 sqlite3
COPY --from=build /out/ordhook /bin/ordhook
COPY --from=build /out/bitcoin-indexer /bin/bitcoin-indexer
WORKDIR /workspace
ENTRYPOINT ["ordhook"]
ENTRYPOINT ["bitcoin-indexer"]