Mirror of https://github.com/alexgo-io/bitcoin-indexer.git, synced 2026-01-12 16:52:57 +08:00

chore: complete renaming

@@ -1,2 +1,2 @@
[alias]
-hord-install = "install --path components/hord-cli --locked --force"
+ordhook-install = "install --path components/ordhook-cli --locked --force"

Cargo.lock (generated), 84 changed lines:

@@ -1608,48 +1608,6 @@ dependencies = [
"hmac 0.8.1",
]

-[[package]]
-name = "hord"
-version = "0.2.0"
-dependencies = [
-"ansi_term",
-"anyhow",
-"atty",
-"chainhook-sdk",
-"clap",
-"clap_generate",
-"crossbeam-channel 0.5.8",
-"ctrlc",
-"dashmap 5.4.0",
-"flate2",
-"flume",
-"futures",
-"futures-util",
-"fxhash",
-"hex",
-"hiro-system-kit",
-"num_cpus",
-"pprof",
-"progressing",
-"rand 0.8.5",
-"redis",
-"reqwest",
-"rocket",
-"rocket_okapi",
-"rocksdb",
-"rusqlite",
-"schemars 0.8.12",
-"serde",
-"serde-redis",
-"serde_derive",
-"serde_json",
-"tar",
-"threadpool",
-"tokio",
-"toml",
-"uuid 1.3.0",
-]
-
[[package]]
name = "http"
version = "0.2.8"

@@ -2386,6 +2344,48 @@ dependencies = [
"vcpkg",
]

+[[package]]
+name = "ordhook"
+version = "0.2.0"
+dependencies = [
+"ansi_term",
+"anyhow",
+"atty",
+"chainhook-sdk",
+"clap",
+"clap_generate",
+"crossbeam-channel 0.5.8",
+"ctrlc",
+"dashmap 5.4.0",
+"flate2",
+"flume",
+"futures",
+"futures-util",
+"fxhash",
+"hex",
+"hiro-system-kit",
+"num_cpus",
+"pprof",
+"progressing",
+"rand 0.8.5",
+"redis",
+"reqwest",
+"rocket",
+"rocket_okapi",
+"rocksdb",
+"rusqlite",
+"schemars 0.8.12",
+"serde",
+"serde-redis",
+"serde_derive",
+"serde_json",
+"tar",
+"threadpool",
+"tokio",
+"toml",
+"uuid 1.3.0",
+]
+
[[package]]
name = "os_str_bytes"
version = "6.4.1"

@@ -1,5 +1,5 @@
[workspace]
members = [
-"components/hord-cli"
+"components/ordhook-cli"
]
-default-members = ["components/hord-cli"]
+default-members = ["components/ordhook-cli"]

README.md, 44 changed lines:

@@ -16,41 +16,41 @@

The [Ordinal Theory](https://trustmachines.co/glossary/ordinal-theory) is a protocol aiming at attributing unique identifiers to minted satoshis (sats). With its numbering scheme, satoshis can be **inscribed** with arbitrary content (aka **inscriptions**), creating bitcoin-native digital artifacts more commonly known as NFTs. Inscriptions do not require a sidechain or separate token, which makes it attractive for new entrants to adopt, extend, and use. These inscribed sats can be transferred using Bitcoin transactions, sent to Bitcoin addresses, and held in Bitcoin UTXOs. In all respects, these transactions, addresses, and UTXOs are normal Bitcoin transactions, addresses, and UTXOs, except that to send individual sats, transactions must control the order and value of inputs and outputs per Ordinal Theory.

-Now that we discussed Ordinal Theory, let's dive into what **hord** attempts to solve for developers.
+Now that we discussed Ordinal Theory, let's dive into what **ordhook** attempts to solve for developers.

-The **hord** is an indexer designed to help developers build new re-org-resistant applications on top of the Ordinal Theory. This indexer will make it easy for protocol developers and users of those protocols to trace and discover the ownership of Ordinal's inscriptions, along with a wealth of information about each inscription.
+The **ordhook** is an indexer designed to help developers build new re-org-resistant applications on top of the Ordinal Theory. This indexer will make it easy for protocol developers and users of those protocols to trace and discover the ownership of Ordinal's inscriptions, along with a wealth of information about each inscription.

-The **hord** uses [Chainhook SDK](https://github.com/hirosystems/chainhook/tree/develop/components/chainhook-sdk) from the [Chainhook](https://github.com/hirosystems/chainhook/tree/develop) project, which is a re-org-aware transaction indexing engine for Stacks and Bitcoin. The SDK is designed with first-class event-driven principles, so it helps developers extract transactions from blocks efficiently and keeps a consistent view of the chain state.
+The **ordhook** uses [Chainhook SDK](https://github.com/hirosystems/chainhook/tree/develop/components/chainhook-sdk) from the [Chainhook](https://github.com/hirosystems/chainhook/tree/develop) project, which is a re-org-aware transaction indexing engine for Stacks and Bitcoin. The SDK is designed with first-class event-driven principles, so it helps developers extract transactions from blocks efficiently and keeps a consistent view of the chain state.

-With **hord**, Bitcoin developers can reliably implement feature-rich protocols and business models utilizing _near-real-time_ Ordinals inscriptions and transfers events.
+With **ordhook**, Bitcoin developers can reliably implement feature-rich protocols and business models utilizing _near-real-time_ Ordinals inscriptions and transfers events.

# Quick Start

-## Installing `hord` from source
+## Installing `ordhook` from source

```console
-$ git clone https://github.com/hirosystems/hord.git
-$ cd hord
-$ cargo hord-install
+$ git clone https://github.com/hirosystems/ordhook.git
+$ cd ordhook
+$ cargo ordhook-install
```

-## Getting started with `hord`
+## Getting started with `ordhook`

### Explore Ordinal activities in your terminal

-Once `hord` is installed, Ordinals activities scanning can simply be performed using the following command:
+Once `ordhook` is installed, Ordinals activities scanning can simply be performed using the following command:
```console
-$ hord scan blocks 767430 767753 --mainnet
+$ ordhook scan blocks 767430 767753 --mainnet
Inscription 6fb976ab49dcec017f1e201e84395983204ae1a7c2abf7ced0a85d692e442799i0 revealed at block #767430 (ordinal_number 1252201400444387, inscription_number 0)
Inscription 26482871f33f1051f450f2da9af275794c0b5f1c61ebf35e4467fb42c2813403i0 revealed at block #767753 (ordinal_number 727624168684699, inscription_number 1)
```

-In this command, an interval of blocks to scan (starting at block `767430`, ending at block `767753`) is being provided. `hord` will display inscriptions and transfers activities occurring in the range of the specified blocks.
+In this command, an interval of blocks to scan (starting at block `767430`, ending at block `767753`) is being provided. `ordhook` will display inscriptions and transfers activities occurring in the range of the specified blocks.

The activity for a given inscription can be retrieved using the following command:

```console
-$ hord scan inscription 6fb976ab49dcec017f1e201e84395983204ae1a7c2abf7ced0a85d692e442799i0 --mainnet
+$ ordhook scan inscription 6fb976ab49dcec017f1e201e84395983204ae1a7c2abf7ced0a85d692e442799i0 --mainnet
Inscription 6fb976ab49dcec017f1e201e84395983204ae1a7c2abf7ced0a85d692e442799i0 revealed at block #767430 (ordinal_number 1252201400444387, inscription_number 0)
Transfered in transaction bc4c30829a9564c0d58e6287195622b53ced54a25711d1b86be7cd3a70ef61ed at block 785396
```

@@ -58,7 +58,7 @@ Transfered in transaction bc4c30829a9564c0d58e6287195622b53ced54a25711d1b86be7cd
---
### Stream Ordinal activities to an indexer

-`hord` is designed to help developers extracting ordinals activities (inscriptions and transfers) from the Bitcoin chain and streaming these activities to their indexer / web application.
+`ordhook` is designed to help developers extracting ordinals activities (inscriptions and transfers) from the Bitcoin chain and streaming these activities to their indexer / web application.

In order to get started, a `bitcoind` instance with access to the RPC methods `getblockhash` and `getblock` must be running. The RPC calls latency will directly impact the speed of the scans.
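
The prerequisite above is a reachable `bitcoind` node exposing `getblockhash` and `getblock` over RPC (plus, for the service mode described further down, the ZeroMQ feed referenced by `bitcoind_zmq_url` in the sample `Hord.toml` at the end of this commit). As a rough, illustrative sketch only, and assuming the credentials and endpoints used in that sample file, such a node could be started along these lines; the exact ZeroMQ notification type consumed by the Chainhook SDK is not spelled out in this diff, so both common ones are published here:

```console
$ bitcoind -server \
    -rpcuser=devnet -rpcpassword=devnet \
    -zmqpubhashblock=tcp://0.0.0.0:18543 \
    -zmqpubrawblock=tcp://0.0.0.0:18543
```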

@@ -73,25 +73,25 @@ Assuming:
A configuration file `Hord.toml` can be generated using the command:

```console
-$ hord config new --mainnet
+$ ordhook config new --mainnet
✔ Generated config file Hord.toml
```

After adjusting the `Hord.toml` settings to make them match the `bitcoind` configuration, the following command can be ran:

```
-$ hord scan blocks 767430 767753 --post-to=http://localhost:3000/api/events --config-path=./Hord.toml
+$ ordhook scan blocks 767430 767753 --post-to=http://localhost:3000/api/events --config-path=./Hord.toml
```

-`hord` will retrieve the full Ordinals activities (including the inscriptions content) and send all these informations to the `http://localhost:3000/api/events` HTTP POST endpoint.
+`ordhook` will retrieve the full Ordinals activities (including the inscriptions content) and send all these informations to the `http://localhost:3000/api/events` HTTP POST endpoint.

---
-### Run `hord` as a service for streaming blocks
+### Run `ordhook` as a service for streaming blocks

-`hord` can be ran as a service for streaming and processing new blocks appended to the Bitcoin blockchain.
+`ordhook` can be ran as a service for streaming and processing new blocks appended to the Bitcoin blockchain.

```console
-$ hord service start --post-to=http://localhost:3000/api/events --config-path=./Hord.toml
+$ ordhook service start --post-to=http://localhost:3000/api/events --config-path=./Hord.toml
```

New `http-post` endpoints can also be added dynamically by spinning up a redis server and adding the following section in the `Hord.toml` configuration file:

@@ -102,10 +102,10 @@ http_port = 20456
database_uri = "redis://localhost:6379/"
```

-Running `hord` with the command
+Running `ordhook` with the command

```console
-$ hord service start --config-path=./Hord.toml
+$ ordhook service start --config-path=./Hord.toml
```

will spin up a HTTP API for managing events destinations.
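
The two sections above post Ordinals activity payloads to `http://localhost:3000/api/events`. As an illustrative sketch only (the payload schema is not part of this diff, and the route and port are simply the values used in the README examples), a minimal receiver in Rust could look like this, assuming `rocket = { version = "0.5", features = ["json"] }` as a dependency:

```rust
#[macro_use]
extern crate rocket;

use rocket::serde::json::{Json, Value};

// Accept the JSON payloads delivered by `ordhook ... --post-to=...`
// and acknowledge them; real handling would go where the println! is.
#[post("/api/events", format = "json", data = "<payload>")]
fn events(payload: Json<Value>) -> &'static str {
    println!("received ordinals activity: {}", payload.0);
    "ok"
}

#[launch]
fn rocket() -> _ {
    // The README examples target port 3000, so override Rocket's default port.
    rocket::build()
        .configure(rocket::Config::figment().merge(("port", 3000)))
        .mount("/", routes![events])
}
```

Any HTTP server that accepts POSTs on that path would work equally well; nothing here is specific to ordhook beyond the URL it is told to post to.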

@@ -1,5 +1,5 @@
[package]
-name = "hord"
+name = "ordhook"
version = "0.2.0"
edition = "2021"

@@ -9,12 +9,12 @@ use crate::scan::bitcoin::scan_bitcoin_chainstate_via_rpc_using_predicate;
use crate::service::Service;

use crate::db::{
-delete_data_in_hord_db, find_all_inscription_transfers, find_all_inscriptions_in_block,
+delete_data_in_ordhook_db, find_all_inscription_transfers, find_all_inscriptions_in_block,
find_all_transfers_in_block, find_inscription_with_id, find_last_block_inserted,
find_latest_inscription_block_height, find_lazy_block_at_block_height,
-get_default_hord_db_file_path, initialize_hord_db, open_readonly_hord_db_conn,
-open_readonly_hord_db_conn_rocks_db, open_readwrite_hord_db_conn,
-open_readwrite_hord_db_conn_rocks_db,
+get_default_ordhook_db_file_path, initialize_ordhook_db, open_readonly_ordhook_db_conn,
+open_readonly_ordhook_db_conn_rocks_db, open_readwrite_ordhook_db_conn,
+open_readwrite_ordhook_db_conn_rocks_db,
};
use chainhook_sdk::bitcoincore_rpc::{Auth, Client, RpcApi};
use chainhook_sdk::chainhooks::types::HttpHook;

@@ -255,17 +255,17 @@ struct StartCommand {
/// Specify relative path of the chainhooks (yaml format) to evaluate
#[clap(long = "post-to")]
pub post_to: Vec<String>,
-/// Block height where hord will start posting Ordinals activities
+/// Block height where ordhook will start posting Ordinals activities
#[clap(long = "start-at-block")]
pub start_at_block: Option<u64>,
}

#[derive(Subcommand, PartialEq, Clone, Debug)]
enum HordDbCommand {
-/// Initialize a new hord db
+/// Initialize a new ordhook db
#[clap(name = "new", bin_name = "new")]
New(SyncHordDbCommand),
-/// Catch-up hord db
+/// Catch-up ordhook db
#[clap(name = "sync", bin_name = "sync")]
Sync(SyncHordDbCommand),
/// Rebuild inscriptions entries for a given block

@@ -483,7 +483,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
let mut total_inscriptions = 0;
let mut total_transfers = 0;

-let inscriptions_db_conn = initialize_hord_db(&config.expected_cache_path(), &ctx);
+let inscriptions_db_conn = initialize_ordhook_db(&config.expected_cache_path(), &ctx);
while let Some(block_height) = block_range.pop_front() {
let inscriptions =
find_all_inscriptions_in_block(&block_height, &inscriptions_db_conn, &ctx);

@@ -527,7 +527,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
total_transfers += total_transfers_in_block;
}
if total_transfers == 0 && total_inscriptions == 0 {
-let db_file_path = get_default_hord_db_file_path(&config.expected_cache_path());
+let db_file_path = get_default_ordhook_db_file_path(&config.expected_cache_path());
warn!(ctx.expect_logger(), "No data available. Check the validity of the range being scanned and the validity of your local database {}", db_file_path.display());
}
}

@@ -539,7 +539,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
let _ = download_ordinals_dataset_if_required(&config, ctx).await;

let inscriptions_db_conn =
-open_readonly_hord_db_conn(&config.expected_cache_path(), &ctx)?;
+open_readonly_ordhook_db_conn(&config.expected_cache_path(), &ctx)?;
let (inscription, block_height) =
match find_inscription_with_id(&cmd.inscription_id, &inscriptions_db_conn, &ctx)? {
Some(entry) => entry,

@@ -575,15 +575,15 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
let config =
Config::default(cmd.regtest, cmd.testnet, cmd.mainnet, &cmd.config_path)?;

-let _ = initialize_hord_db(&config.expected_cache_path(), &ctx);
+let _ = initialize_ordhook_db(&config.expected_cache_path(), &ctx);

let inscriptions_db_conn =
-open_readonly_hord_db_conn(&config.expected_cache_path(), &ctx)?;
+open_readonly_ordhook_db_conn(&config.expected_cache_path(), &ctx)?;

let last_known_block =
find_latest_inscription_block_height(&inscriptions_db_conn, &ctx)?;

-let hord_config = config.get_hord_config();
+let ordhook_config = config.get_ordhook_config();

info!(ctx.expect_logger(), "Starting service...",);

@@ -595,9 +595,9 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
warn!(
ctx.expect_logger(),
"Inscription ingestion will start at block #{}",
-hord_config.first_inscription_height
+ordhook_config.first_inscription_height
);
-hord_config.first_inscription_height
+ordhook_config.first_inscription_height
}
},
};

@@ -636,19 +636,19 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
},
Command::Db(HordDbCommand::New(cmd)) => {
let config = Config::default(false, false, false, &cmd.config_path)?;
-initialize_hord_db(&config.expected_cache_path(), &ctx);
+initialize_ordhook_db(&config.expected_cache_path(), &ctx);
}
Command::Db(HordDbCommand::Sync(cmd)) => {
let config = Config::default(false, false, false, &cmd.config_path)?;
-initialize_hord_db(&config.expected_cache_path(), &ctx);
+initialize_ordhook_db(&config.expected_cache_path(), &ctx);
let service = Service::new(config, ctx.clone());
service.update_state(None).await?;
}
Command::Db(HordDbCommand::Repair(subcmd)) => match subcmd {
RepairCommand::Blocks(cmd) => {
let config = Config::default(false, false, false, &cmd.config_path)?;
-let mut hord_config = config.get_hord_config();
-hord_config.network_thread_max = cmd.network_threads;
+let mut ordhook_config = config.get_ordhook_config();
+ordhook_config.network_thread_max = cmd.network_threads;

let block_ingestion_processor =
start_block_archiving_processor(&config, ctx, false, None);

@@ -657,7 +657,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
&config,
cmd.start_block,
cmd.end_block,
-hord_config.first_inscription_height,
+ordhook_config.first_inscription_height,
Some(&block_ingestion_processor),
10_000,
&ctx,

@@ -666,8 +666,8 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
}
RepairCommand::Inscriptions(cmd) => {
let config = Config::default(false, false, false, &cmd.config_path)?;
-let mut hord_config = config.get_hord_config();
-hord_config.network_thread_max = cmd.network_threads;
+let mut ordhook_config = config.get_ordhook_config();
+ordhook_config.network_thread_max = cmd.network_threads;

let inscription_indexing_processor =
start_inscription_indexing_processor(&config, ctx, None);

@@ -676,7 +676,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
&config,
cmd.start_block,
cmd.end_block,
-hord_config.first_inscription_height,
+ordhook_config.first_inscription_height,
Some(&inscription_indexing_processor),
10_000,
&ctx,

@@ -695,7 +695,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
let config = Config::default(false, false, false, &cmd.config_path)?;
{
let blocks_db =
-open_readonly_hord_db_conn_rocks_db(&config.expected_cache_path(), &ctx)?;
+open_readonly_ordhook_db_conn_rocks_db(&config.expected_cache_path(), &ctx)?;
let tip = find_last_block_inserted(&blocks_db) as u64;
println!("Tip: {}", tip);

@@ -714,11 +714,11 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
Command::Db(HordDbCommand::Drop(cmd)) => {
let config = Config::default(false, false, false, &cmd.config_path)?;
let blocks_db =
-open_readwrite_hord_db_conn_rocks_db(&config.expected_cache_path(), &ctx)?;
+open_readwrite_ordhook_db_conn_rocks_db(&config.expected_cache_path(), &ctx)?;
let inscriptions_db_conn_rw =
-open_readwrite_hord_db_conn(&config.expected_cache_path(), &ctx)?;
+open_readwrite_ordhook_db_conn(&config.expected_cache_path(), &ctx)?;

-delete_data_in_hord_db(
+delete_data_in_ordhook_db(
cmd.start_block,
cmd.end_block,
&blocks_db,

@@ -727,7 +727,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
)?;
info!(
ctx.expect_logger(),
-"Cleaning hord_db: {} blocks dropped",
+"Cleaning ordhook_db: {} blocks dropped",
cmd.end_block - cmd.start_block + 1
);
}

@@ -4,7 +4,7 @@ pub fn generate_config(network: &BitcoinNetwork) -> String {
let network = format!("{:?}", network);
let conf = format!(
r#"[storage]
-working_dir = "cache"
+working_dir = "ordhook"

# The Http Api allows you to register / deregister
# dynamically predicates.

@@ -110,7 +110,7 @@ impl Config {
}
}

-pub fn get_hord_config(&self) -> HordConfig {
+pub fn get_ordhook_config(&self) -> HordConfig {
HordConfig {
network_thread_max: self.limits.bitcoin_concurrent_http_requests_max,
ingestion_thread_max: self.limits.max_number_of_processing_threads,

@@ -162,7 +162,7 @@ impl Config {

let config = Config {
storage: StorageConfig {
-working_dir: config_file.storage.working_dir.unwrap_or("cache".into()),
+working_dir: config_file.storage.working_dir.unwrap_or("ordhook".into()),
},
http_api: match config_file.http_api {
None => PredicatesApi::Off,

@@ -399,6 +399,6 @@ impl Config {

pub fn default_cache_path() -> String {
let mut cache_path = std::env::current_dir().expect("unable to get current dir");
-cache_path.push("cache");
+cache_path.push("ordhook");
format!("{}", cache_path.display())
}

@@ -18,8 +18,8 @@ use chainhook_sdk::{
use crate::config::{Config, LogConfig};

use crate::db::{
-find_last_block_inserted, find_latest_inscription_block_height, initialize_hord_db,
-open_readonly_hord_db_conn, open_readonly_hord_db_conn_rocks_db,
+find_last_block_inserted, find_latest_inscription_block_height, initialize_ordhook_db,
+open_readonly_ordhook_db_conn, open_readonly_ordhook_db_conn_rocks_db,
};

use crate::db::{

@@ -38,7 +38,7 @@ pub struct HordConfig {
pub logs: LogConfig,
}

-pub fn revert_hord_db_with_augmented_bitcoin_block(
+pub fn revert_ordhook_db_with_augmented_bitcoin_block(
block: &BitcoinBlockData,
blocks_db_rw: &DB,
inscriptions_db_conn_rw: &Connection,

@@ -133,7 +133,7 @@ pub fn compute_next_satpoint_data(
SatPosition::Output((output_index, (offset_cross_inputs - offset_intra_outputs)))
}

-pub fn should_sync_hord_db(
+pub fn should_sync_ordhook_db(
config: &Config,
ctx: &Context,
) -> Result<Option<(u64, u64, usize)>, String> {

@@ -150,7 +150,7 @@ pub fn should_sync_hord_db(
};

let mut start_block =
-match open_readonly_hord_db_conn_rocks_db(&config.expected_cache_path(), &ctx) {
+match open_readonly_ordhook_db_conn_rocks_db(&config.expected_cache_path(), &ctx) {
Ok(blocks_db) => find_last_block_inserted(&blocks_db) as u64,
Err(err) => {
ctx.try_log(|logger| {

@@ -161,17 +161,17 @@ pub fn should_sync_hord_db(
};

if start_block == 0 {
-let _ = initialize_hord_db(&config.expected_cache_path(), &ctx);
+let _ = initialize_ordhook_db(&config.expected_cache_path(), &ctx);
}

-let inscriptions_db_conn = open_readonly_hord_db_conn(&config.expected_cache_path(), &ctx)?;
+let inscriptions_db_conn = open_readonly_ordhook_db_conn(&config.expected_cache_path(), &ctx)?;

match find_latest_inscription_block_height(&inscriptions_db_conn, ctx)? {
Some(height) => {
start_block = start_block.min(height);
}
None => {
-start_block = start_block.min(config.get_hord_config().first_inscription_height);
+start_block = start_block.min(config.get_ordhook_config().first_inscription_height);
}
};

@@ -59,7 +59,7 @@ pub async fn download_and_pipeline_blocks(
bitcoin_block_signaling: config.network.bitcoin_block_signaling.clone(),
};

-let hord_config = config.get_hord_config();
+let ordhook_config = config.get_ordhook_config();

let number_of_blocks_to_process = end_block - start_block + 1;

@@ -74,7 +74,7 @@ pub async fn download_and_pipeline_blocks(

let mut block_heights = VecDeque::from((start_block..=end_block).collect::<Vec<u64>>());

-for _ in 0..hord_config.ingestion_thread_queue_size {
+for _ in 0..ordhook_config.ingestion_thread_queue_size {
if let Some(block_height) = block_heights.pop_front() {
let config = moved_config.clone();
let ctx = moved_ctx.clone();

@@ -96,8 +96,8 @@ pub async fn download_and_pipeline_blocks(
let mut rx_thread_pool = vec![];
let mut thread_pool_handles = vec![];

-for _ in 0..hord_config.ingestion_thread_max {
-let (tx, rx) = bounded::<Option<Vec<u8>>>(hord_config.ingestion_thread_queue_size);
+for _ in 0..ordhook_config.ingestion_thread_max {
+let (tx, rx) = bounded::<Option<Vec<u8>>>(ordhook_config.ingestion_thread_queue_size);
tx_thread_pool.push(tx);
rx_thread_pool.push(rx);
}

@@ -257,7 +257,7 @@ pub async fn download_and_pipeline_blocks(
ctx,
));
}
-thread_index = (thread_index + 1) % hord_config.ingestion_thread_max;
+thread_index = (thread_index + 1) % ordhook_config.ingestion_thread_max;
}

ctx.try_log(|logger| {

@@ -313,7 +313,7 @@ pub async fn download_and_pipeline_blocks(
// ctx.try_log(|logger| {
// slog::info!(logger, "Generating report");
// });
-// let file = std::fs::File::create("hord-perf.svg").unwrap();
+// let file = std::fs::File::create("ordhook-perf.svg").unwrap();
// report.flamegraph(file).unwrap();
// }
// Err(e) => {

@@ -11,7 +11,7 @@ use rocksdb::DB;
use crate::{
config::Config,
core::pipeline::{PostProcessorCommand, PostProcessorController, PostProcessorEvent},
-db::{insert_entry_in_blocks, open_readwrite_hord_db_conn_rocks_db, LazyBlock},
+db::{insert_entry_in_blocks, open_readwrite_ordhook_db_conn_rocks_db, LazyBlock},
};

pub fn start_block_archiving_processor(

@@ -28,7 +28,7 @@ pub fn start_block_archiving_processor(
let handle: JoinHandle<()> = hiro_system_kit::thread_named("Processor Runloop")
.spawn(move || {
let blocks_db_rw =
-open_readwrite_hord_db_conn_rocks_db(&config.expected_cache_path(), &ctx).unwrap();
+open_readwrite_ordhook_db_conn_rocks_db(&config.expected_cache_path(), &ctx).unwrap();
let mut empty_cycles = 0;

if let Ok(PostProcessorCommand::Start) = commands_rx.recv() {

@@ -30,7 +30,7 @@ use crate::{
},
HordConfig,
},
-db::{get_any_entry_in_ordinal_activities, open_readonly_hord_db_conn},
+db::{get_any_entry_in_ordinal_activities, open_readonly_ordhook_db_conn},
};

use crate::db::{LazyBlockTransaction, TraversalResult};

@@ -41,7 +41,7 @@ use crate::{
new_traversals_lazy_cache,
pipeline::{PostProcessorCommand, PostProcessorController, PostProcessorEvent},
},
-db::{open_readwrite_hord_db_conn, open_readwrite_hord_db_conn_rocks_db},
+db::{open_readwrite_ordhook_db_conn, open_readwrite_ordhook_db_conn_rocks_db},
};

pub fn start_inscription_indexing_processor(

@@ -61,14 +61,14 @@ pub fn start_inscription_indexing_processor(
let mut garbage_collect_nth_block = 0;

let mut inscriptions_db_conn_rw =
-open_readwrite_hord_db_conn(&config.expected_cache_path(), &ctx).unwrap();
-let hord_config = config.get_hord_config();
+open_readwrite_ordhook_db_conn(&config.expected_cache_path(), &ctx).unwrap();
+let ordhook_config = config.get_ordhook_config();
let blocks_db_rw =
-open_readwrite_hord_db_conn_rocks_db(&config.expected_cache_path(), &ctx).unwrap();
+open_readwrite_ordhook_db_conn_rocks_db(&config.expected_cache_path(), &ctx).unwrap();
let mut empty_cycles = 0;

let inscriptions_db_conn =
-open_readonly_hord_db_conn(&config.expected_cache_path(), &ctx).unwrap();
+open_readonly_ordhook_db_conn(&config.expected_cache_path(), &ctx).unwrap();
let mut sequence_cursor = SequenceCursor::new(inscriptions_db_conn);

if let Ok(PostProcessorCommand::Start) = commands_rx.recv() {

@@ -125,7 +125,7 @@ pub fn start_inscription_indexing_processor(
&mut sequence_cursor,
&cache_l2,
&mut inscriptions_db_conn_rw,
-&hord_config,
+&ordhook_config,
&post_processor,
&ctx,
);

@@ -158,7 +158,7 @@ pub fn process_blocks(
sequence_cursor: &mut SequenceCursor,
cache_l2: &Arc<DashMap<(u32, [u8; 8]), LazyBlockTransaction, BuildHasherDefault<FxHasher>>>,
inscriptions_db_conn_rw: &mut Connection,
-hord_config: &HordConfig,
+ordhook_config: &HordConfig,
post_processor: &Option<Sender<BitcoinBlockData>>,
ctx: &Context,
) -> Vec<BitcoinBlockData> {

@@ -187,7 +187,7 @@ pub fn process_blocks(
&mut cache_l1,
cache_l2,
&inscriptions_db_tx,
-hord_config,
+ordhook_config,
ctx,
);

@@ -253,7 +253,7 @@ pub fn process_block(
cache_l1: &mut BTreeMap<(TransactionIdentifier, usize), TraversalResult>,
cache_l2: &Arc<DashMap<(u32, [u8; 8]), LazyBlockTransaction, BuildHasherDefault<FxHasher>>>,
inscriptions_db_tx: &Transaction,
-hord_config: &HordConfig,
+ordhook_config: &HordConfig,
ctx: &Context,
) -> Result<(), String> {
let any_processable_transactions = parallelize_inscription_data_computations(

@@ -262,7 +262,7 @@ pub fn process_block(
cache_l1,
cache_l2,
inscriptions_db_tx,
-&hord_config,
+&ordhook_config,
ctx,
)?;

@@ -270,7 +270,7 @@ pub fn process_block(
return Ok(());
}

-let inner_ctx = if hord_config.logs.ordinals_internals {
+let inner_ctx = if ordhook_config.logs.ordinals_internals {
ctx.clone()
} else {
Context::empty()

@@ -16,7 +16,7 @@ use crate::{
},
},
db::{
-insert_new_inscriptions_from_block_in_locations, open_readwrite_hord_db_conn,
+insert_new_inscriptions_from_block_in_locations, open_readwrite_ordhook_db_conn,
remove_entries_from_locations_at_block_height,
},
};

@@ -34,7 +34,7 @@ pub fn start_transfers_recomputing_processor(
let handle: JoinHandle<()> = hiro_system_kit::thread_named("Inscription indexing runloop")
.spawn(move || {
let mut inscriptions_db_conn_rw =
-open_readwrite_hord_db_conn(&config.expected_cache_path(), &ctx).unwrap();
+open_readwrite_ordhook_db_conn(&config.expected_cache_path(), &ctx).unwrap();
let mut empty_cycles = 0;

if let Ok(PostProcessorCommand::Start) = commands_rx.recv() {

@@ -68,13 +68,13 @@ pub fn parallelize_inscription_data_computations(
cache_l1: &mut BTreeMap<(TransactionIdentifier, usize), TraversalResult>,
cache_l2: &Arc<DashMap<(u32, [u8; 8]), LazyBlockTransaction, BuildHasherDefault<FxHasher>>>,
inscriptions_db_tx: &Transaction,
-hord_config: &HordConfig,
+ordhook_config: &HordConfig,
ctx: &Context,
) -> Result<bool, String> {
let (mut transactions_ids, l1_cache_hits) =
get_transactions_to_process(block, cache_l1, inscriptions_db_tx, ctx);

-let inner_ctx = if hord_config.logs.ordinals_internals {
+let inner_ctx = if ordhook_config.logs.ordinals_internals {
ctx.clone()
} else {
Context::empty()

@@ -82,7 +82,7 @@ pub fn parallelize_inscription_data_computations(

let has_transactions_to_process = !transactions_ids.is_empty() || !l1_cache_hits.is_empty();

-let thread_max = hord_config.ingestion_thread_max;
+let thread_max = ordhook_config.ingestion_thread_max;

// Nothing to do? early return
if !has_transactions_to_process {

@@ -101,7 +101,7 @@ pub fn parallelize_inscription_data_computations(

let moved_traversal_tx = traversal_tx.clone();
let moved_ctx = inner_ctx.clone();
-let moved_hord_db_path = hord_config.db_path.clone();
+let moved_ordhook_db_path = ordhook_config.db_path.clone();
let local_cache = cache_l2.clone();

let handle = hiro_system_kit::thread_named("Worker")

@@ -110,7 +110,7 @@ pub fn parallelize_inscription_data_computations(
rx.recv()
{
let traversal: Result<TraversalResult, String> = compute_satoshi_number(
-&moved_hord_db_path,
+&moved_ordhook_db_path,
&block_identifier,
&transaction_id,
input_index,

@@ -7,7 +7,7 @@ use std::path::PathBuf;
use std::sync::Arc;

use crate::db::{
-find_lazy_block_at_block_height, open_readonly_hord_db_conn_rocks_db_loop, TransferData,
+find_lazy_block_at_block_height, open_readonly_ordhook_db_conn_rocks_db_loop, TransferData,
};

use crate::db::{LazyBlockTransaction, TraversalResult};

@@ -30,7 +30,7 @@ pub fn compute_satoshi_number(
let mut ordinal_block_number = block_identifier.index as u32;
let txid = transaction_identifier.get_8_hash_bytes();

-let mut blocks_db = open_readonly_hord_db_conn_rocks_db_loop(&blocks_db_dir, &ctx);
+let mut blocks_db = open_readonly_ordhook_db_conn_rocks_db_loop(&blocks_db_dir, &ctx);

let (sats_ranges, inscription_offset_cross_outputs) = match traversals_cache
.get(&(block_identifier.index as u32, txid.clone()))

@@ -56,7 +56,7 @@ pub fn compute_satoshi_number(
if attempt < 3 {
attempt += 1;
blocks_db =
-open_readonly_hord_db_conn_rocks_db_loop(&blocks_db_dir, &ctx);
+open_readonly_ordhook_db_conn_rocks_db_loop(&blocks_db_dir, &ctx);
} else {
return Err(format!("block #{ordinal_block_number} not in database"));
}

@@ -176,7 +176,7 @@ pub fn compute_satoshi_number(
if attempt < 3 {
attempt += 1;
blocks_db =
-open_readonly_hord_db_conn_rocks_db_loop(&blocks_db_dir, &ctx);
+open_readonly_ordhook_db_conn_rocks_db_loop(&blocks_db_dir, &ctx);
} else {
return Err(format!("block #{ordinal_block_number} not in database"));
}

@@ -23,19 +23,19 @@ use crate::{
core::protocol::inscription_parsing::get_inscriptions_revealed_in_block, ord::sat::Sat,
};

-pub fn get_default_hord_db_file_path(base_dir: &PathBuf) -> PathBuf {
+pub fn get_default_ordhook_db_file_path(base_dir: &PathBuf) -> PathBuf {
let mut destination_path = base_dir.clone();
destination_path.push("hord.sqlite");
destination_path
}

-pub fn open_readonly_hord_db_conn(base_dir: &PathBuf, ctx: &Context) -> Result<Connection, String> {
-let path = get_default_hord_db_file_path(&base_dir);
+pub fn open_readonly_ordhook_db_conn(base_dir: &PathBuf, ctx: &Context) -> Result<Connection, String> {
+let path = get_default_ordhook_db_file_path(&base_dir);
let conn = open_existing_readonly_db(&path, ctx);
Ok(conn)
}

-pub fn open_readwrite_hord_db_conn(
+pub fn open_readwrite_ordhook_db_conn(
base_dir: &PathBuf,
ctx: &Context,
) -> Result<Connection, String> {

@@ -43,7 +43,7 @@ pub fn open_readwrite_hord_db_conn(
Ok(conn)
}

-pub fn initialize_hord_db(path: &PathBuf, ctx: &Context) -> Connection {
+pub fn initialize_ordhook_db(path: &PathBuf, ctx: &Context) -> Connection {
let conn = create_or_open_readwrite_db(path, ctx);
// TODO: introduce initial output
if let Err(e) = conn.execute(

@@ -125,7 +125,7 @@ pub fn initialize_hord_db(path: &PathBuf, ctx: &Context) -> Connection {
}

pub fn create_or_open_readwrite_db(cache_path: &PathBuf, ctx: &Context) -> Connection {
-let path = get_default_hord_db_file_path(&cache_path);
+let path = get_default_ordhook_db_file_path(&cache_path);
let open_flags = match std::fs::metadata(&path) {
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound {

@@ -192,7 +192,7 @@ fn open_existing_readonly_db(path: &PathBuf, ctx: &Context) -> Connection {
return conn;
}

-fn get_default_hord_db_file_path_rocks_db(base_dir: &PathBuf) -> PathBuf {
+fn get_default_ordhook_db_file_path_rocks_db(base_dir: &PathBuf) -> PathBuf {
let mut destination_path = base_dir.clone();
destination_path.push("hord.rocksdb");
destination_path

@@ -216,11 +216,11 @@ fn rocks_db_default_options() -> rocksdb::Options {
opts
}

-pub fn open_readonly_hord_db_conn_rocks_db(
+pub fn open_readonly_ordhook_db_conn_rocks_db(
base_dir: &PathBuf,
_ctx: &Context,
) -> Result<DB, String> {
-let path = get_default_hord_db_file_path_rocks_db(&base_dir);
+let path = get_default_ordhook_db_file_path_rocks_db(&base_dir);
let mut opts = rocks_db_default_options();
opts.set_disable_auto_compactions(true);
opts.set_max_background_jobs(0);

@@ -229,10 +229,10 @@ pub fn open_readonly_hord_db_conn_rocks_db(
Ok(db)
}

-pub fn open_readonly_hord_db_conn_rocks_db_loop(base_dir: &PathBuf, ctx: &Context) -> DB {
+pub fn open_readonly_ordhook_db_conn_rocks_db_loop(base_dir: &PathBuf, ctx: &Context) -> DB {
let mut retries = 0;
let blocks_db = loop {
-match open_readonly_hord_db_conn_rocks_db(&base_dir, &ctx) {
+match open_readonly_ordhook_db_conn_rocks_db(&base_dir, &ctx) {
Ok(db) => break db,
Err(e) => {
retries += 1;

@@ -248,20 +248,20 @@ pub fn open_readonly_hord_db_conn_rocks_db_loop(base_dir: &PathBuf, ctx: &Contex
blocks_db
}

-pub fn open_readwrite_hord_dbs(
+pub fn open_readwrite_ordhook_dbs(
base_dir: &PathBuf,
ctx: &Context,
) -> Result<(DB, Connection), String> {
-let blocks_db = open_readwrite_hord_db_conn_rocks_db(&base_dir, &ctx)?;
-let inscriptions_db = open_readwrite_hord_db_conn(&base_dir, &ctx)?;
+let blocks_db = open_readwrite_ordhook_db_conn_rocks_db(&base_dir, &ctx)?;
+let inscriptions_db = open_readwrite_ordhook_db_conn(&base_dir, &ctx)?;
Ok((blocks_db, inscriptions_db))
}

-pub fn open_readwrite_hord_db_conn_rocks_db(
+pub fn open_readwrite_ordhook_db_conn_rocks_db(
base_dir: &PathBuf,
_ctx: &Context,
) -> Result<DB, String> {
-let path = get_default_hord_db_file_path_rocks_db(&base_dir);
+let path = get_default_ordhook_db_file_path_rocks_db(&base_dir);
let opts = rocks_db_default_options();
let db = DB::open(&opts, path)
.map_err(|e| format!("unable to open blocks_db: {}", e.to_string()))?;

@@ -867,10 +867,10 @@ pub fn find_watched_satpoint_for_inscription(

pub fn find_inscriptions_at_wached_outpoint(
outpoint: &str,
-hord_db_conn: &Connection,
+ordhook_db_conn: &Connection,
) -> Result<Vec<WatchedSatpoint>, String> {
let args: &[&dyn ToSql] = &[&outpoint.to_sql().unwrap()];
-let mut stmt = hord_db_conn
+let mut stmt = ordhook_db_conn
.prepare("SELECT inscription_id, offset FROM locations WHERE outpoint_to_watch = ? ORDER BY offset ASC")
.map_err(|e| format!("unable to query locations table: {}", e.to_string()))?;
let mut results = vec![];

@@ -959,7 +959,7 @@ pub fn insert_entry_in_locations(
}
}

-pub fn delete_data_in_hord_db(
+pub fn delete_data_in_ordhook_db(
start_block: u64,
end_block: u64,
blocks_db_rw: &DB,

@@ -3,7 +3,7 @@ use crate::core::protocol::inscription_parsing::{
get_inscriptions_revealed_in_block, parse_inscriptions_and_standardize_block,
};
use crate::core::protocol::inscription_sequencing::consolidate_block_with_pre_computed_ordinals_data;
-use crate::db::{get_any_entry_in_ordinal_activities, open_readonly_hord_db_conn};
+use crate::db::{get_any_entry_in_ordinal_activities, open_readonly_ordhook_db_conn};
use crate::download::download_ordinals_dataset_if_required;
use crate::service::{
open_readwrite_predicates_db_conn_or_panic, update_predicate_status, PredicateStatus,

@@ -75,7 +75,7 @@ pub async fn scan_bitcoin_chainstate_via_rpc_using_predicate(
BlockHeights::BlockRange(start_block, end_block).get_sorted_entries()
};

-let mut inscriptions_db_conn = open_readonly_hord_db_conn(&config.expected_cache_path(), ctx)?;
+let mut inscriptions_db_conn = open_readonly_ordhook_db_conn(&config.expected_cache_path(), ctx)?;

info!(
ctx.expect_logger(),

@@ -96,7 +96,7 @@ pub async fn scan_bitcoin_chainstate_via_rpc_using_predicate(

// Re-initiate connection every 250 blocks (pessimistic) to avoid stale connections
let conn_updated = if number_of_blocks_scanned % 250 == 0 {
-inscriptions_db_conn = open_readonly_hord_db_conn(&config.expected_cache_path(), ctx)?;
+inscriptions_db_conn = open_readonly_ordhook_db_conn(&config.expected_cache_path(), ctx)?;
true
} else {
false

@@ -9,10 +9,10 @@ use crate::core::pipeline::processors::transfers_recomputing::start_transfers_re
use crate::core::protocol::inscription_parsing::parse_inscriptions_in_standardized_block;
use crate::core::protocol::inscription_sequencing::SequenceCursor;
use crate::core::{
-new_traversals_lazy_cache, revert_hord_db_with_augmented_bitcoin_block, should_sync_hord_db,
+new_traversals_lazy_cache, revert_ordhook_db_with_augmented_bitcoin_block, should_sync_ordhook_db,
};
use crate::db::{
-insert_entry_in_blocks, open_readonly_hord_db_conn, open_readwrite_hord_dbs, LazyBlock,
+insert_entry_in_blocks, open_readonly_ordhook_db_conn, open_readwrite_ordhook_dbs, LazyBlock,
};
use crate::scan::bitcoin::process_block_with_predicates;
use crate::service::http_api::{load_predicates_from_redis, start_predicate_api_server};

@@ -53,21 +53,21 @@ impl Service {
);
event_observer_config.chainhook_config = Some(chainhook_config);

-let hord_config = self.config.get_hord_config();
+let ordhook_config = self.config.get_ordhook_config();

// Sleep
// std::thread::sleep(std::time::Duration::from_secs(1200));

// Force rebuild
// {
-// let blocks_db = open_readwrite_hord_db_conn_rocks_db(
+// let blocks_db = open_readwrite_ordhook_db_conn_rocks_db(
// &self.config.expected_cache_path(),
// &self.ctx,
// )?;
// let inscriptions_db_conn_rw =
-// open_readwrite_hord_db_conn(&self.config.expected_cache_path(), &self.ctx)?;
+// open_readwrite_ordhook_db_conn(&self.config.expected_cache_path(), &self.ctx)?;

-// delete_data_in_hord_db(
+// delete_data_in_ordhook_db(
// 767430,
// 800000,
// &blocks_db,

@@ -107,7 +107,7 @@ impl Service {

// let (cursor, tip) = {
// let inscriptions_db_conn =
-// open_readonly_hord_db_conn(&self.config.expected_cache_path(), &self.ctx)?;
+// open_readonly_ordhook_db_conn(&self.config.expected_cache_path(), &self.ctx)?;
// let cursor = find_latest_transfers_block_height(&inscriptions_db_conn, &self.ctx).unwrap_or(1);
// match find_latest_inscription_block_height(&inscriptions_db_conn, &self.ctx)? {
// Some(height) => (cursor, height),

@@ -158,9 +158,9 @@ impl Service {
}

let (observer_event_tx, observer_event_rx) = crossbeam_channel::unbounded();
-let traversals_cache = Arc::new(new_traversals_lazy_cache(hord_config.cache_size));
+let traversals_cache = Arc::new(new_traversals_lazy_cache(ordhook_config.cache_size));

-let inner_ctx = if hord_config.logs.chainhook_internals {
+let inner_ctx = if ordhook_config.logs.chainhook_internals {
self.ctx.clone()
} else {
Context::empty()

@@ -197,7 +197,7 @@ impl Service {
};

let (blocks_db_rw, mut inscriptions_db_conn_rw) =
-match open_readwrite_hord_dbs(&config.expected_cache_path(), &ctx) {
+match open_readwrite_ordhook_dbs(&config.expected_cache_path(), &ctx) {
Ok(dbs) => dbs,
Err(e) => {
ctx.try_log(|logger| {

@@ -217,7 +217,7 @@ impl Service {
block.block_identifier.index
);

-if let Err(e) = revert_hord_db_with_augmented_bitcoin_block(
+if let Err(e) = revert_ordhook_db_with_augmented_bitcoin_block(
block,
&blocks_db_rw,
&inscriptions_db_conn_rw,

@@ -261,7 +261,7 @@ impl Service {
parse_inscriptions_in_standardized_block(block, &ctx);
}
let inscriptions_db_conn =
-open_readonly_hord_db_conn(&config.expected_cache_path(), &ctx)
+open_readonly_ordhook_db_conn(&config.expected_cache_path(), &ctx)
.expect("unable to open inscriptions db");
let mut sequence_cursor = SequenceCursor::new(inscriptions_db_conn);

@@ -270,7 +270,7 @@ impl Service {
&mut sequence_cursor,
&moved_traversals_cache,
&mut inscriptions_db_conn_rw,
-&config.get_hord_config(),
+&config.get_ordhook_config(),
&None,
&ctx,
);

@@ -401,7 +401,7 @@ impl Service {
// Start predicate processor

while let Some((start_block, end_block, speed)) =
-should_sync_hord_db(&self.config, &self.ctx)?
+should_sync_ordhook_db(&self.config, &self.ctx)?
{
let blocks_post_processor = start_inscription_indexing_processor(
&self.config,

@@ -414,8 +414,8 @@ impl Service {
"Indexing inscriptions from block #{start_block} to block #{end_block}"
);

-let hord_config = self.config.get_hord_config();
-let first_inscription_height = hord_config.first_inscription_height;
+let ordhook_config = self.config.get_ordhook_config();
+let first_inscription_height = ordhook_config.first_inscription_height;
download_and_pipeline_blocks(
&self.config,
start_block,

@@ -446,8 +446,8 @@ impl Service {
"Indexing inscriptions from block #{start_block} to block #{end_block}"
);

-let hord_config = self.config.get_hord_config();
-let first_inscription_height = hord_config.first_inscription_height;
+let ordhook_config = self.config.get_ordhook_config();
+let first_inscription_height = ordhook_config.first_inscription_height;
download_and_pipeline_blocks(
&self.config,
start_block,

@@ -6,21 +6,21 @@ RUN apt update && apt install -y ca-certificates pkg-config libssl-dev libclang-

RUN rustup update 1.67.0 && rustup default 1.67.0

-COPY ./components/hord-cli /src/components/hord-cli
+COPY ./components/ordhook-cli /src/components/ordhook-cli

-WORKDIR /src/components/hord-cli
+WORKDIR /src/components/ordhook-cli

RUN mkdir /out

RUN cargo build --features release --release

-RUN cp target/release/hord /out
+RUN cp target/release/ordhook /out

FROM debian:bullseye-slim

RUN apt update && apt install -y ca-certificates libssl-dev

-COPY --from=build /out/ /bin/
+COPY --from=build /out/ordhook /bin/hord

WORKDIR /workspace

tmp2/Hord.toml (new file), 39 lines:

@@ -0,0 +1,39 @@
+[storage]
+working_dir = "cache"
+
+# The Http Api allows you to register / deregister
+# dynamically predicates.
+# Disable by default.
+#
+# [http_api]
+# http_port = 20456
+# database_uri = "redis://localhost:6379/"
+
+[network]
+mode = "mainnet"
+bitcoind_rpc_url = "http://0.0.0.0:8332"
+bitcoind_rpc_username = "devnet"
+bitcoind_rpc_password = "devnet"
+# Bitcoin block events can be received by Chainhook
+# either through a Bitcoin node's ZeroMQ interface,
+# or through the Stacks node. Zmq is being
+# used by default:
+bitcoind_zmq_url = "tcp://0.0.0.0:18543"
+# but stacks can also be used:
+# stacks_node_rpc_url = "http://0.0.0.0:20443"
+
+[limits]
+max_number_of_bitcoin_predicates = 100
+max_number_of_concurrent_bitcoin_scans = 100
+max_number_of_processing_threads = 16
+bitcoin_concurrent_http_requests_max = 16
+max_caching_memory_size_mb = 32000
+
+# Disable the following section if the state
+# must be built locally
+[bootstrap]
+download_url = "https://archive.hiro.so/mainnet/chainhooks/hord.sqlite"
+
+[logs]
+ordinals_internals = false
+chainhook_internals = false
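
As the README portion of this commit explains, registering additional `http-post` event destinations at runtime requires a running Redis server and an enabled `[http_api]` section; with the sample file above that amounts to uncommenting:

```toml
[http_api]
http_port = 20456
database_uri = "redis://localhost:6379/"
```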