feat!: support brc20 activity on scan blocks command (#350)

* fix: meta protocols for block scan

* chore: show brc20 operations on block scan

* docs: readme

* chore: generator
Rafael Cárdenas
2024-07-31 11:37:43 -06:00
committed by GitHub
parent 13e5a97b27
commit caacff7c4f
10 changed files with 189 additions and 152 deletions

View File

@@ -45,7 +45,7 @@ Inscription 6fb976ab49dcec017f1e201e84395983204ae1a7c2abf7ced0a85d692e442799i0 r
Inscription 26482871f33f1051f450f2da9af275794c0b5f1c61ebf35e4467fb42c2813403i0 revealed at block #767753 (ordinal_number 727624168684699, inscription_number 1)
```
In this command, an interval of blocks to scan (starting at block `767430`, ending at block `767753`) is being provided. `ordhook` will display inscriptions and transfers activities occurring in the range of the specified blocks.
In this command, an interval of blocks to scan (starting at block `767430`, ending at block `767753`) is being provided. `ordhook` will display inscription and transfer activity occurring in the range of the specified blocks. Add the option `--meta-protocols=brc20` if you wish to explore BRC-20 activity as well.
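For example, assuming the interval syntax shown at the top of this section, a BRC-20-aware scan might look as follows. The command line and the BRC-20 rows are illustrative only, shaped after the `println!("BRC-20 {} {} {}", ...)` call added in this PR:
```
$ ordhook scan blocks 767430 767753 --mainnet --meta-protocols=brc20
BRC-20 deploy ordi 0
BRC-20 mint ordi 1000
BRC-20 transfer_send ordi 0
```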
The activity for a given inscription can be retrieved using the following command:

View File

@@ -24,7 +24,7 @@ clap = { version = "3.2.23", features = ["derive"], optional = true }
clap_generate = { version = "3.0.3", optional = true }
toml = { version = "0.5.6", features = ["preserve_order"], optional = true }
ctrlc = { version = "3.2.2", optional = true }
tcmalloc2 = { version = "0.1.2+2.13", optional = true }
tcmalloc2 = { version = "0.1.2", optional = true }
[features]
default = ["cli"]

View File

@@ -16,7 +16,9 @@ use ordhook::chainhook_sdk::types::{BitcoinBlockData, TransactionIdentifier};
use ordhook::chainhook_sdk::utils::BlockHeights;
use ordhook::chainhook_sdk::utils::Context;
use ordhook::config::Config;
use ordhook::core::meta_protocols::brc20::db::open_readwrite_brc20_db_conn;
use ordhook::core::meta_protocols::brc20::db::{
get_brc20_operations_on_block, open_readwrite_brc20_db_conn,
};
use ordhook::core::new_traversals_lazy_cache;
use ordhook::core::pipeline::download_and_pipeline_blocks;
use ordhook::core::pipeline::processors::block_archiving::start_block_archiving_processor;
@@ -30,12 +32,12 @@ use ordhook::db::{
open_ordhook_db_conn_rocks_db_loop, open_readonly_ordhook_db_conn,
open_readonly_ordhook_db_conn_rocks_db, open_readwrite_ordhook_dbs, BlockBytesCursor,
};
use ordhook::download::download_ordinals_dataset_if_required;
use ordhook::download::download_archive_datasets_if_required;
use ordhook::scan::bitcoin::scan_bitcoin_chainstate_via_rpc_using_predicate;
use ordhook::service::observers::initialize_observers_db;
use ordhook::service::{start_observer_forwarding, Service};
use ordhook::utils::bitcoind::bitcoind_get_block_height;
use ordhook::{hex, initialize_databases};
use ordhook::{hex, initialize_databases, try_error, try_info, try_warn};
use reqwest::Client as HttpClient;
use std::collections::HashSet;
use std::io::{BufReader, Read};
@@ -118,6 +120,9 @@ struct ScanBlocksCommand {
conflicts_with = "regtest"
)]
pub config_path: Option<String>,
/// Meta protocols
#[clap(long = "meta-protocols", conflicts_with = "config-path")]
pub meta_protocols: Option<String>,
/// HTTP Post activity to a URL
#[clap(long = "post-to")]
pub post_to: Option<String>,
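For context, the new flag lands on a clap 3 derive struct (the Cargo.toml hunk above pins `clap = "3.2.23"` with the `derive` feature). A minimal standalone sketch of the same attribute, with the `conflicts_with` constraint omitted:

```rust
use clap::Parser;

/// Minimal stand-in for the ScanBlocksCommand struct above.
#[derive(Parser, Debug)]
struct ScanBlocksCommand {
    /// Meta protocols
    #[clap(long = "meta-protocols")]
    meta_protocols: Option<String>,
}

fn main() {
    // Simulate `ordhook scan blocks ... --meta-protocols=brc20`.
    let cmd = ScanBlocksCommand::parse_from(["scan-blocks", "--meta-protocols", "brc20"]);
    assert_eq!(cmd.meta_protocols.as_deref(), Some("brc20"));
}
```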
@@ -535,8 +540,13 @@ pub fn main() {
async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
match opts.command {
Command::Scan(ScanCommand::Blocks(cmd)) => {
let config: Config =
ConfigFile::default(cmd.regtest, cmd.testnet, cmd.mainnet, &cmd.config_path)?;
let config: Config = ConfigFile::default(
cmd.regtest,
cmd.testnet,
cmd.mainnet,
&cmd.config_path,
&cmd.meta_protocols,
)?;
// Download dataset if required
// If console:
// - Replay based on SQLite queries
@@ -548,17 +558,14 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
.map_err(|_e| format!("Block start / end block spec invalid"))?;
if let Some(ref post_to) = cmd.post_to {
info!(ctx.expect_logger(), "A fully synchronized bitcoind node is required for retrieving inscriptions content.");
info!(
ctx.expect_logger(),
"Checking {}...", config.network.bitcoind_rpc_url
);
try_info!(ctx, "A fully synchronized bitcoind node is required for retrieving inscriptions content.");
try_info!(ctx, "Checking {}...", config.network.bitcoind_rpc_url);
let tip = bitcoind_get_block_height(&config, ctx);
if let Some(highest_desired) = block_range.pop_back() {
if tip < highest_desired {
error!(ctx.expect_logger(), "Unable to scan desired block range: underlying bitcoind synchronized until block #{} ", tip);
try_error!(ctx, "Unable to scan desired block range: underlying bitcoind synchronized until block #{} ", tip);
} else {
info!(ctx.expect_logger(), "Starting scan");
try_info!(ctx, "Starting scan");
}
block_range.push_back(highest_desired);
}
@@ -582,17 +589,16 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
)
.await?;
} else {
let _ = download_ordinals_dataset_if_required(&config, ctx).await;
download_archive_datasets_if_required(&config, ctx).await;
let mut total_inscriptions = 0;
let mut total_transfers = 0;
let db_connections = initialize_databases(&config, ctx);
let inscriptions_db_conn = db_connections.ordhook;
while let Some(block_height) = block_range.pop_front() {
let inscriptions =
find_all_inscriptions_in_block(&block_height, &inscriptions_db_conn, ctx);
find_all_inscriptions_in_block(&block_height, &db_connections.ordhook, ctx);
let locations =
find_all_transfers_in_block(&block_height, &inscriptions_db_conn, ctx);
find_all_transfers_in_block(&block_height, &db_connections.ordhook, ctx);
let mut total_transfers_in_block = 0;
@@ -618,6 +624,18 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
);
}
}
match db_connections.brc20 {
Some(ref conn) => {
let activity = get_brc20_operations_on_block(block_height, &conn, ctx);
for (_, row) in activity.iter() {
if row.operation == "transfer_receive" {
continue;
}
println!("BRC-20 {} {} {}", row.operation, row.tick, row.avail_balance);
}
}
None => {} // no BRC-20 database when the meta protocol is disabled; nothing to print
}
if total_transfers_in_block > 0 && !inscriptions.is_empty() {
println!(
"Inscriptions revealed: {}, inscriptions transferred: {total_transfers_in_block}",
@@ -632,15 +650,20 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
if total_transfers == 0 && total_inscriptions == 0 {
let db_file_path =
get_default_ordhook_db_file_path(&config.expected_cache_path());
warn!(ctx.expect_logger(), "No data available. Check the validity of the range being scanned and the validity of your local database {}", db_file_path.display());
try_warn!(ctx, "No data available. Check the validity of the range being scanned and the validity of your local database {}", db_file_path.display());
}
}
}
Command::Scan(ScanCommand::Inscription(cmd)) => {
let config: Config =
ConfigFile::default(cmd.regtest, cmd.testnet, cmd.mainnet, &cmd.config_path)?;
let config: Config = ConfigFile::default(
cmd.regtest,
cmd.testnet,
cmd.mainnet,
&cmd.config_path,
&None,
)?;
let _ = download_ordinals_dataset_if_required(&config, ctx).await;
let _ = download_archive_datasets_if_required(&config, ctx).await;
let inscriptions_db_conn =
open_readonly_ordhook_db_conn(&config.expected_cache_path(), ctx)?;
@@ -663,8 +686,13 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
);
}
Command::Scan(ScanCommand::Transaction(cmd)) => {
let config: Config =
ConfigFile::default(cmd.regtest, cmd.testnet, cmd.mainnet, &cmd.config_path)?;
let config: Config = ConfigFile::default(
cmd.regtest,
cmd.testnet,
cmd.mainnet,
&cmd.config_path,
&None,
)?;
let http_client = build_http_client();
let block = fetch_and_standardize_block(
&http_client,
@@ -702,8 +730,13 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
sleep(Duration::from_secs(3600 * 24 * 7))
}
let config =
ConfigFile::default(cmd.regtest, cmd.testnet, cmd.mainnet, &cmd.config_path)?;
let config = ConfigFile::default(
cmd.regtest,
cmd.testnet,
cmd.mainnet,
&cmd.config_path,
&None,
)?;
let db_connections = initialize_databases(&config, ctx);
let last_known_block =
@@ -769,7 +802,8 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
ConfigCommand::New(cmd) => {
use std::fs::File;
use std::io::Write;
let config = ConfigFile::default(cmd.regtest, cmd.testnet, cmd.mainnet, &None)?;
let config =
ConfigFile::default(cmd.regtest, cmd.testnet, cmd.mainnet, &None, &None)?;
let config_content = generate_config(&config.network.bitcoin_network);
let mut file_path = PathBuf::new();
file_path.push("Ordhook.toml");
@@ -781,7 +815,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
}
},
Command::Db(OrdhookDbCommand::New(cmd)) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path)?;
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
// Create DB
initialize_databases(&config, ctx);
open_ordhook_db_conn_rocks_db_loop(
@@ -793,14 +827,14 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
);
}
Command::Db(OrdhookDbCommand::Sync(cmd)) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path)?;
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
initialize_databases(&config, ctx);
let service = Service::new(config, ctx.clone());
service.update_state(None).await?;
}
Command::Db(OrdhookDbCommand::Repair(subcmd)) => match subcmd {
RepairCommand::Blocks(cmd) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path)?;
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
let mut ordhook_config = config.get_ordhook_config();
if let Some(network_threads) = cmd.network_threads {
ordhook_config.resources.bitcoind_rpc_threads = network_threads;
@@ -839,7 +873,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
}
}
RepairCommand::Inscriptions(cmd) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path)?;
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
let mut ordhook_config = config.get_ordhook_config();
if let Some(network_threads) = cmd.network_threads {
ordhook_config.resources.bitcoind_rpc_threads = network_threads;
@@ -867,7 +901,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
.await?;
}
RepairCommand::Transfers(cmd) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path)?;
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
let block_post_processor = match cmd.repair_observers {
Some(true) => {
let tx_replayer =
@@ -891,7 +925,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
}
},
Command::Db(OrdhookDbCommand::Check(cmd)) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path)?;
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
{
let blocks_db = open_readonly_ordhook_db_conn_rocks_db(
&config.expected_cache_path(),
@@ -906,7 +940,7 @@ async fn handle_command(opts: Opts, ctx: &Context) -> Result<(), String> {
}
}
Command::Db(OrdhookDbCommand::Drop(cmd)) => {
let config = ConfigFile::default(false, false, false, &cmd.config_path)?;
let config = ConfigFile::default(false, false, false, &cmd.config_path, &None)?;
println!(
"{} blocks will be deleted. Confirm? [Y/n]",

View File

@@ -5,8 +5,9 @@ use ordhook::chainhook_sdk::types::{
};
use ordhook::config::{
Config, LogConfig, MetaProtocolsConfig, PredicatesApi, PredicatesApiConfig, ResourcesConfig,
SnapshotConfig, StorageConfig, DEFAULT_BITCOIND_RPC_THREADS, DEFAULT_BITCOIND_RPC_TIMEOUT,
DEFAULT_BRC20_LRU_CACHE_SIZE, DEFAULT_CONTROL_PORT, DEFAULT_MEMORY_AVAILABLE, DEFAULT_ULIMIT,
SnapshotConfig, SnapshotConfigDownloadUrls, StorageConfig, DEFAULT_BITCOIND_RPC_THREADS,
DEFAULT_BITCOIND_RPC_TIMEOUT, DEFAULT_BRC20_LRU_CACHE_SIZE, DEFAULT_CONTROL_PORT,
DEFAULT_MEMORY_AVAILABLE, DEFAULT_ULIMIT,
};
use std::fs::File;
use std::io::{BufReader, Read};
@@ -51,8 +52,11 @@ impl ConfigFile {
};
let snapshot = match config_file.snapshot {
Some(bootstrap) => match bootstrap.download_url {
Some(ref url) => SnapshotConfig::Download(url.to_string()),
Some(bootstrap) => match bootstrap.ordinals_url {
Some(ref url) => SnapshotConfig::Download(SnapshotConfigDownloadUrls {
ordinals: url.to_string(),
brc20: bootstrap.brc20_url,
}),
None => SnapshotConfig::Build,
},
None => SnapshotConfig::Build,
@@ -144,14 +148,21 @@ impl ConfigFile {
testnet: bool,
mainnet: bool,
config_path: &Option<String>,
meta_protocols: &Option<String>,
) -> Result<Config, String> {
let config = match (devnet, testnet, mainnet, config_path) {
let mut config = match (devnet, testnet, mainnet, config_path) {
(true, false, false, _) => Config::devnet_default(),
(false, true, false, _) => Config::testnet_default(),
(false, false, true, _) => Config::mainnet_default(),
(false, false, false, Some(config_path)) => ConfigFile::from_file_path(config_path)?,
_ => Err("Invalid combination of arguments".to_string())?,
};
if let Some(meta_protocols) = meta_protocols {
match meta_protocols.as_str() {
"brc20" => config.meta_protocols.brc20 = true,
_ => Err("Invalid meta protocol".to_string())?,
}
}
Ok(config)
}
}
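The string accepted by `--meta-protocols` is validated here rather than by clap. A standalone sketch of that validation step (the struct and error message mirror the diff; `apply_meta_protocols` is a hypothetical helper, not the crate API):

```rust
/// Minimal stand-in for ordhook's MetaProtocolsConfig.
#[derive(Debug, Default)]
struct MetaProtocolsConfig {
    brc20: bool,
}

/// Hypothetical helper mirroring the `if let Some(meta_protocols)` block above.
fn apply_meta_protocols(
    config: &mut MetaProtocolsConfig,
    meta_protocols: &Option<String>,
) -> Result<(), String> {
    if let Some(meta_protocols) = meta_protocols {
        match meta_protocols.as_str() {
            "brc20" => config.brc20 = true,
            _ => return Err("Invalid meta protocol".to_string()),
        }
    }
    Ok(())
}

fn main() {
    let mut config = MetaProtocolsConfig::default();
    apply_meta_protocols(&mut config, &Some("brc20".to_string())).unwrap();
    assert!(config.brc20);
    // Anything other than "brc20" is rejected, as in the match above.
    assert!(apply_meta_protocols(&mut config, &Some("runes".to_string())).is_err());
}
```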
@@ -177,7 +188,8 @@ pub struct PredicatesApiConfigFile {
#[derive(Deserialize, Debug, Clone)]
pub struct SnapshotConfigFile {
pub download_url: Option<String>,
pub ordinals_url: Option<String>,
pub brc20_url: Option<String>,
}
#[derive(Deserialize, Debug, Clone)]
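With `download_url` replaced by `ordinals_url` plus an optional `brc20_url`, the `[snapshot]` section of Ordhook.toml deserializes as sketched below, assuming the `serde` and `toml` crates already in ordhook's dependency tree:

```rust
use serde::Deserialize;

/// Mirror of the SnapshotConfigFile struct above.
#[derive(Deserialize, Debug, Clone)]
pub struct SnapshotConfigFile {
    pub ordinals_url: Option<String>,
    pub brc20_url: Option<String>,
}

fn main() {
    // Keys match the updated [snapshot] section of Ordhook.toml.
    let toml_str = r#"
ordinals_url = "https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-sqlite-latest"
brc20_url = "https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-brc20-latest"
"#;
    let snapshot: SnapshotConfigFile = toml::from_str(toml_str).unwrap();
    assert!(snapshot.brc20_url.is_some());
}
```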

View File

@@ -37,7 +37,8 @@ expected_observers_count = 1
# Disable the following section if the state
# must be built locally
[snapshot]
download_url = "https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-sqlite-latest"
ordinals_url = "https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-sqlite-latest"
brc20_url = "https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-brc20-latest"
[logs]
ordinals_internals = true

View File

@@ -8,6 +8,8 @@ use std::path::PathBuf;
const DEFAULT_MAINNET_ORDINALS_SQLITE_ARCHIVE: &str =
"https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-sqlite-latest";
const DEFAULT_MAINNET_BRC20_SQLITE_ARCHIVE: &str =
"https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-brc20-latest";
pub const DEFAULT_INGESTION_PORT: u16 = 20455;
pub const DEFAULT_CONTROL_PORT: u16 = 20456;
@@ -56,10 +58,16 @@ pub struct PredicatesApiConfig {
pub display_logs: bool,
}
#[derive(Clone, Debug)]
pub struct SnapshotConfigDownloadUrls {
pub ordinals: String,
pub brc20: Option<String>,
}
#[derive(Clone, Debug)]
pub enum SnapshotConfig {
Build,
Download(String),
Download(SnapshotConfigDownloadUrls),
}
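Because `Download` now wraps a struct instead of a single `String`, the removed `expected_remote_ordinals_sqlite_*` helpers (next hunk) give way to per-dataset URL derivation. A standalone sketch of how a caller can recover both `.tar.gz` URLs from the new variant; the `archive_urls` helper is illustrative, not part of the crate:

```rust
#[derive(Clone, Debug)]
pub struct SnapshotConfigDownloadUrls {
    pub ordinals: String,
    pub brc20: Option<String>,
}

#[derive(Clone, Debug)]
pub enum SnapshotConfig {
    Build,
    Download(SnapshotConfigDownloadUrls),
}

/// Illustrative helper: the `.tar.gz` archive URL for each configured dataset.
fn archive_urls(snapshot: &SnapshotConfig) -> Option<(String, Option<String>)> {
    match snapshot {
        SnapshotConfig::Build => None,
        SnapshotConfig::Download(urls) => Some((
            format!("{}.tar.gz", urls.ordinals),
            urls.brc20.as_ref().map(|url| format!("{url}.tar.gz")),
        )),
    }
}

fn main() {
    let snapshot = SnapshotConfig::Download(SnapshotConfigDownloadUrls {
        ordinals: "https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-sqlite-latest".into(),
        brc20: Some("https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-brc20-latest".into()),
    });
    println!("{:?}", archive_urls(&snapshot));
}
```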
#[derive(Clone, Debug)]
@@ -153,21 +161,6 @@ impl Config {
destination_path
}
fn expected_remote_ordinals_sqlite_base_url(&self) -> &str {
match &self.snapshot {
SnapshotConfig::Build => unreachable!(),
SnapshotConfig::Download(url) => &url,
}
}
pub fn expected_remote_ordinals_sqlite_sha256(&self) -> String {
format!("{}.sha256", self.expected_remote_ordinals_sqlite_base_url())
}
pub fn expected_remote_ordinals_sqlite_url(&self) -> String {
format!("{}.tar.gz", self.expected_remote_ordinals_sqlite_base_url())
}
pub fn devnet_default() -> Config {
Config {
storage: StorageConfig {
@@ -242,7 +235,10 @@ impl Config {
working_dir: default_cache_path(),
},
http_api: PredicatesApi::Off,
snapshot: SnapshotConfig::Download(DEFAULT_MAINNET_ORDINALS_SQLITE_ARCHIVE.to_string()),
snapshot: SnapshotConfig::Download(SnapshotConfigDownloadUrls {
ordinals: DEFAULT_MAINNET_ORDINALS_SQLITE_ARCHIVE.to_string(),
brc20: Some(DEFAULT_MAINNET_BRC20_SQLITE_ARCHIVE.to_string()),
}),
resources: ResourcesConfig {
cpu_core_available: num_cpus::get(),
memory_available: DEFAULT_MEMORY_AVAILABLE,

View File

@@ -329,11 +329,11 @@ pub fn insert_token_rows(rows: &Vec<Brc20DbTokenRow>, db_tx: &Connection, ctx: &
}
pub fn get_brc20_operations_on_block(
block_identifier: &BlockIdentifier,
block_height: u64,
db_tx: &Connection,
ctx: &Context,
) -> HashMap<u64, Brc20DbLedgerRow> {
let args: &[&dyn ToSql] = &[&block_identifier.index.to_sql().unwrap()];
let args: &[&dyn ToSql] = &[&block_height.to_sql().unwrap()];
let query = "
SELECT
inscription_id, inscription_number, ordinal_number, block_height, tx_index, tick, address, avail_balance, trans_balance, operation
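The signature change means callers pass a plain `u64` height instead of a `&BlockIdentifier`. A standalone rusqlite sketch of the same height-keyed lookup; the `ledger` table name and the REAL type of `avail_balance` are assumptions, only the column names come from the query above:

```rust
use rusqlite::Connection;

/// Standalone sketch of a height-keyed BRC-20 lookup (assumed schema).
fn print_brc20_activity(conn: &Connection, block_height: u64) -> rusqlite::Result<()> {
    let mut stmt = conn.prepare(
        "SELECT operation, tick, avail_balance FROM ledger WHERE block_height = ?1",
    )?;
    // SQLite integers are i64, so bind the height as such.
    let mut rows = stmt.query([block_height as i64])?;
    while let Some(row) = rows.next()? {
        let operation: String = row.get(0)?;
        let tick: String = row.get(1)?;
        let avail_balance: f64 = row.get(2)?;
        if operation == "transfer_receive" {
            // Skipped in the scan output, as in the main.rs hunk earlier.
            continue;
        }
        println!("BRC-20 {operation} {tick} {avail_balance}");
    }
    Ok(())
}

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open("brc20.sqlite")?; // path is illustrative
    print_brc20_activity(&conn, 767_753)
}
```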

View File

@@ -928,7 +928,7 @@ pub fn consolidate_block_with_pre_computed_ordinals_data(
};
let mut brc20_token_map = HashMap::new();
let mut brc20_block_ledger_map = match brc20_db_conn {
Some(conn) => get_brc20_operations_on_block(&block.block_identifier, &conn, &ctx),
Some(conn) => get_brc20_operations_on_block(block.block_identifier.index, &conn, &ctx),
None => HashMap::new(),
};
for (tx_index, tx) in block.transactions.iter_mut().enumerate() {

View File

@@ -1,6 +1,6 @@
use crate::config::Config;
use crate::config::{Config, SnapshotConfig};
use crate::utils::read_file_content_at_path;
use chainhook_sdk::types::BitcoinNetwork;
use crate::{try_error, try_info, try_warn};
use chainhook_sdk::utils::Context;
use flate2::read::GzDecoder;
use futures_util::StreamExt;
@@ -12,40 +12,19 @@ use std::io::{Read, Write};
use std::path::PathBuf;
use tar::Archive;
pub fn default_sqlite_file_path(_network: &BitcoinNetwork) -> String {
format!("hord.sqlite").to_lowercase()
}
pub fn default_sqlite_sha_file_path(_network: &BitcoinNetwork) -> String {
format!("hord.sqlite.sha256").to_lowercase()
}
pub async fn download_sqlite_file(config: &Config, ctx: &Context) -> Result<(), String> {
let destination_path = config.expected_cache_path();
std::fs::create_dir_all(&destination_path).unwrap_or_else(|e| {
if ctx.logger.is_some() {
println!("{}", e.to_string());
}
/// Downloads and decompresses a remote `tar.gz` file.
pub async fn download_and_decompress_archive_file(
file_url: String,
file_name: &str,
config: &Config,
ctx: &Context,
) -> Result<(), String> {
let destination_dir_path = config.expected_cache_path();
std::fs::create_dir_all(&destination_dir_path).unwrap_or_else(|e| {
try_error!(ctx, "{e}");
});
// let remote_sha_url = config.expected_remote_ordinals_sqlite_sha256();
// let res = reqwest::get(&remote_sha_url)
// .await
// .or(Err(format!("Failed to GET from '{}'", &remote_sha_url)))?
// .bytes()
// .await
// .or(Err(format!("Failed to GET from '{}'", &remote_sha_url)))?;
// let mut local_sha_file_path = destination_path.clone();
// local_sha_file_path.push(default_sqlite_sha_file_path(
// &config.network.bitcoin_network,
// ));
// write_file_content_at_path(&local_sha_file_path, &res.to_vec())?;
let file_url = config.expected_remote_ordinals_sqlite_url();
if ctx.logger.is_some() {
println!("=> {file_url}");
}
try_info!(ctx, "=> {file_url}");
let res = reqwest::get(&file_url)
.await
.or(Err(format!("Failed to GET from '{}'", &file_url)))?;
@@ -54,7 +33,7 @@ pub async fn download_sqlite_file(config: &Config, ctx: &Context) -> Result<(),
let (tx, rx) = flume::bounded(0);
if res.status() == reqwest::StatusCode::OK {
let limit = res.content_length().unwrap_or(10_000_000_000) as i64;
let archive_tmp_file = PathBuf::from("db.tar");
let archive_tmp_file = PathBuf::from(format!("{file_name}.tar.gz"));
let decoder_thread = std::thread::spawn(move || {
{
let input = ChannelRead::new(rx);
@@ -84,7 +63,7 @@ pub async fn download_sqlite_file(config: &Config, ctx: &Context) -> Result<(),
}
let archive_file = File::open(&archive_tmp_file).unwrap();
let mut archive = Archive::new(archive_file);
if let Err(e) = archive.unpack(&destination_path) {
if let Err(e) = archive.unpack(&destination_dir_path) {
let err = format!("unable to decompress file: {}", e.to_string());
return Err(err);
}
@@ -171,63 +150,78 @@ impl Read for ChannelRead {
}
}
pub async fn download_ordinals_dataset_if_required(config: &Config, ctx: &Context) -> bool {
if config.should_bootstrap_through_download() {
let url = config.expected_remote_ordinals_sqlite_url();
let mut sqlite_file_path = config.expected_cache_path();
sqlite_file_path.push(default_sqlite_file_path(&config.network.bitcoin_network));
let mut sqlite_sha_file_path = config.expected_cache_path();
sqlite_sha_file_path.push(default_sqlite_sha_file_path(
&config.network.bitcoin_network,
));
/// Compares the SHA256 of a previous local archive to the latest remote archive and downloads if required.
async fn validate_or_download_archive_file(
snapshot_url: &String,
file_name: &str,
config: &Config,
ctx: &Context,
) {
let remote_archive_url = format!("{snapshot_url}.tar.gz");
let remote_sha_url = format!("{snapshot_url}.sha256");
// Download archive if not already present in cache
// Load the local
let local_sha_file = read_file_content_at_path(&sqlite_sha_file_path);
let sha_url = config.expected_remote_ordinals_sqlite_sha256();
let mut local_sqlite_file_path = config.expected_cache_path();
local_sqlite_file_path.push(format!("{file_name}.sqlite"));
let mut local_sha_file_path = config.expected_cache_path();
local_sha_file_path.push(format!("{file_name}.sqlite.sha256"));
let remote_sha_file = match reqwest::get(&sha_url).await {
Ok(response) => response.bytes().await,
Err(e) => Err(e),
};
let should_download = match (local_sha_file, remote_sha_file) {
(Ok(local), Ok(remote_response)) => {
let cache_not_expired = remote_response.starts_with(&local[0..32]) == false;
if cache_not_expired {
info!(ctx.expect_logger(), "More recent hord.sqlite file detected");
}
cache_not_expired == false
// Compare local SHA256 to remote to see if there's a new one available.
let local_sha_file = read_file_content_at_path(&local_sha_file_path);
let remote_sha_file = match reqwest::get(&remote_sha_url).await {
Ok(response) => response.bytes().await,
Err(e) => Err(e),
};
let should_download = match (local_sha_file, remote_sha_file) {
(Ok(local), Ok(remote_response)) => {
let cache_not_expired = remote_response.starts_with(&local[0..32]) == false;
if cache_not_expired {
try_info!(ctx, "More recent {file_name}.sqlite file detected");
}
cache_not_expired == false
}
(_, _) => match std::fs::metadata(&local_sqlite_file_path) {
Ok(_) => false,
_ => {
try_info!(ctx, "Unable to retrieve {file_name}.sqlite file locally");
true
}
},
};
if should_download {
try_info!(ctx, "Downloading {remote_archive_url}");
match download_and_decompress_archive_file(remote_archive_url, file_name, &config, &ctx).await {
Ok(_) => {}
Err(e) => {
try_error!(ctx, "{e}");
std::process::exit(1);
}
(_, _) => match std::fs::metadata(&sqlite_file_path) {
Ok(_) => false,
_ => {
info!(
ctx.expect_logger(),
"Unable to retrieve hord.sqlite file locally"
);
true
}
},
};
if should_download {
info!(ctx.expect_logger(), "Downloading {}", url);
match download_sqlite_file(&config, &ctx).await {
Ok(_) => {}
Err(e) => {
error!(ctx.expect_logger(), "{}", e);
std::process::exit(1);
}
}
} else {
info!(
ctx.expect_logger(),
"Basing ordinals evaluation on database {}",
sqlite_file_path.display()
);
}
// config.add_local_ordinals_sqlite_source(&sqlite_file_path);
true
} else {
false
try_info!(
ctx,
"Basing ordinals evaluation on database {}",
local_sqlite_file_path.display()
);
}
}
/// Downloads remote SQLite archive datasets.
pub async fn download_archive_datasets_if_required(config: &Config, ctx: &Context) {
if !config.should_bootstrap_through_download() {
return;
}
let snapshot_urls = match &config.snapshot {
SnapshotConfig::Build => unreachable!(),
SnapshotConfig::Download(url) => url,
};
validate_or_download_archive_file(&snapshot_urls.ordinals, "hord", config, ctx).await;
if config.meta_protocols.brc20 {
match &snapshot_urls.brc20 {
Some(url) => validate_or_download_archive_file(url, "brc20", config, ctx).await,
None => {
try_warn!(ctx, "No brc20 snapshot url configured");
}
}
}
}
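Each snapshot URL is treated as a base from which the downloader derives two artifacts, as in `remote_archive_url` / `remote_sha_url` above. A minimal standalone restatement:

```rust
/// Standalone restatement of the URL derivation used above: one base
/// snapshot URL yields the compressed archive and its checksum file.
fn remote_artifacts(snapshot_url: &str) -> (String, String) {
    (
        format!("{snapshot_url}.tar.gz"),  // compressed SQLite archive
        format!("{snapshot_url}.sha256"),  // checksum used for cache validation
    )
}

fn main() {
    let (archive, sha) =
        remote_artifacts("https://archive.hiro.so/mainnet/ordhook/mainnet-ordhook-brc20-latest");
    println!("{archive}\n{sha}");
}
```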

View File

@@ -5,7 +5,7 @@ use crate::core::protocol::inscription_parsing::{
};
use crate::core::protocol::inscription_sequencing::consolidate_block_with_pre_computed_ordinals_data;
use crate::db::get_any_entry_in_ordinal_activities;
use crate::download::download_ordinals_dataset_if_required;
use crate::download::download_archive_datasets_if_required;
use crate::initialize_databases;
use crate::service::observers::{
open_readwrite_observers_db_conn_or_panic, update_observer_progress,
@@ -34,7 +34,7 @@ pub async fn scan_bitcoin_chainstate_via_rpc_using_predicate(
event_observer_config_override: Option<&EventObserverConfig>,
ctx: &Context,
) -> Result<(), String> {
let _ = download_ordinals_dataset_if_required(config, ctx).await;
download_archive_datasets_if_required(config, ctx).await;
let mut floating_end_block = false;
let block_heights_to_scan_res = if let Some(ref blocks) = predicate_spec.blocks {