feat: v0.1.0 release (#13)

* fix: don't remove first block

* fix: make etching_terms nullable

* fix: fix panic if empty pkscript

* chore: change testnet starting block

* feat: more logs

* fix: extract tapscript bug

* feat: more logs

* fix: switch pk to block height

* chore: remove redundant log

* fix: repo

* fix: not found error

* fix: golangci-lint

* feat: add etching tx hash to rune entries

* feat: stop main if indexer failed

* fix: check balance after populating current balance

* fix: sql ambiguous column

* feat: add tx hash and out index in tx output

* fix: actually use transactions to write db

* fix: create rune entry states only during flushes

* fix: mint cap reached off by one

* fix: debug log unsafe

* feat: prevent processing of txs before activation height

* feat: add rune number to rune entry

* feat: include new rune entries in event hash and flushing

* refactor(config): separate init and get config func

Co-authored-by: Gaze <dev@gaze.network>

* feat: remove annoying log

Co-authored-by: Gaze <dev@gaze.network>

* feat: mod tidy

Co-authored-by: Gaze <dev@gaze.network>

* refactor: move main to root

Co-authored-by: Gaze <dev@gaze.network>

* feat(cli): create cli commands

Co-authored-by: Gaze <dev@gaze.network>

* refactor: move main logic to command

Co-authored-by: Gaze <dev@gaze.network>

* doc: remove unused desc

Co-authored-by: Gaze <dev@gaze.network>

* refactor: test structure in runestone_test.go

* fix: edict flaws were ignored

* feat: more tests

* refactor(cli): add local flag

Co-authored-by: Gaze <dev@gaze.network>

* feat: set symbol limit to utf8.MaxRune

* refactor(cli): flags for each module

Co-authored-by: Gaze <dev@gaze.network>

* feat(cli): support db selection

Co-authored-by: Gaze <dev@gaze.network>

* fix: remove temp code

Co-authored-by: Gaze <dev@gaze.network>

* fix: get data from cache in processor first, then dg

* feat(cli): add version command

Co-authored-by: Gaze <dev@gaze.network>

* doc(cli): add refactor plan

Co-authored-by: Gaze <dev@gaze.network>

* refactor(cli): rename files

Co-authored-by: Gaze <dev@gaze.network>

* feat: add main.go

Co-authored-by: Gaze <dev@gaze.network>

* feat: more tests

* feat: add overflow err

* feat: finish runestone tests

* refactor(cli): separate protocol config and cli flag

Co-authored-by: Gaze <dev@gaze.network>

* chore(btc): update example config

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add get block header to datasource interface

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): reorg handling

Co-authored-by: Gaze <dev@gaze.network>

* fix: interface

Co-authored-by: Gaze <dev@gaze.network>

* fix: rename postgres config key

* fix: migrated runes indexer integration to new cli

* fix: commit every block

* feat(btc): add revert data query

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add revert data to processor

Co-authored-by: Gaze <dev@gaze.network>

* feat: implement public errors

* fix: use errs in api

* refactor: move api and usecase outside of internal

* feat: add custom opcode check for datapush

* fix: break if input utxo is not P2TR

* fix: zero len destination case

* fix: get the rest of transaction data in GetTransaction

* refactor: create subscription utils tools

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add btc_database from datasource

Co-authored-by: Gaze <dev@gaze.network>

* doc(btc): add note

Co-authored-by: Gaze <dev@gaze.network>

* wip(btc): implement prepare range func

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add pg queries for datasource

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): update queries

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): implement repo for get blocks

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): update dg

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): return nil if errors

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): update fetch async for db datasource

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add get block header from db for reorg handling

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add todo notes

Co-authored-by: Gaze <dev@gaze.network>

* feat: implement get tx by hash

* fix: rename func

* fix: rename func

* fix: rename func

* fix: fix get transaction by hash

* feat: integrate bitcoin client db to main

* fix: reduce chunk size

* fix: stop main if bitcoin indexer failed

* fix: stop main if runes indexer failed

* fix: move stop() inside goroutine

* chore: add log

* fix: duplicate rune entry number

* feat(btc): add witness utils

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): witness datamodel parsing

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): invalid table name

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): remove unique index for hash

Co-authored-by: Gaze <dev@gaze.network>

* doc: add todo

Co-authored-by: Gaze <dev@gaze.network>

* feat(logger): remove error verbose

Co-authored-by: Gaze <dev@gaze.network>

* feat: support postgresql db

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add err notfound

Co-authored-by: Gaze <dev@gaze.network>

* fix: invalid pgx version

Co-authored-by: Gaze <dev@gaze.network>

* fix: invalid indexer flow

Co-authored-by: Gaze <dev@gaze.network>

* feat: refactor runes api

* feat: implement http server

* fix: mount runes api

* fix: error handler

* fix: first empty state error

Co-authored-by: Gaze <dev@gaze.network>

* fix: off by one confirmation

* ci: ignore Rollback error

* fix: change WithPublicMessage to be prefix

* feat: bump cstream version

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): nullable pkscript

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): change rollback style

Co-authored-by: Gaze <dev@gaze.network>

* refactor: move runes out of internal

* feat: rename id field to runeId in rune transaction

* feat(btc): update index

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add default current block

Co-authored-by: Gaze <dev@gaze.network>

* doc: add note

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): use int64 to store sequence

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): upgrade data type for numbers

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): upgrade data type for idx

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): get indexed block impl

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add common.ZeroHash

Co-authored-by: Gaze <dev@gaze.network>

* feat: add chainparam

* feat: implement get transactions

* fix: wrong condition for non-OP_RETURN output

* feat(btc): add verify indexer states

Co-authored-by: Gaze <dev@gaze.network>

* refactor: sorting code

Co-authored-by: Gaze <dev@gaze.network>

* feat: fix interface

* feat(btc): update chunk size

Co-authored-by: Gaze <dev@gaze.network>

* feat: add rune_etched column in rune transaction

* fix: missing field in create

* feat: add runeEtched in get transactions

* feat: implement get token info

* feat: add holders count in token info

* feat: implement get holders

* fix: return a new repository when beginning a new tx

* fix: rename type

* feat: add pkscript to outpoint balance

* feat: implement get utxos by address api

* fix: spend outpoint bug

* feat: implement get balances by address batch

* feat: sort balances result by amount

* ci: create Dockerfile

Co-authored-by: Gaze <dev@gaze.network>

* ci: add arg run

Co-authored-by: Gaze <dev@gaze.network>

* perf: add automaxprocs

Co-authored-by: Gaze <dev@gaze.network>

* chore: add performance logging

Co-authored-by: Gaze <dev@gaze.network>

* chore: add performance logger for debug

Co-authored-by: Gaze <dev@gaze.network>

* fix: empty etched at

* fix: revert data sequentially

* fix: remove unused funcs

* fix: main.go

* feat: add flag --api-only to run cmd

* fix: create index

* fix: don't add zero mint to unallocated

* fix: ignore zero burn amount

* feat(reorg): add reorg detail

Co-authored-by: Gaze <dev@gaze.network>

* fix: wrong index type

* feat: implement reporting client to report runes blocks

* feat: implement report node

* feat(runes): add latest block api

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): use logger warn

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): txouts aren't reverted if spent txouts have to be reverted

Co-authored-by: Gaze <dev@gaze.network>

* fix: annoying error when unsubscribe fetcher

Co-authored-by: Gaze <dev@gaze.network>

* refactor(btc): readable code

Co-authored-by: Gaze <dev@gaze.network>

* fix(indexer): fix subscription closed before processing when fetch succeeds

Co-authored-by: Gaze <dev@gaze.network>

* fix: remove module enum

* fix: increase max reorg limit

* feat: add starting height for runes mainnet

* fix(btc): fix `with` query modifying the same row twice

Co-authored-by: Gaze <dev@gaze.network>

* fix(runes): handling latest block not found

Co-authored-by: Gaze <dev@gaze.network>

* feat: add decimals in get transactions

* fix: wrong condition

* feat: add more index

* feat: implement get transactions by pkscript

* feat: allow query by rune id too

* feat: more comments

* perf(btc): bitcoin indexer performance optimization (#4)

* feat(btc): not null to witness

Co-authored-by: Gaze <dev@gaze.network>

* perf(btc): add batch insert txin

Co-authored-by: Gaze <dev@gaze.network>

* perf(btc): batch insert txout

Co-authored-by: Gaze <dev@gaze.network>

* perf(btc): batch insert transaction

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): remove old queries

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): typo

Co-authored-by: Gaze <dev@gaze.network>

* perf(btc): batch insert blocks (#5)

Co-authored-by: Gaze <gazenw@users.noreply.github.com>

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>

* feat(btc): Duplicate coinbase transaction handling (#7)

* feat(btc): tx_hash can be duplicated in block v1

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): duplicate tx will use the same txin/txout as the previous tx

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): prevent revert block v1 data

If you really want to revert data from before block version 2, you should reset the database and reindex instead.

Co-authored-by: Gaze <dev@gaze.network>

* doc(btc): update list of duplicate tx hashes

Co-authored-by: Gaze <dev@gaze.network>

* doc(btc): update docs

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): use last v1 block instead

Co-authored-by: Gaze <dev@gaze.network>

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>

* feat: add ping handler

* fix: type

Co-authored-by: Gaze <dev@gaze.network>

* doc: add refactor note

Co-authored-by: Gaze <dev@gaze.network>

* ci: add golang linter and test runner gh action

* ci: use go-test-action@v0

* ci: annotate test result

* ci: update running flag

* fix: try to fix malformed import path

* feat: add mock test

* ci: remove annotation ci

* ci: add annotate test result

* chore: remove unused

* feat: try testify

* feat: remove test

* ci: add go test on macos, windows and go latest version

* ci: test building

* feat: remove mock code

* ci: add sqlc diff checker action (#10)

* feat: Graceful shutdown (#8)

* feat: add shutdown function for indexer

Co-authored-by: Gaze <dev@gaze.network>

* feat: add force shutdown

Co-authored-by: Gaze <dev@gaze.network>

* revert

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): remove unused

Co-authored-by: Gaze <dev@gaze.network>

* style: go fmt

Co-authored-by: Gaze <dev@gaze.network>

* feat: separate context for worker and application

* feat: increase force shutdown timeout

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): update logging

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): update shutdown function

Co-authored-by: Gaze <dev@gaze.network>

* feat: remove wg for shutdown

Co-authored-by: Gaze <dev@gaze.network>

* feat: refactor shutdown flow

Co-authored-by: Gaze <dev@gaze.network>

* feat: update shutdown flow

Co-authored-by: Gaze <dev@gaze.network>

* feat: update naming

Co-authored-by: Gaze <dev@gaze.network>

* feat: update force shutdown logic

Co-authored-by: Gaze <dev@gaze.network>

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>

* feat: check reporting config name

* fix: use db config in bitcoin module for runes datasource

* Add migrate commands (#2)

* feat: add migrate up

* feat: add down migration

* fix: example

* feat: change description

* fix: hardcode migration source directory

* Update README.md for public release. (#11)

* feat: initial draft for README.md

* fix: remove some sections

* feat: add block reporting to first description

* fix: reduce redundancy

* feat: update README.md

* Update README.md

* feat: update README.md

* fix: update config.yaml in README

* fix: remove redundant words

* fix: change default datasource

* fix: config.yaml comments

* feat: update README.md

* refactor(logger): format logging (#12)

* feat(logger): format main logger

* feat(logger): use duration ms for gcp output

* refactor(logger): bitcoin node logger

* refactor(logger): indexer logger

* refactor(logger): fix cmd logger

* refactor(logger): logger in config package

* refactor(logger): set pgx error log level debug

* refactor(logger): btcclient datasource

* refactor: processor name

* refactor(logger): runes logger

* refactor(logger): update logger

* fix(runes): wrong btc db datasource

* refactor(logger): remove unnecessary debug log

* refactor: update logger in indexer

* fix(logger): deadlock in load()

* fix: remove unused

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>

* feat(btc): remove unused func

* fix: fix golangci-lint error

* fix(pg): update logger level

* doc: update config example

* feat: go mod tidy

* doc: update readme

* fix: panic caused by unhandled error

* doc: update example config

* doc: update example config in readme

* feat(logger): only log error stacktrace when debug mode is on

* feat(reporting): handling invalid config error

* feat(pg): handling invalid config error

* fix: panic in get_token_info

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>
Co-authored-by: Planxnx <thanee@cleverse.com>
Co-authored-by: Thanee Charattrakool <37617738+Planxnx@users.noreply.github.com>
Author: gazenw
Date: 2024-04-29 15:16:10 +07:00 (committed by GitHub)
Parent: 5f9cdd5af1
Commit: fcdecd4046
144 changed files with 15758 additions and 4 deletions

.github/workflows/sqlc-verify.yml (new file, +28)

@@ -0,0 +1,28 @@
name: Sqlc ORM Framework Verify
on:
  workflow_dispatch:
  pull_request:
    branches:
      - develop
      - main
    paths:
      - "sqlc.yaml"
      - "**.sql"
      - ".github/workflows/sqlc-verify.yml"
jobs:
  sqlc-diff:
    name: Sqlc Diff Checker
    runs-on: "ubuntu-latest" # "self-hosted", "ubuntu-latest", "macos-latest", "windows-latest"
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: "0"
      - name: Setup Sqlc
        uses: sqlc-dev/setup-sqlc@v4
        with:
          sqlc-version: "1.26.0"
      - name: Check Diff
        run: sqlc diff


@@ -51,6 +51,8 @@ linters:
- prealloc # performance - Find slice declarations that could potentially be pre-allocated, https://github.com/alexkohler/prealloc
- gosec # bugs - Inspects source code for security problems
- wrapcheck # style, error - Checks that errors returned from external packages are wrapped, we should wrap the error from external library
- depguard # import - Go linter that checks if package imports are in a list of acceptable packages.
- sloglint # style, format - Ensure consistent code style when using log/slog.
### Annoying Linters
# - dupl # style - code clone detection
@@ -66,20 +68,36 @@ linters-settings:
misspell:
locale: US
ignore-words: []
errcheck:
exclude-functions:
- (github.com/jackc/pgx/v5.Tx).Rollback
wrapcheck:
ignoreSigs:
- .Errorf(
- errors.New(
- errors.Unwrap(
- errors.Join(
- .Wrap(
- .Wrapf(
- .WithMessage(
- .WithMessagef(
- .WithStack(
- errs.NewPublicError(
- errs.WithPublicMessage(
- withstack.WithStackDepth(
ignoreSigRegexps:
- \.New.*Error\(
ignorePackageGlobs:
- "github.com/gofiber/fiber/*"
goconst:
ignore-tests: true
min-occurrences: 5
depguard:
rules:
main:
# Packages that are not allowed.
deny:
- pkg: "github.com/pkg/errors"
desc: Should be replaced by "cockroachdb/errors" or "cleverse/go-utilities" package
sloglint:
attr-only: true
key-naming-case: snake
args-on-sep-lines: true

.vscode/extensions.json (new file, +3)

@@ -0,0 +1,3 @@
{
"recommendations": ["dotenv.dotenv-vscode", "golang.go"]
}

.vscode/settings.json (new file, +82)

@@ -0,0 +1,82 @@
{
"editor.formatOnSave": true,
"files.exclude": {
"**/.git": true,
"**/.svn": true,
"**/.hg": true,
"**/CVS": true,
"**/.DS_Store": true
},
"search.exclude": {
"**/node_modules": true,
"**/build": true,
"**/dist": true
},
"[json]": {
"editor.formatOnSave": true,
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
// Golang
"[go]": {
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": "explicit"
},
"editor.codeLens": true
},
"go.useLanguageServer": true,
"go.lintTool": "golangci-lint",
"go.lintFlags": ["--fix"],
"go.lintOnSave": "package",
"go.toolsManagement.autoUpdate": true,
"gopls": {
"formatting.gofumpt": true, // https://github.com/mvdan/gofumpt
"ui.codelenses": {
"gc_details": true
},
"build.directoryFilters": ["-**/node_modules"],
"ui.semanticTokens": true,
"ui.completion.usePlaceholders": false,
"ui.diagnostic.analyses": {
// https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md
// "fieldalignment": false,
"nilness": true,
"shadow": false,
"unusedparams": true,
"unusedvariable": true,
"unusedwrite": true, // ineffective assignment
"useany": true
},
"ui.diagnostic.staticcheck": false, // use golangci-lint instead
"ui.diagnostic.annotations": {
// CMD+P and run command `Go: Toggle gc details`
"bounds": true,
"escape": true,
"inline": true,
"nil": true
},
"ui.documentation.hoverKind": "FullDocumentation"
},
"go.editorContextMenuCommands": {
// Right click on code to use this command
"toggleTestFile": false,
"addTags": false,
"removeTags": false,
"fillStruct": true,
"testAtCursor": false,
"testFile": false,
"testPackage": false,
"generateTestForFunction": true,
"generateTestForFile": false,
"generateTestForPackage": false,
"addImport": false,
"testCoverage": false,
"playground": false,
"debugTestAtCursor": false,
"benchmarkAtCursor": false
},
"dotenv.enableAutocloaking": false,
"protoc": {
"options": ["--proto_path=pb"]
}
}

Dockerfile (new file, +27)

@@ -0,0 +1,27 @@
FROM golang:1.22 as builder
WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
COPY ./ ./
ENV GOOS=linux
ENV CGO_ENABLED=0
RUN go build \
-o main ./main.go
FROM alpine:latest
WORKDIR /app
RUN apk --no-cache add ca-certificates tzdata
COPY --from=builder /app/main .
# You can set `TZ` environment variable to change the timezone
CMD ["/app/main", "run"]

README.md (177 changed lines)

@@ -1 +1,176 @@
# Gaze Indexer Network
<!-- omit from toc -->
# Gaze Indexer
Gaze Indexer is an open-source and modular indexing client for Bitcoin meta-protocols. It has support for Bitcoin and Runes out of the box, with **Unified Consistent APIs** across fungible token protocols.
Gaze Indexer is built with **modularity** in mind, allowing users to run all modules in one monolithic instance with a single command, or as a distributed cluster of micro-services.
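For example, once the `gaze` binary is built (see the install steps below), both layouts use the same `run` command and module flags added in this PR; the two-instance split and the config file names below are only illustrative:

```bash
# Monolithic: a single instance runs every enabled module
./gaze run --bitcoin --runes

# Distributed (illustrative): one instance per module, with Runes reading Bitcoin
# data from the database populated by the Bitcoin instance
./gaze run --bitcoin --config bitcoin.yaml
./gaze run --runes --runes-datasource database --config runes.yaml
```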
Gaze Indexer serves as a foundation for building ANY meta-protocol indexers, with efficient data fetching, reorg detection, and a built-in database migration tool.
This allows developers to focus on what **truly** matters: Meta-protocol indexing logic. New meta-protocols can be easily added by implementing new modules.
Gaze Indexer also comes with a block reporting system for verifying data integrity of indexers. Visit the [Gaze Network dashboard](https://dash.gaze.network) to see the status of other indexers.
- [Modules](#modules)
- [1. Bitcoin](#1-bitcoin)
- [2. Runes](#2-runes)
- [Installation](#installation)
- [Prerequisites](#prerequisites)
- [1. Hardware Requirements](#1-hardware-requirements)
- [2. Prepare Bitcoin Core RPC server.](#2-prepare-bitcoin-core-rpc-server)
- [3. Prepare database.](#3-prepare-database)
- [4. Prepare `config.yaml` file.](#4-prepare-configyaml-file)
- [Install with Docker (recommended)](#install-with-docker-recommended)
- [Install from source](#install-from-source)
## Modules
### 1. Bitcoin
The Bitcoin Indexer, the heart of every meta-protocol, is responsible for indexing **Bitcoin transactions, blocks, and UTXOs**. It requires a Bitcoin Core RPC as a source of Bitcoin transactions,
and stores the indexed data in a database to be used by other modules.
### 2. Runes
The Runes Indexer is our first meta-protocol indexer. It indexes Runes states, transactions, runestones, and balances using Bitcoin transactions.
It comes with a set of APIs for querying historical Runes data. See our [API Reference](https://documenter.getpostman.com/view/28396285/2sA3Bn7Cxr) for full details.
## Installation
### Prerequisites
#### 1. Hardware Requirements
Each module has different hardware requirements.
| Module | CPU | RAM |
| ------- | ---------- | ------ |
| Bitcoin | 0.25 cores | 256 MB |
| Runes | 0.5 cores | 1 GB |
#### 2. Prepare Bitcoin Core RPC server.
Gaze Indexer needs to fetch transaction data from a Bitcoin Core RPC, either self-hosted or using managed providers like QuickNode.
To self-host a Bitcoin Core node, see https://bitcoin.org/en/full-node.
#### 3. Prepare database.
Gaze Indexer has first-class support for PostgreSQL. If you wish to use other databases, you can implement your own database repository that satisfies each module's Data Gateway interface.
Here are our minimum database disk space requirements for each module.
| Module | Database Storage |
| ------- | ---------------- |
| Bitcoin | 240 GB |
| Runes | 150 GB |
#### 4. Prepare `config.yaml` file.
```yaml
# config.yaml
logger:
  output: TEXT # Output format for logs. current supported formats: "TEXT" | "JSON" | "GCP"
  debug: false
# Network to run the indexer on. Current supported networks: "mainnet" | "testnet"
network: mainnet
# Bitcoin Core RPC configuration options.
bitcoin_node:
  host: "" # [Required] Host of Bitcoin Core RPC (without https://)
  user: "" # Username to authenticate with Bitcoin Core RPC
  pass: "" # Password to authenticate with Bitcoin Core RPC
  disable_tls: false # Set to true to disable tls
# Block reporting configuration options. See Block Reporting section for more details.
reporting:
  disabled: false # Set to true to disable block reporting to Gaze Network. Default is false.
  base_url: "https://indexer.api.gaze.network" # Defaults to "https://indexer.api.gaze.network" if left empty
  name: "" # [Required if not disabled] Name of this indexer to show on the Gaze Network dashboard
  website_url: "" # Public website URL to show on the dashboard. Can be left empty.
  indexer_api_url: "" # Public url to access this indexer's API. Can be left empty if you want to keep your indexer private.
# HTTP server configuration options.
http_server:
  port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers.
# Meta-protocol modules configuration options.
modules:
  # Configuration options for Bitcoin module. Can be removed if not used.
  bitcoin:
    database: "postgres" # Database to store bitcoin data. current supported databases: "postgres"
    postgres:
      host: "localhost"
      port: 5432
      user: "postgres"
      password: "password"
      db_name: "postgres"
      # url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
  # Configuration options for Runes module. Can be removed if not used.
  runes:
    database: "postgres" # Database to store Runes data. current supported databases: "postgres"
    datasource: "database" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node" | "database". If "database" is used, it will use the database config in bitcoin module as datasource.
    api_handlers: # API handlers to enable. current supported handlers: "http"
      - http
    postgres:
      host: "localhost"
      port: 5432
      user: "postgres"
      password: "password"
      db_name: "postgres"
      # url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
```
### Install with Docker (recommended)
We will be using `docker-compose` for our installation guide. Make sure the `docker-compose.yaml` file is in the same directory as the `config.yaml` file.
```yaml
# docker-compose.yaml
services:
  gaze-indexer:
    image: ghcr.io/gaze-network/gaze-indexer:v1.0.0
    container_name: gaze-indexer
    restart: unless-stopped
    ports:
      - 8080:8080 # Expose HTTP server port to host
    volumes:
      - "./config.yaml:/app/config.yaml" # mount config.yaml file to the container as "/app/config.yaml"
    command: ["/app/main", "run", "--bitcoin", "--runes"] # Put module flags after the "run" command to select which modules to run.
```
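With both files in the same directory, the stack can then be started the usual Docker Compose way, for example:

```bash
docker-compose up -d                  # start the indexer in the background
docker-compose logs -f gaze-indexer   # follow the indexer logs
```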
### Install from source
1. Install `go` version 1.22 or higher. See Go installation guide [here](https://go.dev/doc/install).
2. Clone this repository.
```bash
git clone https://github.com/gaze-network/gaze-indexer.git
cd gaze-indexer
```
3. Build the main binary.
```bash
# Get dependencies
go mod download
# Build the main binary
go build -o gaze main.go
```
4. Run database migrations with the `migrate` command and module flags.
```bash
./gaze migrate up --bitcoin --runes --database postgres://postgres:password@localhost:5432/postgres
```
5. Start the indexer with the `run` command and module flags.
```bash
./gaze run --bitcoin --runes
```
If `config.yaml` is not located at `./app/config.yaml`, use the `--config` flag to specify the path to the `config.yaml` file.
```bash
./gaze run --bitcoin --runes --config /path/to/config.yaml
```
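Migrations can also be rolled back with the `migrate down` command added in this PR (see `cmd/migrate/cmd_down.go` below); for example:

```bash
# Roll back only the most recent Runes migration
./gaze migrate down 1 --runes --database postgres://postgres:password@localhost:5432/postgres

# Roll back all migrations for both modules, skipping the confirmation prompt
./gaze migrate down --bitcoin --runes --all --database postgres://postgres:password@localhost:5432/postgres
```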

cmd/cmd.go (new file, +59)

@@ -0,0 +1,59 @@
package cmd
import (
"context"
"log/slog"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/spf13/cobra"
)
var (
// root command
cmd = &cobra.Command{
Use: "gaze",
Long: `Description of gaze indexer`,
}
// sub-commands
cmds = []*cobra.Command{
NewVersionCommand(),
NewRunCommand(),
NewMigrateCommand(),
}
)
// Execute runs the root command
func Execute(ctx context.Context) {
var configFile string
// Add global flags
flags := cmd.PersistentFlags()
flags.StringVar(&configFile, "config", "", "config file, E.g. `./config.yaml`")
flags.String("network", "mainnet", "network to connect to, E.g. `mainnet` or `testnet`")
// Bind flags to configuration
config.BindPFlag("network", flags.Lookup("network"))
// Initialize configuration and logger on start command
cobra.OnInitialize(func() {
// Initialize configuration
config := config.Parse(configFile)
// Initialize logger
if err := logger.Init(config.Logger); err != nil {
logger.PanicContext(ctx, "Something went wrong, can't init logger", slogx.Error(err), slog.Any("config", config.Logger))
}
})
// Register sub-commands
cmd.AddCommand(cmds...)
// Execute command
if err := cmd.ExecuteContext(ctx); err != nil {
// Cobra will print the error message by default
logger.DebugContext(ctx, "Error executing command", slogx.Error(err))
}
}

cmd/cmd_migrate.go (new file, +20)

@@ -0,0 +1,20 @@
package cmd
import (
"github.com/gaze-network/indexer-network/cmd/migrate"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/spf13/cobra"
)
func NewMigrateCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "migrate",
Short: "Migrate database schema",
}
cmd.AddCommand(
migrate.NewMigrateUpCommand(),
migrate.NewMigrateDownCommand(),
)
return cmd
}

cmd/cmd_run.go (new file, +370)

@@ -0,0 +1,370 @@
package cmd
import (
"context"
"fmt"
"log/slog"
"net/http"
"os"
"os/signal"
"runtime"
"strings"
"syscall"
"time"
"github.com/btcsuite/btcd/rpcclient"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/core/indexers"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/bitcoin"
"github.com/gaze-network/indexer-network/modules/bitcoin/btcclient"
btcdatagateway "github.com/gaze-network/indexer-network/modules/bitcoin/datagateway"
btcpostgres "github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres"
"github.com/gaze-network/indexer-network/modules/runes"
runesapi "github.com/gaze-network/indexer-network/modules/runes/api"
runesdatagateway "github.com/gaze-network/indexer-network/modules/runes/datagateway"
runespostgres "github.com/gaze-network/indexer-network/modules/runes/repository/postgres"
runesusecase "github.com/gaze-network/indexer-network/modules/runes/usecase"
"github.com/gaze-network/indexer-network/pkg/errorhandler"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/compress"
fiberrecover "github.com/gofiber/fiber/v2/middleware/recover"
"github.com/samber/lo"
"github.com/spf13/cobra"
)
const (
shutdownTimeout = 60 * time.Second
)
type runCmdOptions struct {
APIOnly bool
Bitcoin bool
Runes bool
}
func NewRunCommand() *cobra.Command {
opts := &runCmdOptions{}
// Create command
runCmd := &cobra.Command{
Use: "run",
Short: "Start indexer-network service",
RunE: func(cmd *cobra.Command, args []string) error {
return runHandler(opts, cmd, args)
},
}
// TODO: separate flags and bind flags to each module cmd package.
// Add local flags
flags := runCmd.Flags()
flags.BoolVar(&opts.APIOnly, "api-only", false, "Run only API server")
flags.BoolVar(&opts.Bitcoin, "bitcoin", false, "Enable Bitcoin indexer module")
flags.String("bitcoin-db", "postgres", `Database to store bitcoin data. current supported databases: "postgres"`)
flags.BoolVar(&opts.Runes, "runes", false, "Enable Runes indexer module")
flags.String("runes-db", "postgres", `Database to store runes data. current supported databases: "postgres"`)
flags.String("runes-datasource", "bitcoin-node", `Datasource to fetch bitcoin data for processing Meta-Protocol data. current supported datasources: "bitcoin-node" | "database"`)
// Bind flags to configuration
config.BindPFlag("modules.bitcoin.database", flags.Lookup("bitcoin-db"))
config.BindPFlag("modules.runes.database", flags.Lookup("runes-db"))
config.BindPFlag("modules.runes.datasource", flags.Lookup("runes-datasource"))
return runCmd
}
type HttpHandler interface {
Mount(router fiber.Router) error
}
func runHandler(opts *runCmdOptions, cmd *cobra.Command, _ []string) error {
conf := config.Load()
// Validate inputs
{
if !conf.Network.IsSupported() {
return errors.Wrapf(errs.Unsupported, "%q network is not supported", conf.Network.String())
}
}
// Initialize application process context
ctx, stop := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
defer stop()
// Initialize worker context to separate worker's lifecycle from main process
ctxWorker, stopWorker := context.WithCancel(context.Background())
defer stopWorker()
// Add logger context
ctxWorker = logger.WithContext(ctxWorker, slogx.Stringer("network", conf.Network))
// Initialize Bitcoin Core RPC Client
client, err := rpcclient.New(&rpcclient.ConnConfig{
Host: conf.BitcoinNode.Host,
User: conf.BitcoinNode.User,
Pass: conf.BitcoinNode.Pass,
DisableTLS: conf.BitcoinNode.DisableTLS,
HTTPPostMode: true,
}, nil)
if err != nil {
logger.PanicContext(ctx, "Invalid Bitcoin node configuration", slogx.Error(err))
}
defer client.Shutdown()
// Check Bitcoin RPC connection
{
start := time.Now()
logger.InfoContext(ctx, "Connecting to Bitcoin Core RPC Server...", slogx.String("host", conf.BitcoinNode.Host))
if err := client.Ping(); err != nil {
logger.PanicContext(ctx, "Can't connect to Bitcoin Core RPC Server", slogx.String("host", conf.BitcoinNode.Host), slogx.Error(err))
}
logger.InfoContext(ctx, "Connected to Bitcoin Core RPC Server", slog.Duration("latency", time.Since(start)))
}
// TODO: create module command package.
// each module should have its own command package and main package will routing the command to the module command package.
// TODO: refactor module name to specific type instead of string?
httpHandlers := make(map[string]HttpHandler, 0)
var reportingClient *reportingclient.ReportingClient
if !conf.Reporting.Disabled {
reportingClient, err = reportingclient.New(conf.Reporting)
if err != nil {
if errors.Is(err, errs.InvalidArgument) {
logger.PanicContext(ctx, "Invalid reporting configuration", slogx.Error(err))
}
logger.PanicContext(ctx, "Something went wrong, can't create reporting client", slogx.Error(err))
}
}
// Initialize Bitcoin Indexer
if opts.Bitcoin {
ctx := logger.WithContext(ctx, slogx.String("module", "bitcoin"))
var (
btcDB btcdatagateway.BitcoinDataGateway
indexerInfoDB btcdatagateway.IndexerInformationDataGateway
)
switch strings.ToLower(conf.Modules.Bitcoin.Database) {
case "postgresql", "postgres", "pg":
pg, err := postgres.NewPool(ctx, conf.Modules.Bitcoin.Postgres)
if err != nil {
if errors.Is(err, errs.InvalidArgument) {
logger.PanicContext(ctx, "Invalid Postgres configuration for indexer", slogx.Error(err))
}
logger.PanicContext(ctx, "Something went wrong, can't create Postgres connection pool", slogx.Error(err))
}
defer pg.Close()
repo := btcpostgres.NewRepository(pg)
btcDB = repo
indexerInfoDB = repo
default:
return errors.Wrapf(errs.Unsupported, "%q database for indexer is not supported", conf.Modules.Bitcoin.Database)
}
if !opts.APIOnly {
processor := bitcoin.NewProcessor(conf, btcDB, indexerInfoDB)
datasource := datasources.NewBitcoinNode(client)
indexer := indexers.NewBitcoinIndexer(processor, datasource)
defer func() {
if err := indexer.ShutdownWithTimeout(shutdownTimeout); err != nil {
logger.ErrorContext(ctx, "Error during shutdown indexer", slogx.Error(err))
return
}
logger.InfoContext(ctx, "Indexer stopped gracefully")
}()
// Verify states before running Indexer
if err := processor.VerifyStates(ctx); err != nil {
return errors.WithStack(err)
}
// Run Indexer
go func() {
// stop main process if indexer stopped
defer stop()
logger.InfoContext(ctx, "Starting Gaze Indexer")
if err := indexer.Run(ctxWorker); err != nil {
logger.PanicContext(ctx, "Something went wrong, error during running indexer", slogx.Error(err))
}
}()
}
}
// Initialize Runes Indexer
if opts.Runes {
ctx := logger.WithContext(ctx, slogx.String("module", "runes"))
var (
runesDg runesdatagateway.RunesDataGateway
indexerInfoDg runesdatagateway.IndexerInfoDataGateway
)
switch strings.ToLower(conf.Modules.Runes.Database) {
case "postgresql", "postgres", "pg":
pg, err := postgres.NewPool(ctx, conf.Modules.Runes.Postgres)
if err != nil {
if errors.Is(err, errs.InvalidArgument) {
logger.PanicContext(ctx, "Invalid Postgres configuration for indexer", slogx.Error(err))
}
logger.PanicContext(ctx, "Something went wrong, can't create Postgres connection pool", slogx.Error(err))
}
defer pg.Close()
runesRepo := runespostgres.NewRepository(pg)
runesDg = runesRepo
indexerInfoDg = runesRepo
default:
return errors.Wrapf(errs.Unsupported, "%q database for indexer is not supported", conf.Modules.Runes.Database)
}
var bitcoinDatasource indexers.BitcoinDatasource
var bitcoinClient btcclient.Contract
switch strings.ToLower(conf.Modules.Runes.Datasource) {
case "bitcoin-node":
bitcoinNodeDatasource := datasources.NewBitcoinNode(client)
bitcoinDatasource = bitcoinNodeDatasource
bitcoinClient = bitcoinNodeDatasource
case "database":
pg, err := postgres.NewPool(ctx, conf.Modules.Bitcoin.Postgres)
if err != nil {
if errors.Is(err, errs.InvalidArgument) {
logger.PanicContext(ctx, "Invalid Postgres configuration for datasource", slogx.Error(err))
}
logger.PanicContext(ctx, "Something went wrong, can't create Postgres connection pool", slogx.Error(err))
}
defer pg.Close()
btcRepo := btcpostgres.NewRepository(pg)
btcClientDB := btcclient.NewClientDatabase(btcRepo)
bitcoinDatasource = btcClientDB
bitcoinClient = btcClientDB
default:
return errors.Wrapf(errs.Unsupported, "%q datasource is not supported", conf.Modules.Runes.Datasource)
}
if !opts.APIOnly {
processor := runes.NewProcessor(runesDg, indexerInfoDg, bitcoinClient, bitcoinDatasource, conf.Network, reportingClient)
indexer := indexers.NewBitcoinIndexer(processor, bitcoinDatasource)
defer func() {
if err := indexer.ShutdownWithTimeout(shutdownTimeout); err != nil {
logger.ErrorContext(ctx, "Error during shutdown indexer", slogx.Error(err))
return
}
logger.InfoContext(ctx, "Indexer stopped gracefully")
}()
if err := processor.VerifyStates(ctx); err != nil {
return errors.WithStack(err)
}
// Run Indexer
go func() {
// stop main process if indexer stopped
defer stop()
logger.InfoContext(ctx, "Starting Gaze Indexer")
if err := indexer.Run(ctxWorker); err != nil {
logger.PanicContext(ctx, "Something went wrong, error during running indexer", slogx.Error(err))
}
}()
}
// Mount API
apiHandlers := lo.Uniq(conf.Modules.Runes.APIHandlers)
for _, handler := range apiHandlers {
switch handler { // TODO: support more handlers (e.g. gRPC)
case "http":
runesUsecase := runesusecase.New(runesDg, bitcoinClient)
runesHTTPHandler := runesapi.NewHTTPHandler(conf.Network, runesUsecase)
httpHandlers["runes"] = runesHTTPHandler
default:
logger.PanicContext(ctx, "Something went wrong, unsupported API handler", slogx.String("handler", handler))
}
}
}
// Wait for interrupt signal to gracefully stop the server with
// Setup HTTP server if there are any HTTP handlers
if len(httpHandlers) > 0 {
app := fiber.New(fiber.Config{
AppName: "Gaze Indexer",
ErrorHandler: errorhandler.NewHTTPErrorHandler(),
})
app.
Use(fiberrecover.New(fiberrecover.Config{
EnableStackTrace: true,
StackTraceHandler: func(c *fiber.Ctx, e interface{}) {
buf := make([]byte, 1024) // bufLen = 1024
buf = buf[:runtime.Stack(buf, false)]
logger.ErrorContext(c.UserContext(), "Something went wrong, panic in http handler", slogx.Any("panic", e), slog.String("stacktrace", string(buf)))
},
})).
Use(compress.New(compress.Config{
Level: compress.LevelDefault,
}))
defer func() {
if err := app.ShutdownWithTimeout(shutdownTimeout); err != nil {
logger.ErrorContext(ctx, "Error during shutdown HTTP server", slogx.Error(err))
return
}
logger.InfoContext(ctx, "HTTP server stopped gracefully")
}()
// Health check
app.Get("/", func(c *fiber.Ctx) error {
return errors.WithStack(c.SendStatus(http.StatusOK))
})
// mount http handlers from each http-enabled module
for module, handler := range httpHandlers {
if err := handler.Mount(app); err != nil {
logger.PanicContext(ctx, "Something went wrong, can't mount HTTP handler", slogx.Error(err), slogx.String("module", module))
}
logger.InfoContext(ctx, "Mounted HTTP handler", slogx.String("module", module))
}
go func() {
// stop main process if API stopped
defer stop()
logger.InfoContext(ctx, "Started HTTP server", slog.Int("port", conf.HTTPServer.Port))
if err := app.Listen(fmt.Sprintf(":%d", conf.HTTPServer.Port)); err != nil {
logger.PanicContext(ctx, "Something went wrong, error during running HTTP server", slogx.Error(err))
}
}()
}
// Stop application if worker context is done
go func() {
<-ctxWorker.Done()
defer stop()
logger.InfoContext(ctx, "Gaze Indexer Worker is stopped. Stopping application...")
}()
logger.InfoContext(ctxWorker, "Gaze Indexer started")
// Wait for interrupt signal to gracefully stop the server
<-ctx.Done()
// Force shutdown if timeout exceeded or got signal again
go func() {
defer os.Exit(1)
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
defer stop()
select {
case <-ctx.Done():
logger.FatalContext(ctx, "Received exit signal again. Force shutdown...")
case <-time.After(shutdownTimeout + 15*time.Second):
logger.FatalContext(ctx, "Shutdown timeout exceeded. Force shutdown...")
}
}()
return nil
}

cmd/cmd_version.go (new file, +49)

@@ -0,0 +1,49 @@
package cmd
import (
"fmt"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/constants"
"github.com/gaze-network/indexer-network/modules/bitcoin"
"github.com/gaze-network/indexer-network/modules/runes"
"github.com/spf13/cobra"
)
var versions = map[string]string{
"": constants.Version,
"bitcoin": bitcoin.Version,
"runes": runes.Version,
}
type versionCmdOptions struct {
Modules string
}
func NewVersionCommand() *cobra.Command {
opts := &versionCmdOptions{}
cmd := &cobra.Command{
Use: "version",
Short: "Show indexer-network version",
RunE: func(cmd *cobra.Command, args []string) error {
return versionHandler(opts, cmd, args)
},
}
flags := cmd.Flags()
flags.StringVar(&opts.Modules, "module", "", `Show version of a specific module. E.g. "bitcoin" | "runes"`)
return cmd
}
func versionHandler(opts *versionCmdOptions, _ *cobra.Command, _ []string) error {
version, ok := versions[opts.Modules]
if !ok {
// fmt.Fprintln(cmd.ErrOrStderr(), "Unknown module")
return errors.Wrap(errs.Unsupported, "Invalid module name")
}
fmt.Println(version)
return nil
}

cmd/migrate/cmd_down.go (new file, +132)

@@ -0,0 +1,132 @@
package migrate
import (
"fmt"
"net/url"
"strconv"
"strings"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/samber/lo"
"github.com/spf13/cobra"
)
type migrateDownCmdOptions struct {
DatabaseURL string
Bitcoin bool
Runes bool
All bool
}
type migrateDownCmdArgs struct {
N int
}
func (a *migrateDownCmdArgs) ParseArgs(args []string) error {
if len(args) > 0 {
// assume args already validated by cobra to be len(args) <= 1
n, err := strconv.Atoi(args[0])
if err != nil {
return errors.Wrap(err, "failed to parse N")
}
if n < 0 {
return errors.New("N must be a positive integer")
}
a.N = n
}
return nil
}
func NewMigrateDownCommand() *cobra.Command {
opts := &migrateDownCmdOptions{}
cmd := &cobra.Command{
Use: "down [N]",
Short: "Apply all or N down migrations",
Args: cobra.MaximumNArgs(1),
Example: `gaze migrate down --database "postgres://postgres:postgres@localhost:5432/gaze-indexer?sslmode=disable"`,
RunE: func(cmd *cobra.Command, args []string) error {
// args already validated by cobra
var downArgs migrateDownCmdArgs
if err := downArgs.ParseArgs(args); err != nil {
return errors.Wrap(err, "failed to parse args")
}
return migrateDownHandler(opts, cmd, downArgs)
},
}
flags := cmd.Flags()
flags.BoolVar(&opts.Bitcoin, "bitcoin", false, "Apply Bitcoin down migrations")
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes down migrations")
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")
flags.BoolVar(&opts.All, "all", false, "Confirm apply ALL down migrations without prompt")
return cmd
}
func migrateDownHandler(opts *migrateDownCmdOptions, _ *cobra.Command, args migrateDownCmdArgs) error {
if opts.DatabaseURL == "" {
return errors.New("--database is required")
}
databaseURL, err := url.Parse(opts.DatabaseURL)
if err != nil {
return errors.Wrap(err, "failed to parse database URL")
}
if _, ok := supportedDrivers[databaseURL.Scheme]; !ok {
return errors.Errorf("unsupported database driver: %s", databaseURL.Scheme)
}
// prevent accidental down all migrations
if args.N == 0 && !opts.All {
input := ""
fmt.Print("Are you sure you want to apply all down migrations? (y/N):")
fmt.Scanln(&input)
if !lo.Contains([]string{"y", "yes"}, strings.ToLower(input)) {
return nil
}
}
applyDownMigrations := func(module string, sourcePath string, migrationTable string) error {
newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}})
sourceURL := "file://" + sourcePath
m, err := migrate.New(sourceURL, newDatabaseURL.String())
if err != nil {
if strings.Contains(err.Error(), "no such file or directory") {
return errors.Wrap(errs.InternalError, "migrations directory not found")
}
return errors.Wrap(err, "failed to open database")
}
m.Log = &consoleLogger{
prefix: fmt.Sprintf("[%s] ", module),
}
if args.N == 0 {
m.Log.Printf("Applying down migrations...\n")
err = m.Down()
} else {
m.Log.Printf("Applying %d down migrations...\n", args.N)
err = m.Steps(-args.N)
}
if err != nil {
if !errors.Is(err, migrate.ErrNoChange) {
return errors.Wrapf(err, "failed to apply %s down migrations", module)
}
m.Log.Printf("No more down migrations to apply\n")
}
return nil
}
if opts.Bitcoin {
if err := applyDownMigrations("Bitcoin", bitcoinMigrationSource, "bitcoin_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
if opts.Runes {
if err := applyDownMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
return nil
}

cmd/migrate/cmd_up.go (new file, +117)

@@ -0,0 +1,117 @@
package migrate
import (
"fmt"
"net/url"
"strconv"
"strings"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/spf13/cobra"
)
type migrateUpCmdOptions struct {
DatabaseURL string
Bitcoin bool
Runes bool
}
type migrateUpCmdArgs struct {
N int
}
func (a *migrateUpCmdArgs) ParseArgs(args []string) error {
if len(args) > 0 {
// assume args already validated by cobra to be len(args) <= 1
n, err := strconv.Atoi(args[0])
if err != nil {
return errors.Wrap(err, "failed to parse N")
}
a.N = n
}
return nil
}
func NewMigrateUpCommand() *cobra.Command {
opts := &migrateUpCmdOptions{}
cmd := &cobra.Command{
Use: "up [N]",
Short: "Apply all or N up migrations",
Args: cobra.MaximumNArgs(1),
Example: `gaze migrate up --database "postgres://postgres:postgres@localhost:5432/gaze-indexer?sslmode=disable"`,
RunE: func(cmd *cobra.Command, args []string) error {
// args already validated by cobra
var upArgs migrateUpCmdArgs
if err := upArgs.ParseArgs(args); err != nil {
return errors.Wrap(err, "failed to parse args")
}
return migrateUpHandler(opts, cmd, upArgs)
},
}
flags := cmd.Flags()
flags.BoolVar(&opts.Bitcoin, "bitcoin", false, "Apply Bitcoin up migrations")
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes up migrations")
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")
return cmd
}
func migrateUpHandler(opts *migrateUpCmdOptions, _ *cobra.Command, args migrateUpCmdArgs) error {
if opts.DatabaseURL == "" {
return errors.New("--database is required")
}
databaseURL, err := url.Parse(opts.DatabaseURL)
if err != nil {
return errors.Wrap(err, "failed to parse database URL")
}
if _, ok := supportedDrivers[databaseURL.Scheme]; !ok {
return errors.Errorf("unsupported database driver: %s", databaseURL.Scheme)
}
applyUpMigrations := func(module string, sourcePath string, migrationTable string) error {
newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}})
sourceURL := "file://" + sourcePath
m, err := migrate.New(sourceURL, newDatabaseURL.String())
if err != nil {
if strings.Contains(err.Error(), "no such file or directory") {
return errors.Wrap(errs.InternalError, "migrations directory not found")
}
return errors.Wrap(err, "failed to open database")
}
m.Log = &consoleLogger{
prefix: fmt.Sprintf("[%s] ", module),
}
if args.N == 0 {
m.Log.Printf("Applying up migrations...\n")
err = m.Up()
} else {
m.Log.Printf("Applying %d up migrations...\n", args.N)
err = m.Steps(args.N)
}
if err != nil {
if !errors.Is(err, migrate.ErrNoChange) {
return errors.Wrapf(err, "failed to apply %s up migrations", module)
}
m.Log.Printf("Migrations already up-to-date\n")
}
return nil
}
if opts.Bitcoin {
if err := applyUpMigrations("Bitcoin", bitcoinMigrationSource, "bitcoin_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
if opts.Runes {
if err := applyUpMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
return nil
}

cmd/migrate/logger.go (new file, +22)

@@ -0,0 +1,22 @@
package migrate
import (
"fmt"
"github.com/golang-migrate/migrate/v4"
)
var _ migrate.Logger = (*consoleLogger)(nil)
type consoleLogger struct {
prefix string
verbose bool
}
func (l *consoleLogger) Printf(format string, v ...interface{}) {
fmt.Printf(l.prefix+format, v...)
}
func (l *consoleLogger) Verbose() bool {
return l.verbose
}

cmd/migrate/migrate.go (new file, +25)

@@ -0,0 +1,25 @@
package migrate
import "net/url"
const (
bitcoinMigrationSource = "modules/bitcoin/database/postgresql/migrations"
runesMigrationSource = "modules/runes/database/postgresql/migrations"
)
func cloneURLWithQuery(u *url.URL, newQuery url.Values) *url.URL {
clone := *u
query := clone.Query()
for key, values := range newQuery {
for _, value := range values {
query.Add(key, value)
}
}
clone.RawQuery = query.Encode()
return &clone
}
var supportedDrivers = map[string]struct{}{
"postgres": {},
"postgresql": {},
}


common/bitcoin.go (new file, +4)

@@ -0,0 +1,4 @@
package common
// HalvingInterval is the number of blocks between each halving event.
const HalvingInterval = 210_000

common/errs/errs.go (new file, +99)

@@ -0,0 +1,99 @@
package errs
import (
"github.com/cockroachdb/errors"
)
// set depth to 10 to skip runtime stacks and current file.
const depth = 10
// Common Application Errors
var (
// NotFound is returned when a resource is not found
NotFound = errors.NewWithDepth(depth, "not found")
// InternalError is returned when internal logic got error
InternalError = errors.NewWithDepth(depth, "internal error")
// SomethingWentWrong is returned when got some bug or unexpected case
//
// inherited error from InternalError,
// so errors.Is(err, InternalError) == true
SomethingWentWrong = errors.WrapWithDepth(depth, InternalError, "something went wrong")
// Skippable is returned when got an error but it can be skipped or ignored and continue
Skippable = errors.NewWithDepth(depth, "skippable")
// Unsupported is returned when a feature or result is not supported
Unsupported = errors.NewWithDepth(depth, "unsupported")
// NotSupported is returned when a feature or result is not supported
// alias of Unsupported
NotSupported = Unsupported
// Unauthorized is returned when a request is unauthorized
Unauthorized = errors.NewWithDepth(depth, "unauthorized")
// Timeout is returned when a connection to a resource timed out
Timeout = errors.NewWithDepth(depth, "timeout")
// BadRequest is returned when a request is invalid
BadRequest = errors.NewWithDepth(depth, "bad request")
// InvalidArgument is returned when an argument is invalid
//
// inherited error from BadRequest,
// so errors.Is(err, BadRequest) == true
InvalidArgument = errors.WrapWithDepth(depth, BadRequest, "invalid argument")
// ArgumentRequired is returned when an argument is required
//
// inherited error from BadRequest,
// so errors.Is(err, BadRequest) == true
ArgumentRequired = errors.WrapWithDepth(depth, BadRequest, "argument required")
// Duplicate is returned when a resource already exists
Duplicate = errors.NewWithDepth(depth, "duplicate")
// Unimplemented is returned when a feature or method is not implemented
//
// inherited error from Unsupported,
// so errors.Is(err, Unsupported) == true
Unimplemented = errors.WrapWithDepth(depth, Unsupported, "unimplemented")
)
// Business Logic errors
var (
// Overflow is returned when an overflow error occurs
//
// inherited error from InternalError,
// so errors.Is(err, InternalError) == true
Overflow = errors.WrapWithDepth(depth, InternalError, "overflow")
// OverflowUint64 is returned when an uint64 overflow error occurs
//
// inherited error from Overflow,
// so errors.Is(err, Overflow) == true
OverflowUint32 = errors.WrapWithDepth(depth, Overflow, "overflow uint32")
// OverflowUint64 is returned when an uint64 overflow error occurs
//
// inherited error from Overflow,
// so errors.Is(err, Overflow) == true
OverflowUint64 = errors.WrapWithDepth(depth, Overflow, "overflow uint64")
// OverflowUint128 is returned when an uint128 overflow error occurs
//
// inherited error from Overflow,
// so errors.Is(err, Overflow) == true
OverflowUint128 = errors.WrapWithDepth(depth, Overflow, "overflow uint128")
// InvalidState is returned when a state is invalid
InvalidState = errors.NewWithDepth(depth, "invalid state")
// ConflictSetting is returned when an indexer setting is conflicted
ConflictSetting = errors.NewWithDepth(depth, "conflict setting")
// Closed is returned when a resource is closed
Closed = errors.NewWithDepth(depth, "closed")
)
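A quick illustration (editor's note, not part of the file above): because these sentinels are built by wrapping, a single `errors.Is` check matches an error and every parent it inherits from. A minimal sketch, assuming the `errs` package above and cockroachdb/errors:

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
	"github.com/gaze-network/indexer-network/common/errs"
)

func main() {
	// InvalidArgument is defined above by wrapping BadRequest, so errors.Is
	// matches both the specific sentinel and its parent.
	err := errors.Wrap(errs.InvalidArgument, "port must be a positive integer")
	fmt.Println(errors.Is(err, errs.InvalidArgument)) // true
	fmt.Println(errors.Is(err, errs.BadRequest))      // true (inherited)
	fmt.Println(errors.Is(err, errs.NotFound))        // false
}
```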


@@ -0,0 +1,43 @@
package errs
import (
"fmt"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/errors/withstack"
)
// PublicError is an error that, when caught by error handler, should return a user-friendly error response to the user. Responses vary between each protocol (http, grpc, etc.).
type PublicError struct {
err error
message string
}
func (p PublicError) Error() string {
return p.err.Error()
}
func (p PublicError) Message() string {
return p.message
}
func (p PublicError) Unwrap() error {
return p.err
}
func NewPublicError(message string) error {
return withstack.WithStackDepth(&PublicError{err: errors.New(message), message: message}, 1)
}
func WithPublicMessage(err error, prefix string) error {
if err == nil {
return nil
}
var message string
if prefix != "" {
message = fmt.Sprintf("%s: %s", prefix, err.Error())
} else {
message = err.Error()
}
return withstack.WithStackDepth(&PublicError{err: err, message: message}, 1)
}
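A sketch of how this is meant to be consumed (editor's note, not part of the file above; it assumes the `errs` package as defined here, while the real error handler lives elsewhere in this PR):

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
	"github.com/gaze-network/indexer-network/common/errs"
)

func main() {
	// The wrapped error keeps full internal detail; only the public message is
	// meant to reach API clients. Per the "change WithPublicMessage to be prefix"
	// commit above, the second argument becomes a prefix of the public message.
	internal := errors.New("pg: connection refused")
	err := errs.WithPublicMessage(internal, "failed to load rune entry")

	var pub *errs.PublicError
	if errors.As(err, &pub) {
		fmt.Println(pub.Message()) // "failed to load rune entry: pg: connection refused"
	}
}
```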

common/hash.go (new file, +12)

@@ -0,0 +1,12 @@
package common
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
)
// Zero value of chainhash.Hash
var (
ZeroHash = *utils.Must(chainhash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000"))
NullHash = ZeroHash
)

common/network.go (new file, +33)

@@ -0,0 +1,33 @@
package common
import "github.com/btcsuite/btcd/chaincfg"
type Network string
const (
NetworkMainnet Network = "mainnet"
NetworkTestnet Network = "testnet"
)
var supportedNetworks = map[Network]struct{}{
NetworkMainnet: {},
NetworkTestnet: {},
}
var chainParams = map[Network]*chaincfg.Params{
NetworkMainnet: &chaincfg.MainNetParams,
NetworkTestnet: &chaincfg.TestNet3Params,
}
func (n Network) IsSupported() bool {
_, ok := supportedNetworks[n]
return ok
}
func (n Network) ChainParams() *chaincfg.Params {
return chainParams[n]
}
func (n Network) String() string {
return string(n)
}

config.example.yaml (new file, +52)

@@ -0,0 +1,52 @@
logger:
output: TEXT # Output format for logs. current supported formats: "TEXT" | "JSON" | "GCP"
debug: false
# Network to run the indexer on. Current supported networks: "mainnet" | "testnet"
network: mainnet
# Bitcoin Core RPC configuration options.
bitcoin_node:
host: "" # [Required] Host of Bitcoin Core RPC (without https://)
user: "" # Username to authenticate with Bitcoin Core RPC
pass: "" # Password to authenticate with Bitcoin Core RPC
disable_tls: false # Set to true to disable tls
# Block reporting configuration options. See Block Reporting section for more details.
reporting:
disabled: false # Set to true to disable block reporting to Gaze Network. Default is false.
base_url: "https://indexer.api.gaze.network" # Defaults to "https://indexer.api.gaze.network" if left empty
name: "" # [Required if not disabled] Name of this indexer to show on the Gaze Network dashboard
website_url: "" # Public website URL to show on the dashboard. Can be left empty.
indexer_api_url: "" # Public url to access this indexer's API. Can be left empty if you want to keep your indexer private.
# HTTP server configuration options.
http_server:
port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers.
# Meta-protocol modules configuration options.
modules:
# Configuration options for Bitcoin module. Can be removed if not used.
bitcoin:
database: "postgres" # Database to store bitcoin data. current supported databases: "postgres"
postgres:
host: "localhost"
port: 5432
user: "postgres"
password: "password"
db_name: "postgres"
# url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
# Configuration options for Runes module. Can be removed if not used.
runes:
database: "postgres" # Database to store Runes data. current supported databases: "postgres"
datasource: "database" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node" | "database". If "database" is used, it will use the database config in bitcoin module as datasource.
api_handlers: # API handlers to enable. current supported handlers: "http"
- http
postgres:
host: "localhost"
port: 5432
user: "postgres"
password: "password"
db_name: "postgres"
# url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
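
To try this example locally, note that the config loader (internal/config, later in this diff) looks for a file named `config` in the working directory by default, so copying the example is typically enough:

```shell
$ cp config.example.yaml config.yaml
```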


@@ -0,0 +1,5 @@
package constants
const (
Version = "v0.0.1"
)


@@ -0,0 +1,294 @@
package datasources
import (
"bytes"
"context"
"encoding/hex"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/internal/subscription"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
cstream "github.com/planxnx/concurrent-stream"
"github.com/samber/lo"
)
const (
blockStreamChunkSize = 5
)
// Ensure BitcoinNodeDatasource implements the Datasource interface
var _ Datasource[[]*types.Block] = (*BitcoinNodeDatasource)(nil)
// BitcoinNodeDatasource fetches data from a Bitcoin node for the Bitcoin indexer
type BitcoinNodeDatasource struct {
btcclient *rpcclient.Client
}
// NewBitcoinNode creates a new BitcoinNodeDatasource with a Bitcoin Core RPC client
func NewBitcoinNode(btcclient *rpcclient.Client) *BitcoinNodeDatasource {
return &BitcoinNodeDatasource{
btcclient: btcclient,
}
}
func (p BitcoinNodeDatasource) Name() string {
return "bitcoin_node"
}
// Fetch polls blocks from the Bitcoin node.
//
// - from: block height to start fetching; if -1, it will start from the genesis block
// - to: block height to stop fetching; if -1, it will fetch until the latest block
func (d *BitcoinNodeDatasource) Fetch(ctx context.Context, from, to int64) ([]*types.Block, error) {
ch := make(chan []*types.Block)
subscription, err := d.FetchAsync(ctx, from, to, ch)
if err != nil {
return nil, errors.WithStack(err)
}
defer subscription.Unsubscribe()
blocks := make([]*types.Block, 0)
for {
select {
case b, ok := <-ch:
if !ok {
return blocks, nil
}
blocks = append(blocks, b...)
case <-subscription.Done():
if err := ctx.Err(); err != nil {
return nil, errors.Wrap(err, "context done")
}
return blocks, nil
case err := <-subscription.Err():
if err != nil {
return nil, errors.Wrap(err, "got error while fetch async")
}
return blocks, nil
case <-ctx.Done():
return nil, errors.Wrap(ctx.Err(), "context done")
}
}
}
// FetchAsync polls blocks from the Bitcoin node asynchronously (non-blocking).
//
// - from: block height to start fetching; if -1, it will start from the genesis block
// - to: block height to stop fetching; if -1, it will fetch until the latest block
func (d *BitcoinNodeDatasource) FetchAsync(ctx context.Context, from, to int64, ch chan<- []*types.Block) (*subscription.ClientSubscription[[]*types.Block], error) {
ctx = logger.WithContext(ctx,
slogx.String("package", "datasources"),
slogx.String("datasource", d.Name()),
)
from, to, skip, err := d.prepareRange(from, to)
if err != nil {
return nil, errors.Wrap(err, "failed to prepare fetch range")
}
subscription := subscription.NewSubscription(ch)
if skip {
if err := subscription.UnsubscribeWithContext(ctx); err != nil {
return nil, errors.Wrap(err, "failed to unsubscribe")
}
return subscription.Client(), nil
}
// Create parallel stream
out := make(chan []*types.Block)
stream := cstream.NewStream(ctx, 8, out)
// create slice of block heights to fetch
blockHeights := make([]int64, 0, to-from+1)
for i := from; i <= to; i++ {
blockHeights = append(blockHeights, i)
}
// Wait for stream to finish and close out channel
go func() {
defer close(out)
_ = stream.Wait()
}()
// Fan-out blocks to subscription channel
go func() {
defer func() {
// add a small delay to prevent shutdown before the client receives all blocks
time.Sleep(100 * time.Millisecond)
subscription.Unsubscribe()
}()
for {
select {
case data, ok := <-out:
// stream closed
if !ok {
return
}
// empty blocks
if len(data) == 0 {
continue
}
// send blocks to subscription channel
if err := subscription.Send(ctx, data); err != nil {
if errors.Is(err, errs.Closed) {
return
}
logger.WarnContext(ctx, "Failed to send bitcoin blocks to subscription client",
slogx.Int64("start", data[0].Header.Height),
slogx.Int64("end", data[len(data)-1].Header.Height),
slogx.Error(err),
)
}
case <-ctx.Done():
return
}
}
}()
// Fetch blocks from the Bitcoin node in parallel until all block heights are complete
// or the subscription is done.
go func() {
defer stream.Close()
done := subscription.Done()
chunks := lo.Chunk(blockHeights, blockStreamChunkSize)
for _, chunk := range chunks {
// TODO: Implement throttling logic to control the rate of fetching blocks (block/sec)
chunk := chunk
select {
case <-done:
return
case <-ctx.Done():
return
default:
stream.Go(func() []*types.Block {
startAt := time.Now()
defer func() {
logger.DebugContext(ctx, "Fetched chunk of blocks from Bitcoin node",
slogx.Int("total_blocks", len(chunk)),
slogx.Int64("from", chunk[0]),
slogx.Int64("to", chunk[len(chunk)-1]),
slogx.Duration("duration", time.Since(startAt)),
)
}()
// TODO: should blocks be fetched concurrently or not?
blocks := make([]*types.Block, 0, len(chunk))
for _, height := range chunk {
hash, err := d.btcclient.GetBlockHash(height)
if err != nil {
logger.ErrorContext(ctx, "Can't get block hash from Bitcoin node rpc", slogx.Error(err), slogx.Int64("height", height))
if err := subscription.SendError(ctx, errors.Wrapf(err, "failed to get block hash: height: %d", height)); err != nil {
logger.WarnContext(ctx, "Failed to send datasource error to subscription client", slogx.Error(err))
}
return nil
}
block, err := d.btcclient.GetBlock(hash)
if err != nil {
logger.ErrorContext(ctx, "Can't get block data from Bitcoin node rpc", slogx.Error(err), slogx.Int64("height", height))
if err := subscription.SendError(ctx, errors.Wrapf(err, "failed to get block: height: %d, hash: %s", height, hash)); err != nil {
logger.WarnContext(ctx, "Failed to send datasource error to subscription client", slogx.Error(err))
}
return nil
}
blocks = append(blocks, types.ParseMsgBlock(block, height))
}
return blocks
})
}
}
}()
return subscription.Client(), nil
}
func (d *BitcoinNodeDatasource) prepareRange(fromHeight, toHeight int64) (start, end int64, skip bool, err error) {
start = fromHeight
end = toHeight
// get current bitcoin block height
latestBlockHeight, err := d.btcclient.GetBlockCount()
if err != nil {
return -1, -1, false, errors.Wrap(err, "failed to get block count")
}
// set start to genesis block height
if start < 0 {
start = 0
}
// set end to the current bitcoin block height if
// - end is -1
// - end is greater than the current bitcoin block height
if end < 0 || end > latestBlockHeight {
end = latestBlockHeight
}
// if start is greater than end, skip this round
if start > end {
return -1, -1, true, nil
}
return start, end, false, nil
}
// GetTransactionByHash fetches a transaction from the Bitcoin node by its hash
func (d *BitcoinNodeDatasource) GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) {
rawTxVerbose, err := d.btcclient.GetRawTransactionVerbose(&txHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get raw transaction")
}
blockHash, err := chainhash.NewHashFromStr(rawTxVerbose.BlockHash)
if err != nil {
return nil, errors.Wrap(err, "failed to parse block hash")
}
block, err := d.btcclient.GetBlockVerboseTx(blockHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get block header")
}
// parse tx
txBytes, err := hex.DecodeString(rawTxVerbose.Hex)
if err != nil {
return nil, errors.Wrap(err, "failed to decode transaction hex")
}
var msgTx wire.MsgTx
if err := msgTx.Deserialize(bytes.NewReader(txBytes)); err != nil {
return nil, errors.Wrap(err, "failed to deserialize transaction")
}
var txIndex uint32
for i, tx := range block.Tx {
if tx.Hex == rawTxVerbose.Hex {
txIndex = uint32(i)
break
}
}
return types.ParseMsgTx(&msgTx, block.Height, *blockHash, txIndex), nil
}
// GetBlockHeader fetches the block header from the Bitcoin node by block height
func (d *BitcoinNodeDatasource) GetBlockHeader(ctx context.Context, height int64) (types.BlockHeader, error) {
hash, err := d.btcclient.GetBlockHash(height)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to get block hash")
}
block, err := d.btcclient.GetBlockHeader(hash)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to get block header")
}
return types.ParseMsgBlockHeader(*block, height), nil
}
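
A consumer-side sketch (illustrative, not part of this diff; the RPC settings are placeholders for a reachable Bitcoin Core node) of fetching a small block range synchronously via Fetch, which wraps FetchAsync internally:

```go
package main

import (
	"context"
	"log"

	"github.com/btcsuite/btcd/rpcclient"
	"github.com/gaze-network/indexer-network/core/datasources"
)

func main() {
	// Assumption: placeholder RPC credentials; use your Bitcoin Core settings.
	client, err := rpcclient.New(&rpcclient.ConnConfig{
		Host:         "localhost:8332",
		User:         "user",
		Pass:         "pass",
		HTTPPostMode: true,
		DisableTLS:   true,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	ds := datasources.NewBitcoinNode(client)
	blocks, err := ds.Fetch(context.Background(), 840_000, 840_004) // inclusive range of five blocks
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range blocks {
		log.Printf("height=%d txs=%d", b.Header.Height, len(b.Transactions))
	}
}
```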


@@ -0,0 +1,16 @@
package datasources
import (
"context"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/internal/subscription"
)
// Datasource is an interface for indexer data sources.
type Datasource[T any] interface {
Name() string
Fetch(ctx context.Context, from, to int64) (T, error)
FetchAsync(ctx context.Context, from, to int64, ch chan<- T) (*subscription.ClientSubscription[T], error)
GetBlockHeader(ctx context.Context, height int64) (types.BlockHeader, error)
}


@@ -0,0 +1,257 @@
package indexers
import (
"context"
"log/slog"
"sync"
"time"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
)
const (
maxReorgLookBack = 1000
)
type (
BitcoinProcessor Processor[[]*types.Block]
BitcoinDatasource datasources.Datasource[[]*types.Block]
)
// Ensure BitcoinIndexer implements the IndexerWorker interface
var _ IndexerWorker = (*BitcoinIndexer)(nil)
// BitcoinIndexer is a polling indexer that syncs Bitcoin data to the database.
type BitcoinIndexer struct {
Processor BitcoinProcessor
Datasource BitcoinDatasource
currentBlock types.BlockHeader
quitOnce sync.Once
quit chan struct{}
done chan struct{}
}
// NewBitcoinIndexer creates a new BitcoinIndexer
func NewBitcoinIndexer(processor BitcoinProcessor, datasource BitcoinDatasource) *BitcoinIndexer {
return &BitcoinIndexer{
Processor: processor,
Datasource: datasource,
quit: make(chan struct{}),
done: make(chan struct{}),
}
}
func (*BitcoinIndexer) Type() string {
return "bitcoin"
}
func (i *BitcoinIndexer) Shutdown() error {
return i.ShutdownWithContext(context.Background())
}
func (i *BitcoinIndexer) ShutdownWithTimeout(timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
return i.ShutdownWithContext(ctx)
}
func (i *BitcoinIndexer) ShutdownWithContext(ctx context.Context) (err error) {
i.quitOnce.Do(func() {
close(i.quit)
select {
case <-i.done:
case <-time.After(180 * time.Second):
err = errors.Wrap(errs.Timeout, "indexer shutdown timeout")
case <-ctx.Done():
err = errors.Wrap(ctx.Err(), "indexer shutdown context canceled")
}
})
return
}
func (i *BitcoinIndexer) Run(ctx context.Context) (err error) {
defer close(i.done)
ctx = logger.WithContext(ctx,
slog.String("package", "indexers"),
slog.String("indexer", i.Type()),
slog.String("processor", i.Processor.Name()),
slog.String("datasource", i.Datasource.Name()),
)
// load the current indexed block; fall back to -1 (genesis) if nothing has been indexed yet
i.currentBlock, err = i.Processor.CurrentBlock(ctx)
if err != nil {
if !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "can't init state, failed to get indexer current block")
}
i.currentBlock.Height = -1
}
ticker := time.NewTicker(pollingInterval)
defer ticker.Stop()
for {
select {
case <-i.quit:
logger.InfoContext(ctx, "Got quit signal, stopping indexer")
return nil
case <-ctx.Done():
return nil
case <-ticker.C:
if err := i.process(ctx); err != nil {
logger.ErrorContext(ctx, "Indexer failed while processing", slogx.Error(err))
return errors.Wrap(err, "process failed")
}
logger.DebugContext(ctx, "Waiting for next polling interval")
}
}
}
func (i *BitcoinIndexer) process(ctx context.Context) (err error) {
// height range to fetch data
from, to := i.currentBlock.Height+1, int64(-1)
logger.InfoContext(ctx, "Start fetching bitcoin blocks", slog.Int64("from", from))
ch := make(chan []*types.Block)
subscription, err := i.Datasource.FetchAsync(ctx, from, to, ch)
if err != nil {
return errors.Wrap(err, "failed to fetch data")
}
defer subscription.Unsubscribe()
for {
select {
case <-i.quit:
return nil
case blocks := <-ch:
// empty blocks
if len(blocks) == 0 {
continue
}
startAt := time.Now()
ctx := logger.WithContext(ctx,
slogx.Int64("from", blocks[0].Header.Height),
slogx.Int64("to", blocks[len(blocks)-1].Header.Height),
)
// validate reorg from first block
{
remoteBlockHeader := blocks[0].Header
if !remoteBlockHeader.PrevBlock.IsEqual(&i.currentBlock.Hash) {
logger.WarnContext(ctx, "Detected chain reorganization. Searching for fork point...",
slogx.String("event", "reorg_detected"),
slogx.Stringer("current_hash", i.currentBlock.Hash),
slogx.Stringer("expected_hash", remoteBlockHeader.PrevBlock),
)
var (
start = time.Now()
targetHeight = i.currentBlock.Height - 1
beforeReorgBlockHeader = types.BlockHeader{
Height: -1,
}
)
for n := 0; n < maxReorgLookBack; n++ {
// TODO: concurrent fetch
indexedHeader, err := i.Processor.GetIndexedBlock(ctx, targetHeight)
if err != nil {
return errors.Wrapf(err, "failed to get indexed block, height: %d", targetHeight)
}
remoteHeader, err := i.Datasource.GetBlockHeader(ctx, targetHeight)
if err != nil {
return errors.Wrapf(err, "failed to get remote block header, height: %d", targetHeight)
}
// Found the fork point: hashes match at this height, so no reorg below here
if indexedHeader.Hash.IsEqual(&remoteHeader.Hash) {
beforeReorgBlockHeader = remoteHeader
break
}
// Walk back to find fork point
targetHeight -= 1
}
// Reorg look back limit reached
if beforeReorgBlockHeader.Height < 0 {
return errors.Wrap(errs.SomethingWentWrong, "reorg look back limit reached")
}
logger.InfoContext(ctx, "Found reorg fork point, starting to revert data...",
slogx.String("event", "reorg_forkpoint"),
slogx.Int64("since", beforeReorgBlockHeader.Height+1),
slogx.Int64("total_blocks", i.currentBlock.Height-beforeReorgBlockHeader.Height),
slogx.Duration("search_duration", time.Since(start)),
)
// Revert all data since the reorg block
start = time.Now()
if err := i.Processor.RevertData(ctx, beforeReorgBlockHeader.Height+1); err != nil {
return errors.Wrap(err, "failed to revert data")
}
// Set the current block to the last block before the reorg and
// end the current round so the next round fetches from there again
i.currentBlock = beforeReorgBlockHeader
logger.Info("Fixing chain reorganization completed",
slogx.Int64("current_block", i.currentBlock.Height),
slogx.Duration("duration", time.Since(start)),
)
return nil
}
}
// validate that the fetched blocks are continuous and that no reorg occurred mid-batch
for i := 1; i < len(blocks); i++ {
if blocks[i].Header.Height != blocks[i-1].Header.Height+1 {
return errors.Wrapf(errs.InternalError, "block is not continuous, block[%d] height: %d, block[%d] height: %d", i-1, blocks[i-1].Header.Height, i, blocks[i].Header.Height)
}
if !blocks[i].Header.PrevBlock.IsEqual(&blocks[i-1].Header.Hash) {
logger.WarnContext(ctx, "Chain Reorganization occurred in the middle of batch fetching blocks, need to try to fetch again")
// end current round
return nil
}
}
ctx = logger.WithContext(ctx, slog.Int("total_blocks", len(blocks)))
// Start processing blocks
logger.InfoContext(ctx, "Processing blocks")
if err := i.Processor.Process(ctx, blocks); err != nil {
return errors.WithStack(err)
}
// Update current state
i.currentBlock = blocks[len(blocks)-1].Header
logger.InfoContext(ctx, "Processed blocks successfully",
slogx.String("event", "processed_blocks"),
slogx.Int64("current_block", i.currentBlock.Height),
slogx.Duration("duration", time.Since(startAt)),
)
case <-subscription.Done():
// end current round
if err := ctx.Err(); err != nil {
return errors.Wrap(err, "context done")
}
return nil
case <-ctx.Done():
return errors.WithStack(ctx.Err())
case err := <-subscription.Err():
if err != nil {
return errors.Wrap(err, "got error while fetch async")
}
}
}
}

core/indexers/indexers.go

@@ -0,0 +1,41 @@
package indexers
import (
"context"
"time"
"github.com/gaze-network/indexer-network/core/types"
)
const (
// pollingInterval is the default polling interval for the indexer polling worker
pollingInterval = 15 * time.Second
)
type IndexerWorker interface {
Type() string
Run(ctx context.Context) error
Shutdown() error
ShutdownWithTimeout(timeout time.Duration) error
ShutdownWithContext(ctx context.Context) error
}
type Processor[T any] interface {
Name() string
// Process processes the input data and indexes it.
Process(ctx context.Context, inputs T) error
// CurrentBlock returns the latest indexed block header.
CurrentBlock(ctx context.Context) (types.BlockHeader, error)
// GetIndexedBlock returns the indexed block header by the specified block height.
GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error)
// RevertData revert synced data to the specified block height for re-indexing.
RevertData(ctx context.Context, from int64) error
// VerifyStates verifies the states of the indexed data and the indexer
// to ensure the last shutdown was graceful and no data is missing.
VerifyStates(ctx context.Context) error
}
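
Putting the pieces together: a minimal, illustrative sketch of a BitcoinProcessor implementation wired into the polling indexer. The logProcessor type and the RPC settings below are assumptions for demonstration only; the real modules in this PR (bitcoin, runes) persist blocks and revert them against their databases.

```go
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"time"

	"github.com/btcsuite/btcd/rpcclient"
	"github.com/cockroachdb/errors"
	"github.com/gaze-network/indexer-network/common/errs"
	"github.com/gaze-network/indexer-network/core/datasources"
	"github.com/gaze-network/indexer-network/core/indexers"
	"github.com/gaze-network/indexer-network/core/types"
)

// logProcessor is a toy BitcoinProcessor that keeps headers in memory and logs
// what it sees. It exists only to show the interface shape.
type logProcessor struct {
	headers map[int64]types.BlockHeader
	tip     types.BlockHeader
}

func (p *logProcessor) Name() string { return "log" }

func (p *logProcessor) Process(ctx context.Context, blocks []*types.Block) error {
	for _, b := range blocks {
		log.Printf("block %d: %d txs", b.Header.Height, len(b.Transactions))
		p.headers[b.Header.Height] = b.Header
		p.tip = b.Header
	}
	return nil
}

func (p *logProcessor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
	if len(p.headers) == 0 {
		// Returning errs.NotFound makes the indexer start from the genesis block.
		return types.BlockHeader{}, errors.WithStack(errs.NotFound)
	}
	return p.tip, nil
}

func (p *logProcessor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
	header, ok := p.headers[height]
	if !ok {
		return types.BlockHeader{}, errors.WithStack(errs.NotFound)
	}
	return header, nil
}

func (p *logProcessor) RevertData(ctx context.Context, from int64) error {
	for height := range p.headers {
		if height >= from {
			delete(p.headers, height)
		}
	}
	return nil
}

func (p *logProcessor) VerifyStates(ctx context.Context) error { return nil }

func main() {
	// Assumption: placeholder RPC credentials for a reachable Bitcoin Core node.
	client, err := rpcclient.New(&rpcclient.ConnConfig{Host: "localhost:8332", User: "user", Pass: "pass", HTTPPostMode: true, DisableTLS: true}, nil)
	if err != nil {
		log.Fatal(err)
	}

	indexer := indexers.NewBitcoinIndexer(
		&logProcessor{headers: map[int64]types.BlockHeader{}},
		datasources.NewBitcoinNode(client),
	)

	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()
	go func() {
		if err := indexer.Run(ctx); err != nil {
			log.Printf("indexer stopped: %v", err)
		}
	}()

	<-ctx.Done()
	if err := indexer.ShutdownWithTimeout(30 * time.Second); err != nil {
		log.Printf("graceful shutdown failed: %v", err)
	}
}
```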


@@ -0,0 +1,47 @@
package types
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/samber/lo"
)
type BlockHeader struct {
Hash chainhash.Hash
Height int64
Version int32
PrevBlock chainhash.Hash
MerkleRoot chainhash.Hash
Timestamp time.Time
Bits uint32
Nonce uint32
}
func ParseMsgBlockHeader(src wire.BlockHeader, height int64) BlockHeader {
hash := src.BlockHash()
return BlockHeader{
Hash: hash,
Height: height,
Version: src.Version,
PrevBlock: src.PrevBlock,
MerkleRoot: src.MerkleRoot,
Timestamp: src.Timestamp,
Bits: src.Bits,
Nonce: src.Nonce,
}
}
type Block struct {
Header BlockHeader
Transactions []*Transaction
}
func ParseMsgBlock(src *wire.MsgBlock, height int64) *Block {
hash := src.Header.BlockHash()
return &Block{
Header: ParseMsgBlockHeader(src.Header, height),
Transactions: lo.Map(src.Transactions, func(item *wire.MsgTx, index int) *Transaction { return ParseMsgTx(item, height, hash, uint32(index)) }),
}
}


@@ -0,0 +1,73 @@
package types
import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/samber/lo"
)
type Transaction struct {
BlockHeight int64
BlockHash chainhash.Hash
Index uint32
TxHash chainhash.Hash
Version int32
LockTime uint32
TxIn []*TxIn
TxOut []*TxOut
}
type TxIn struct {
SignatureScript []byte
Witness [][]byte
Sequence uint32
PreviousOutIndex uint32
PreviousOutTxHash chainhash.Hash
}
type TxOut struct {
PkScript []byte
Value int64
}
func (o TxOut) IsOpReturn() bool {
return len(o.PkScript) > 0 && o.PkScript[0] == txscript.OP_RETURN
}
// ParseMsgTx parses btcd/wire.MsgTx to Transaction.
func ParseMsgTx(src *wire.MsgTx, blockHeight int64, blockHash chainhash.Hash, index uint32) *Transaction {
return &Transaction{
BlockHeight: blockHeight,
BlockHash: blockHash,
Index: index,
TxHash: src.TxHash(),
Version: src.Version,
LockTime: src.LockTime,
TxIn: lo.Map(src.TxIn, func(item *wire.TxIn, _ int) *TxIn {
return ParseTxIn(item)
}),
TxOut: lo.Map(src.TxOut, func(item *wire.TxOut, _ int) *TxOut {
return ParseTxOut(item)
}),
}
}
// ParseTxIn parses btcd/wire.TxIn to TxIn.
func ParseTxIn(src *wire.TxIn) *TxIn {
return &TxIn{
SignatureScript: src.SignatureScript,
Witness: src.Witness,
Sequence: src.Sequence,
PreviousOutIndex: src.PreviousOutPoint.Index,
PreviousOutTxHash: src.PreviousOutPoint.Hash,
}
}
// ParseTxOut parses btcd/wire.TxOut to TxOut.
func ParseTxOut(src *wire.TxOut) *TxOut {
return &TxOut{
PkScript: src.PkScript,
Value: src.Value,
}
}


@@ -1 +0,0 @@
package core


@@ -0,0 +1,34 @@
# Database Migration
We use the golang-migrate library to manage database migrations.
### Install golang-migrate
```shell
$ brew install golang-migrate
```
### Commands
#### Create a new migration (sequentially versioned)
```shell
$ migrate create -ext sql -dir . -seq file_name
```
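For example, running the command above with an illustrative name like `create_blocks` produces a sequentially numbered up/down pair:
```shell
$ migrate create -ext sql -dir . -seq create_blocks
# creates:
#   000001_create_blocks.up.sql
#   000001_create_blocks.down.sql
```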
#### Migrate the database up to the latest version
```shell
$ migrate -source file://. -database "postgres://postgres:$PASSWORD@localhost:5432/postgres?sslmode=disable" up
```
#### Roll the database back by one version
```shell
$ migrate -source file://. -database "postgres://postgres:$PASSWORD@localhost:5432/postgres?sslmode=disable" down 1
```
### References:
- Golang-Migrate: https://github.com/golang-migrate
- Connection string: https://www.connectionstrings.com/postgresql/

go.mod

@@ -1,3 +1,81 @@
module github.com/gaze-network/indexer-network
go 1.22
require (
github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11
github.com/btcsuite/btcd v0.24.0
github.com/btcsuite/btcd/btcutil v1.1.5
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0
github.com/cockroachdb/errors v1.11.1
github.com/gaze-network/uint128 v1.3.0
github.com/gofiber/fiber/v2 v2.52.4
github.com/golang-migrate/migrate/v4 v4.17.1
github.com/jackc/pgx v3.6.2+incompatible
github.com/jackc/pgx/v5 v5.5.5
github.com/mcosta74/pgx-slog v0.3.0
github.com/planxnx/concurrent-stream v0.1.5
github.com/samber/lo v1.39.0
github.com/shopspring/decimal v1.3.1
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.8.4
github.com/valyala/fasthttp v1.51.0
go.uber.org/automaxprocs v1.5.3
golang.org/x/sync v0.5.0
)
require (
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.1.3 // indirect
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/uuid v1.5.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/klauspost/compress v1.17.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.20.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum

@@ -0,0 +1,311 @@
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11 h1:Xpbu03JdzqWEXcL6xr43Wxjnwh/Txt16WXJ7IlzvoxA=
github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11/go.mod h1:ft8CEDBt0csuZ+yM/bKf7ZlV6lWvWY/TFXzp7+Ze9Jw=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A=
github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo=
github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8=
github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg=
github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0=
github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gaze-network/uint128 v1.3.0 h1:25qtRiDKQXa+mD5rN0nbUkbvY26/uzfSF97eWvhIr0I=
github.com/gaze-network/uint128 v1.3.0/go.mod h1:zAwwcnoRUNiiQj0vjLmHgNgJ+w2RUgzMAJgl8d7tRug=
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/gofiber/fiber/v2 v2.52.4 h1:P+T+4iK7VaqUsq2PALYEfBBo6bJZ4q3FP8cZ84EggTM=
github.com/gofiber/fiber/v2 v2.52.4/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-migrate/migrate/v4 v4.17.1 h1:4zQ6iqL6t6AiItphxJctQb3cFqWiSpMnX7wLTPnnYO4=
github.com/golang-migrate/migrate/v4 v4.17.1/go.mod h1:m8hinFyWBn0SA4QKHuKh175Pm9wjmxj3S2Mia7dbXzM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o=
github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mcosta74/pgx-slog v0.3.0 h1:v7nl8XKE4ObGxZfYUUs8uUWrimvNib2V4P7Mp0WjSyw=
github.com/mcosta74/pgx-slog v0.3.0/go.mod h1:73/rhilX7+ybQ9RH/BZBtOkTDiGAH1yBrcatN6jQW5E=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/planxnx/concurrent-stream v0.1.5 h1:qSMM27m7AApvalS0rSmovxOtDCnLy0/HinYJPe3oQfQ=
github.com/planxnx/concurrent-stream v0.1.5/go.mod h1:vxnW2qxkCLppMo5+Zns3b5/CiVxYQjXRLVFGJ9xvkXk=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.51.0 h1:8b30A5JlZ6C7AS81RsWjYMQmrZG6feChmgAolCl1SqA=
github.com/valyala/fasthttp v1.51.0/go.mod h1:oI2XroL+lI7vdXyYoQk03bXBThfFl2cVdIA3Xl7cH8g=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


internal/config/config.go

@@ -0,0 +1,120 @@
package config
import (
"context"
"log/slog"
"strings"
"sync"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
btcconfig "github.com/gaze-network/indexer-network/modules/bitcoin/config"
runesconfig "github.com/gaze-network/indexer-network/modules/runes/config"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
var (
isInit bool
mu sync.Mutex
config = &Config{
Logger: logger.Config{
Output: "TEXT",
},
Network: common.NetworkMainnet,
BitcoinNode: BitcoinNodeClient{
User: "user",
Pass: "pass",
},
}
)
type Config struct {
Logger logger.Config `mapstructure:"logger"`
BitcoinNode BitcoinNodeClient `mapstructure:"bitcoin_node"`
Network common.Network `mapstructure:"network"`
HTTPServer HTTPServerConfig `mapstructure:"http_server"`
Modules Modules `mapstructure:"modules"`
Reporting reportingclient.Config `mapstructure:"reporting"`
}
type BitcoinNodeClient struct {
Host string `mapstructure:"host"`
User string `mapstructure:"user"`
Pass string `mapstructure:"pass"`
DisableTLS bool `mapstructure:"disable_tls"`
}
type Modules struct {
Bitcoin btcconfig.Config `mapstructure:"bitcoin"`
Runes runesconfig.Config `mapstructure:"runes"`
}
type HTTPServerConfig struct {
Port int `mapstructure:"port"`
}
// Parse parses the configuration from the config file and environment variables
func Parse(configFile ...string) Config {
mu.Lock()
defer mu.Unlock()
return parse(configFile...)
}
// Load returns the loaded configuration, parsing it first if it has not been parsed yet
func Load() Config {
mu.Lock()
defer mu.Unlock()
if isInit {
return *config
}
return parse()
}
// BindPFlag binds a specific key to a pflag (as used by cobra).
// Example (where serverCmd is a Cobra instance):
//
// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
func BindPFlag(key string, flag *pflag.Flag) {
if err := viper.BindPFlag(key, flag); err != nil {
logger.Panic("Something went wrong, failed to bind flag for config", slog.String("package", "config"), slogx.Error(err))
}
}
// SetDefault sets the default value for this key.
// SetDefault is case-insensitive for a key.
// The default is only used when no value is provided by the user via flag, config or ENV.
func SetDefault(key string, value any) { viper.SetDefault(key, value) }
func parse(configFile ...string) Config {
ctx := logger.WithContext(context.Background(), slog.String("package", "config"))
if len(configFile) > 0 && configFile[0] != "" {
viper.SetConfigFile(configFile[0])
} else {
viper.AddConfigPath("./")
viper.SetConfigName("config")
}
viper.AutomaticEnv()
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
if err := viper.ReadInConfig(); err != nil {
var errNotfound viper.ConfigFileNotFoundError
if errors.As(err, &errNotfound) {
logger.WarnContext(ctx, "Config file not found, use default config value", slogx.Error(err))
} else {
logger.PanicContext(ctx, "Invalid config file", slogx.Error(err))
}
}
if err := viper.Unmarshal(&config); err != nil {
logger.PanicContext(ctx, "Something went wrong, failed to unmarshal config", slogx.Error(err))
}
isInit = true
return *config
}
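
A usage sketch (illustrative; the command name, flag name, and config file path are assumptions, and the flag binding mirrors the BindPFlag example in the comments above):

```go
package main

import (
	"github.com/gaze-network/indexer-network/internal/config"
	"github.com/gaze-network/indexer-network/pkg/logger"
	"github.com/gaze-network/indexer-network/pkg/logger/slogx"
	"github.com/spf13/cobra"
)

func main() {
	// Assumption: a root command like the one created in the cli package of this PR.
	rootCmd := &cobra.Command{Use: "gaze"}
	rootCmd.PersistentFlags().String("network", "mainnet", "Network to run the indexer on")
	config.BindPFlag("network", rootCmd.PersistentFlags().Lookup("network"))

	rootCmd.Run = func(cmd *cobra.Command, args []string) {
		conf := config.Parse("config.yaml") // later callers can use config.Load()
		if !conf.Network.IsSupported() {
			logger.Panic("unsupported network", slogx.String("network", conf.Network.String()))
		}
		logger.Info("starting indexer", slogx.String("network", conf.Network.String()))
	}
	_ = rootCmd.Execute()
}
```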


@@ -0,0 +1,37 @@
package postgres
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxpool"
)
// Make sure that interfaces are compatible with the pgx package
var (
_ DB = (*pgx.Conn)(nil)
_ DB = (*pgxpool.Conn)(nil)
)
// Queryable is an interface that can be used to execute queries and commands
type Queryable interface {
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
QueryRow(context.Context, string, ...interface{}) pgx.Row
}
// TxQueryable is an interface that can be used to execute queries and commands within a transaction
type TxQueryable interface {
Queryable
Begin(context.Context) (pgx.Tx, error)
BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error)
}
// DB is an interface that can be used to execute queries and commands, and also to send batches
type DB interface {
Queryable
TxQueryable
SendBatch(ctx context.Context, b *pgx.Batch) (br pgx.BatchResults)
Ping(ctx context.Context) error
}


@@ -0,0 +1,127 @@
package postgres
import (
"context"
"fmt"
"github.com/Cleverse/go-utilities/utils"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/jackc/pgx/v5/tracelog"
pgxslog "github.com/mcosta74/pgx-slog"
)
const (
DefaultMaxConns = 16
DefaultMinConns = 0
DefaultLogLevel = tracelog.LogLevelError
)
type Config struct {
Host string `mapstructure:"host"` // Default is 127.0.0.1
Port string `mapstructure:"port"` // Default is 5432
User string `mapstructure:"user"` // Default is empty
Password string `mapstructure:"password"` // Default is empty
DBName string `mapstructure:"db_name"` // Default is postgres
SSLMode string `mapstructure:"ssl_mode"` // Default is prefer
URL string `mapstructure:"url"` // If URL is provided, other fields are ignored
MaxConns int32 `mapstructure:"max_conns"` // Default is 16
MinConns int32 `mapstructure:"min_conns"` // Default is 0
Debug bool `mapstructure:"debug"`
}
// New creates a new connection to the database
func New(ctx context.Context, conf Config) (*pgx.Conn, error) {
// Prepare connection pool configuration
connConfig, err := pgx.ParseConfig(conf.String())
if err != nil {
return nil, errors.Join(errs.InvalidArgument, errors.Wrap(err, "failed to parse config"))
}
connConfig.Tracer = conf.QueryTracer()
// Create a new connection
conn, err := pgx.ConnectConfig(ctx, connConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to create a new connection")
}
// Test the connection
if err := conn.Ping(ctx); err != nil {
return nil, errors.Wrap(err, "failed to connect to the database")
}
return conn, nil
}
// NewPool creates a new connection pool to the database
func NewPool(ctx context.Context, conf Config) (*pgxpool.Pool, error) {
// Prepare connection pool configuration
connConfig, err := pgxpool.ParseConfig(conf.String())
if err != nil {
return nil, errors.Join(errs.InvalidArgument, errors.Wrap(err, "failed to parse config"))
}
connConfig.MaxConns = utils.Default(conf.MaxConns, DefaultMaxConns)
connConfig.MinConns = utils.Default(conf.MinConns, DefaultMinConns)
connConfig.ConnConfig.Tracer = conf.QueryTracer()
// Create a new connection pool
connPool, err := pgxpool.NewWithConfig(ctx, connConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to create a new connection pool")
}
// Test the connection
if err := connPool.Ping(ctx); err != nil {
return nil, errors.Wrap(err, "failed to connect to the database")
}
return connPool, nil
}
// String returns the connection string (DSN format or URL format)
func (conf Config) String() string {
if conf.Host == "" {
conf.Host = "127.0.0.1"
}
if conf.Port == "" {
conf.Port = "5432"
}
if conf.SSLMode == "" {
conf.SSLMode = "prefer"
}
if conf.DBName == "" {
conf.DBName = "postgres"
}
// Construct DSN
connString := fmt.Sprintf("host=%s dbname=%s port=%s sslmode=%s", conf.Host, conf.DBName, conf.Port, conf.SSLMode)
if conf.User != "" {
connString = fmt.Sprintf("%s user=%s", connString, conf.User)
}
if conf.Password != "" {
connString = fmt.Sprintf("%s password=%s", connString, conf.Password)
}
// Prefer URL over DSN format
if conf.URL != "" {
connString = conf.URL
}
return connString
}
func (conf Config) QueryTracer() pgx.QueryTracer {
loglevel := DefaultLogLevel
if conf.Debug {
loglevel = tracelog.LogLevelTrace
}
return &tracelog.TraceLog{
Logger: pgxslog.NewLogger(logger.With("package", "postgres")),
LogLevel: loglevel,
}
}
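A hedged usage sketch of NewPool; all connection values below are placeholders, and setting URL instead would take precedence over the individual DSN fields:

package main

import (
	"context"

	"github.com/gaze-network/indexer-network/internal/postgres"
)

func main() {
	ctx := context.Background()
	pool, err := postgres.NewPool(ctx, postgres.Config{
		Host:     "127.0.0.1",
		Port:     "5432",
		User:     "postgres",
		Password: "secret",
		DBName:   "postgres",
		SSLMode:  "prefer",
		MaxConns: 16,
		Debug:    true, // switches query tracing to tracelog.LogLevelTrace
	})
	if err != nil {
		panic(err)
	}
	defer pool.Close()
	_ = pool // hand the pool to the repositories / query layers that need it
}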


@@ -0,0 +1,31 @@
package subscription
import "context"
// ClientSubscription is a subscription that can be used by the client to unsubscribe from the subscription.
type ClientSubscription[T any] struct {
subscription *Subscription[T]
}
func (c *ClientSubscription[T]) Unsubscribe() {
c.subscription.Unsubscribe()
}
func (c *ClientSubscription[T]) UnsubscribeWithContext(ctx context.Context) (err error) {
return c.subscription.UnsubscribeWithContext(ctx)
}
// Err returns the error channel of the subscription.
func (c *ClientSubscription[T]) Err() <-chan error {
return c.subscription.Err()
}
// Done returns the done channel of the subscription
func (c *ClientSubscription[T]) Done() <-chan struct{} {
return c.subscription.Done()
}
// IsClosed reports whether the subscription is closed
func (c *ClientSubscription[T]) IsClosed() bool {
return c.subscription.IsClosed()
}


@@ -0,0 +1,132 @@
package subscription
import (
"context"
"sync"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
)
// SubscriptionBufferSize is the buffer size of the subscription channel.
// It is used to prevent blocking the client dispatcher when the client is slow to consume values.
var SubscriptionBufferSize = 8
// Subscription is a subscription to a stream of values from the client dispatcher.
// It has two channels: one for values, and one for errors.
type Subscription[T any] struct {
// The channel to which the subscription sends values.
channel chan<- T
// The in channel receives values from the client dispatcher.
in chan T
// The error channel receives the error from the client dispatcher.
err chan error
quiteOnce sync.Once
// Closing of the subscription is requested by sending on 'quit'. This is handled by
// the forwarding loop, which stops sending to sub.channel and closes 'quitDone'
// once the unsubscribe request has been processed.
quit chan struct{}
quitDone chan struct{}
}
func NewSubscription[T any](channel chan<- T) *Subscription[T] {
subscription := &Subscription[T]{
channel: channel,
in: make(chan T, SubscriptionBufferSize),
err: make(chan error, SubscriptionBufferSize),
quit: make(chan struct{}),
quitDone: make(chan struct{}),
}
go func() {
subscription.run()
}()
return subscription
}
func (s *Subscription[T]) Unsubscribe() {
_ = s.UnsubscribeWithContext(context.Background())
}
func (s *Subscription[T]) UnsubscribeWithContext(ctx context.Context) (err error) {
s.quiteOnce.Do(func() {
select {
case s.quit <- struct{}{}:
<-s.quitDone
case <-ctx.Done():
err = ctx.Err()
}
})
return errors.WithStack(err)
}
// Client returns a client subscription for this subscription.
func (s *Subscription[T]) Client() *ClientSubscription[T] {
return &ClientSubscription[T]{
subscription: s,
}
}
// Err returns the error channel of the subscription.
func (s *Subscription[T]) Err() <-chan error {
return s.err
}
// Done returns the done channel of the subscription
func (s *Subscription[T]) Done() <-chan struct{} {
return s.quitDone
}
// IsClosed reports whether the subscription is closed
func (s *Subscription[T]) IsClosed() bool {
select {
case <-s.quitDone:
return true
default:
return false
}
}
// Send sends a value to the subscription channel. If the subscription is closed, it returns an error.
func (s *Subscription[T]) Send(ctx context.Context, value T) error {
select {
case s.in <- value:
case <-s.quitDone:
return errors.Wrap(errs.Closed, "subscription is closed")
case <-ctx.Done():
return errors.WithStack(ctx.Err())
}
return nil
}
// SendError sends an error to the subscription error channel. If the subscription is closed, it returns an error.
func (s *Subscription[T]) SendError(ctx context.Context, err error) error {
select {
case s.err <- err:
case <-s.quitDone:
return errors.Wrap(errs.Closed, "subscription is closed")
case <-ctx.Done():
return errors.WithStack(ctx.Err())
}
return nil
}
// run starts the forwarding loop for the subscription.
func (s *Subscription[T]) run() {
defer close(s.quitDone)
for {
select {
case <-s.quit:
return
case value := <-s.in:
select {
case s.channel <- value:
case <-s.quit:
return
}
}
}
}
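A hedged, self-contained sketch of the intended producer/consumer flow (the values and the 100 ms drain delay mirror the datasource usage later in this PR; names are illustrative):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/gaze-network/indexer-network/internal/subscription"
)

func main() {
	ctx := context.Background()
	ch := make(chan int)
	sub := subscription.NewSubscription[int](ch)
	client := sub.Client()

	// Producer: push a few values, give the forwarding loop a moment to drain,
	// then close the subscription.
	go func() {
		for i := 0; i < 3; i++ {
			if err := sub.Send(ctx, i); err != nil {
				return // errs.Closed or context cancellation
			}
		}
		time.Sleep(100 * time.Millisecond)
		sub.Unsubscribe()
	}()

	// Consumer: read until the subscription reports it is done.
	for {
		select {
		case v := <-ch:
			fmt.Println("received", v)
		case err := <-client.Err():
			fmt.Println("producer error:", err)
		case <-client.Done():
			return
		}
	}
}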

main.go

@@ -0,0 +1,18 @@
package main
import (
"context"
"os"
"os/signal"
"syscall"
"github.com/gaze-network/indexer-network/cmd"
_ "go.uber.org/automaxprocs"
)
func main() {
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
defer stop()
cmd.Execute(ctx)
}


@@ -0,0 +1,244 @@
package btcclient
import (
"context"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/internal/subscription"
"github.com/gaze-network/indexer-network/modules/bitcoin/datagateway"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
cstream "github.com/planxnx/concurrent-stream"
"github.com/samber/lo"
)
// TODO: Refactor this; datasources.BitcoinNode and this package are essentially the same.
const (
blockStreamChunkSize = 100
)
// Make sure ClientDatabase implements the datasources.Datasource interface
var _ datasources.Datasource[[]*types.Block] = (*ClientDatabase)(nil)
// ClientDatabase is a client to connect to the bitcoin database.
type ClientDatabase struct {
bitcoinDg datagateway.BitcoinDataGateway
}
func NewClientDatabase(bitcoinDg datagateway.BitcoinDataGateway) *ClientDatabase {
return &ClientDatabase{
bitcoinDg: bitcoinDg,
}
}
func (d ClientDatabase) Name() string {
return "bitcoin_database"
}
func (d *ClientDatabase) Fetch(ctx context.Context, from, to int64) ([]*types.Block, error) {
ch := make(chan []*types.Block)
subscription, err := d.FetchAsync(ctx, from, to, ch)
if err != nil {
return nil, errors.WithStack(err)
}
defer subscription.Unsubscribe()
blocks := make([]*types.Block, 0)
for {
select {
case b, ok := <-ch:
if !ok {
return blocks, nil
}
blocks = append(blocks, b...)
case <-subscription.Done():
if err := ctx.Err(); err != nil {
return nil, errors.Wrap(err, "context done")
}
return blocks, nil
case err := <-subscription.Err():
if err != nil {
return nil, errors.Wrap(err, "got error while fetch async")
}
return blocks, nil
case <-ctx.Done():
return nil, errors.Wrap(ctx.Err(), "context done")
}
}
}
func (d *ClientDatabase) FetchAsync(ctx context.Context, from, to int64, ch chan<- []*types.Block) (*subscription.ClientSubscription[[]*types.Block], error) {
ctx = logger.WithContext(ctx,
slogx.String("package", "datasources"),
slogx.String("datasource", d.Name()),
)
from, to, skip, err := d.prepareRange(ctx, from, to)
if err != nil {
return nil, errors.Wrap(err, "failed to prepare fetch range")
}
subscription := subscription.NewSubscription(ch)
if skip {
if err := subscription.UnsubscribeWithContext(ctx); err != nil {
return nil, errors.Wrap(err, "failed to unsubscribe")
}
return subscription.Client(), nil
}
// Create parallel stream
out := make(chan []*types.Block)
stream := cstream.NewStream(ctx, 8, out)
// Create slice of block heights to fetch
blockHeights := make([]int64, 0, to-from+1)
for i := from; i <= to; i++ {
blockHeights = append(blockHeights, i)
}
// Wait for stream to finish and close out channel
go func() {
defer close(out)
_ = stream.Wait()
}()
// Fan-out blocks to subscription channel
go func() {
defer func() {
// add a small delay to prevent shutdown before the client receives all blocks
time.Sleep(100 * time.Millisecond)
subscription.Unsubscribe()
}()
for {
select {
case data, ok := <-out:
// stream closed
if !ok {
return
}
// empty blocks
if len(data) == 0 {
continue
}
// send blocks to subscription channel
if err := subscription.Send(ctx, data); err != nil {
if errors.Is(err, errs.Closed) {
return
}
logger.WarnContext(ctx, "Failed to send bitcoin blocks to subscription client",
slogx.Int64("start", data[0].Header.Height),
slogx.Int64("end", data[len(data)-1].Header.Height),
slogx.Error(err),
)
}
case <-ctx.Done():
return
}
}
}()
// Fetch blocks from the Bitcoin database in parallel until all block heights are fetched
// or the subscription is done.
go func() {
defer stream.Close()
done := subscription.Done()
chunks := lo.Chunk(blockHeights, blockStreamChunkSize)
for _, chunk := range chunks {
chunk := chunk
select {
case <-done:
return
case <-ctx.Done():
return
default:
if len(chunk) == 0 {
continue
}
stream.Go(func() []*types.Block {
startAt := time.Now()
defer func() {
logger.DebugContext(ctx, "Fetched chunk of blocks from Bitcoin node",
slogx.Int("total_blocks", len(chunk)),
slogx.Int64("from", chunk[0]),
slogx.Int64("to", chunk[len(chunk)-1]),
slogx.Duration("duration", time.Since(startAt)),
)
}()
fromHeight, toHeight := chunk[0], chunk[len(chunk)-1]
blocks, err := d.bitcoinDg.GetBlocksByHeightRange(ctx, fromHeight, toHeight)
if err != nil {
logger.ErrorContext(ctx, "Can't get block data from Bitcoin database",
slogx.Error(err),
slogx.Int64("from", fromHeight),
slogx.Int64("to", toHeight),
)
if err := subscription.SendError(ctx, errors.Wrapf(err, "failed to get blocks: from_height: %d, to_height: %d", fromHeight, toHeight)); err != nil {
logger.WarnContext(ctx, "Failed to send datasource error to subscription client", slogx.Error(err))
}
return nil
}
return blocks
})
}
}
}()
return subscription.Client(), nil
}
func (c *ClientDatabase) GetBlockHeader(ctx context.Context, height int64) (types.BlockHeader, error) {
header, err := c.bitcoinDg.GetBlockHeaderByHeight(ctx, height)
if err != nil {
return types.BlockHeader{}, errors.WithStack(err)
}
return header, nil
}
func (c *ClientDatabase) prepareRange(ctx context.Context, fromHeight, toHeight int64) (start, end int64, skip bool, err error) {
start = fromHeight
end = toHeight
// get current bitcoin block height
latestBlock, err := c.bitcoinDg.GetLatestBlockHeader(ctx)
if err != nil {
return -1, -1, false, errors.Wrap(err, "failed to get latest block header")
}
// set start to genesis block height
if start < 0 {
start = 0
}
// set end to the current bitcoin block height if
// - end is negative
// - end is greater than the current bitcoin block height
if end < 0 || end > latestBlock.Height {
end = latestBlock.Height
}
// if start is greater than end, skip this round
if start > end {
return -1, -1, true, nil
}
return start, end, false, nil
}
// GetTransactionByHash returns a transaction with the given hash. Returns errs.NotFound if transaction does not exist.
func (c *ClientDatabase) GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) {
tx, err := c.bitcoinDg.GetTransactionByHash(ctx, txHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get transaction by hash")
}
return tx, nil
}
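A hedged sketch of driving this datasource (the btcclient import path is assumed from the module layout, and repo stands in for any datagateway.BitcoinDataGateway implementation, such as the postgres repository added later in this PR):

package example

import (
	"context"
	"fmt"

	"github.com/gaze-network/indexer-network/modules/bitcoin/btcclient" // import path assumed
	"github.com/gaze-network/indexer-network/modules/bitcoin/datagateway"
)

func dumpIndexedBlocks(ctx context.Context, repo datagateway.BitcoinDataGateway) error {
	db := btcclient.NewClientDatabase(repo)
	// from=-1 / to=-1 lets prepareRange clamp the range to [0, latest indexed height];
	// blocks are fetched from the database in chunks of up to 100 heights.
	blocks, err := db.Fetch(ctx, -1, -1)
	if err != nil {
		return err
	}
	for _, b := range blocks {
		fmt.Println("indexed block", b.Header.Height, b.Header.Hash.String())
	}
	return nil
}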


@@ -0,0 +1,12 @@
package btcclient
import (
"context"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/core/types"
)
type Contract interface {
GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error)
}


@@ -0,0 +1,8 @@
package config
import "github.com/gaze-network/indexer-network/internal/postgres"
type Config struct {
Database string `mapstructure:"database"` // Database to store bitcoin data.
Postgres postgres.Config `mapstructure:"postgres"`
}


@@ -0,0 +1,26 @@
package bitcoin
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/types"
)
const (
Version = "v0.0.1"
DBVersion = 1
)
var (
// defaultCurrentBlock is the default value for the current block header for first-time indexing
defaultCurrentBlock = types.BlockHeader{
Hash: common.ZeroHash,
Height: -1,
}
lastV1Block = types.BlockHeader{
Hash: *utils.Must(chainhash.NewHashFromStr("00000000000001aa077d7aa84c532a4d69bdbff519609d1da0835261b7a74eb6")),
Height: 227835,
}
)


@@ -0,0 +1,18 @@
BEGIN;
-- DROP INDEX
DROP INDEX IF EXISTS bitcoin_blocks_block_hash_idx;
DROP INDEX IF EXISTS bitcoin_transactions_tx_hash_idx;
DROP INDEX IF EXISTS bitcoin_transactions_block_hash_idx;
DROP INDEX IF EXISTS bitcoin_transaction_txouts_pkscript_idx;
DROP INDEX IF EXISTS bitcoin_transaction_txins_prevout_idx;
-- DROP TABLE
DROP TABLE IF EXISTS "bitcoin_indexer_stats";
DROP TABLE IF EXISTS "bitcoin_indexer_db_version";
DROP TABLE IF EXISTS "bitcoin_transaction_txins";
DROP TABLE IF EXISTS "bitcoin_transaction_txouts";
DROP TABLE IF EXISTS "bitcoin_transactions";
DROP TABLE IF EXISTS "bitcoin_blocks";
COMMIT;


@@ -0,0 +1,72 @@
BEGIN;
-- Indexer Client Information
CREATE TABLE IF NOT EXISTS "bitcoin_indexer_stats" (
"id" BIGSERIAL PRIMARY KEY,
"client_version" TEXT NOT NULL,
"network" TEXT NOT NULL,
"created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS "bitcoin_indexer_db_version" (
"id" BIGSERIAL PRIMARY KEY,
"version" INT NOT NULL,
"created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
);
INSERT INTO "bitcoin_indexer_db_version" ("version") VALUES (1);
-- Bitcoin Data
CREATE TABLE IF NOT EXISTS "bitcoin_blocks" (
"block_height" INT NOT NULL PRIMARY KEY,
"block_hash" TEXT NOT NULL,
"version" INT NOT NULL,
"merkle_root" TEXT NOT NULL,
"prev_block_hash" TEXT NOT NULL,
"timestamp" TIMESTAMP WITH TIME ZONE NOT NULL,
"bits" BIGINT NOT NULL,
"nonce" BIGINT NOT NULL
);
CREATE INDEX IF NOT EXISTS bitcoin_blocks_block_hash_idx ON "bitcoin_blocks" USING HASH ("block_hash");
CREATE TABLE IF NOT EXISTS "bitcoin_transactions" (
"tx_hash" TEXT NOT NULL, -- can't use as primary key because block v1 has duplicate tx hashes (coinbase tx). See: https://github.com/bitcoin/bitcoin/commit/a206b0ea12eb4606b93323268fc81a4f1f952531
"version" INT NOT NULL,
"locktime" BIGINT NOT NULL,
"block_height" INT NOT NULL,
"block_hash" TEXT NOT NULL,
"idx" INT NOT NULL,
PRIMARY KEY ("block_height", "idx")
);
CREATE INDEX IF NOT EXISTS bitcoin_transactions_tx_hash_idx ON "bitcoin_transactions" USING HASH ("tx_hash");
CREATE INDEX IF NOT EXISTS bitcoin_transactions_block_hash_idx ON "bitcoin_transactions" USING HASH ("block_hash");
CREATE TABLE IF NOT EXISTS "bitcoin_transaction_txouts" (
"tx_hash" TEXT NOT NULL,
"tx_idx" INT NOT NULL,
"pkscript" TEXT NOT NULL, -- Hex String
"value" BIGINT NOT NULL,
"is_spent" BOOLEAN NOT NULL DEFAULT false,
PRIMARY KEY ("tx_hash", "tx_idx")
);
CREATE INDEX IF NOT EXISTS bitcoin_transaction_txouts_pkscript_idx ON "bitcoin_transaction_txouts" USING HASH ("pkscript");
CREATE TABLE IF NOT EXISTS "bitcoin_transaction_txins" (
"tx_hash" TEXT NOT NULL,
"tx_idx" INT NOT NULL,
"prevout_tx_hash" TEXT NOT NULL,
"prevout_tx_idx" INT NOT NULL,
"prevout_pkscript" TEXT NULL, -- Hex String, Can be NULL if the prevout is a coinbase transaction
"scriptsig" TEXT NOT NULL, -- Hex String
"witness" TEXT NOT NULL DEFAULT '', -- Hex String
"sequence" BIGINT NOT NULL,
PRIMARY KEY ("tx_hash", "tx_idx")
);
CREATE INDEX IF NOT EXISTS bitcoin_transaction_txins_prevout_idx ON "bitcoin_transaction_txins" USING BTREE ("prevout_tx_hash", "prevout_tx_idx");
COMMIT;


@@ -0,0 +1,99 @@
-- name: GetLatestBlockHeader :one
SELECT * FROM bitcoin_blocks ORDER BY block_height DESC LIMIT 1;
-- name: InsertBlock :exec
INSERT INTO bitcoin_blocks ("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce") VALUES ($1, $2, $3, $4, $5, $6, $7, $8);
-- name: BatchInsertBlocks :exec
INSERT INTO bitcoin_blocks ("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce")
VALUES (
unnest(@block_height_arr::INT[]),
unnest(@block_hash_arr::TEXT[]),
unnest(@version_arr::INT[]),
unnest(@merkle_root_arr::TEXT[]),
unnest(@prev_block_hash_arr::TEXT[]),
unnest(@timestamp_arr::TIMESTAMP WITH TIME ZONE[]), -- or use TIMESTAMPTZ
unnest(@bits_arr::BIGINT[]),
unnest(@nonce_arr::BIGINT[])
);
-- name: BatchInsertTransactions :exec
INSERT INTO bitcoin_transactions ("tx_hash","version","locktime","block_height","block_hash","idx")
VALUES (
unnest(@tx_hash_arr::TEXT[]),
unnest(@version_arr::INT[]),
unnest(@locktime_arr::BIGINT[]),
unnest(@block_height_arr::INT[]),
unnest(@block_hash_arr::TEXT[]),
unnest(@idx_arr::INT[])
);
-- name: BatchInsertTransactionTxIns :exec
WITH update_txout AS (
UPDATE "bitcoin_transaction_txouts"
SET "is_spent" = true
FROM (SELECT unnest(@prevout_tx_hash_arr::TEXT[]) as tx_hash, unnest(@prevout_tx_idx_arr::INT[]) as tx_idx) as txin
WHERE "bitcoin_transaction_txouts"."tx_hash" = txin.tx_hash AND "bitcoin_transaction_txouts"."tx_idx" = txin.tx_idx AND "is_spent" = false
RETURNING "bitcoin_transaction_txouts"."tx_hash", "bitcoin_transaction_txouts"."tx_idx", "pkscript"
), prepare_insert AS (
SELECT input.tx_hash, input.tx_idx, prevout_tx_hash, prevout_tx_idx, update_txout.pkscript as prevout_pkscript, scriptsig, witness, sequence
FROM (
SELECT
unnest(@tx_hash_arr::TEXT[]) as tx_hash,
unnest(@tx_idx_arr::INT[]) as tx_idx,
unnest(@prevout_tx_hash_arr::TEXT[]) as prevout_tx_hash,
unnest(@prevout_tx_idx_arr::INT[]) as prevout_tx_idx,
unnest(@scriptsig_arr::TEXT[]) as scriptsig,
unnest(@witness_arr::TEXT[]) as witness,
unnest(@sequence_arr::INT[]) as sequence
) input LEFT JOIN update_txout ON "update_txout"."tx_hash" = "input"."prevout_tx_hash" AND "update_txout"."tx_idx" = "input"."prevout_tx_idx"
)
INSERT INTO bitcoin_transaction_txins ("tx_hash","tx_idx","prevout_tx_hash","prevout_tx_idx", "prevout_pkscript","scriptsig","witness","sequence")
SELECT "tx_hash", "tx_idx", "prevout_tx_hash", "prevout_tx_idx", "prevout_pkscript", "scriptsig", "witness", "sequence" FROM prepare_insert;
-- name: BatchInsertTransactionTxOuts :exec
INSERT INTO bitcoin_transaction_txouts ("tx_hash","tx_idx","pkscript","value")
VALUES (
unnest(@tx_hash_arr::TEXT[]),
unnest(@tx_idx_arr::INT[]),
unnest(@pkscript_arr::TEXT[]),
unnest(@value_arr::BIGINT[])
);
-- name: RevertData :exec
WITH delete_tx AS (
DELETE FROM "bitcoin_transactions" WHERE "block_height" >= @from_height
RETURNING "tx_hash"
), delete_txin AS (
DELETE FROM "bitcoin_transaction_txins" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx)
RETURNING "prevout_tx_hash", "prevout_tx_idx"
), delete_txout AS (
DELETE FROM "bitcoin_transaction_txouts" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx)
RETURNING "tx_hash", "tx_idx"
), revert_txout_spent AS (
UPDATE "bitcoin_transaction_txouts"
SET "is_spent" = false
WHERE
("tx_hash", "tx_idx") IN (SELECT "prevout_tx_hash", "prevout_tx_idx" FROM delete_txin) AND
("tx_hash", "tx_idx") NOT IN (SELECT "tx_hash", "tx_idx" FROM delete_txout) -- avoid to modified same row twice (modified the same row twice in a single statement is not supported)
RETURNING NULL
)
DELETE FROM "bitcoin_blocks" WHERE "bitcoin_blocks"."block_height" >= @from_height;
-- name: GetBlockByHeight :one
SELECT * FROM bitcoin_blocks WHERE block_height = $1;
-- name: GetBlocksByHeightRange :many
SELECT * FROM bitcoin_blocks WHERE block_height >= @from_height AND block_height <= @to_height ORDER BY block_height ASC;
-- name: GetTransactionsByHeightRange :many
SELECT * FROM bitcoin_transactions WHERE block_height >= @from_height AND block_height <= @to_height;
-- name: GetTransactionByHash :one
SELECT * FROM bitcoin_transactions WHERE tx_hash = $1;
-- name: GetTransactionTxOutsByTxHashes :many
SELECT * FROM bitcoin_transaction_txouts WHERE tx_hash = ANY(@tx_hashes::TEXT[]);
-- name: GetTransactionTxInsByTxHashes :many
SELECT * FROM bitcoin_transaction_txins WHERE tx_hash = ANY(@tx_hashes::TEXT[]);
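The batch queries above rely on PostgreSQL's unnest() to turn parallel arrays into rows, so every *_arr argument must have the same length. A hedged sketch of filling the sqlc-generated params (values are dummies; gen.Queries and the params types are the generated code shown later in this PR):

package example

import (
	"context"
	"time"

	"github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen"
	"github.com/jackc/pgx/v5/pgtype"
)

// insertTwoBlocks shows how the parallel arrays map to two inserted rows.
func insertTwoBlocks(ctx context.Context, queries *gen.Queries) error {
	return queries.BatchInsertBlocks(ctx, gen.BatchInsertBlocksParams{
		BlockHeightArr:   []int32{100, 101},
		BlockHashArr:     []string{"hash100", "hash101"},
		VersionArr:       []int32{2, 2},
		MerkleRootArr:    []string{"merkle100", "merkle101"},
		PrevBlockHashArr: []string{"hash099", "hash100"},
		TimestampArr: []pgtype.Timestamptz{
			{Time: time.Now(), Valid: true},
			{Time: time.Now(), Valid: true},
		},
		BitsArr:  []int64{486604799, 486604799}, // 0x1d00ffff
		NonceArr: []int64{12345, 67890},
	})
}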


@@ -0,0 +1,8 @@
-- name: GetCurrentDBVersion :one
SELECT "version" FROM bitcoin_indexer_db_version ORDER BY id DESC LIMIT 1;
-- name: GetCurrentIndexerStats :one
SELECT "client_version", "network" FROM bitcoin_indexer_stats ORDER BY id DESC LIMIT 1;
-- name: UpdateIndexerStats :exec
INSERT INTO bitcoin_indexer_stats (client_version, network) VALUES ($1, $2);


@@ -0,0 +1,25 @@
package datagateway
import (
"context"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/core/types"
)
type BitcoinDataGateway interface {
BitcoinWriterDataDataGateway
BitcoinReaderDataDataGateway
}
type BitcoinWriterDataDataGateway interface {
InsertBlocks(ctx context.Context, blocks []*types.Block) error
RevertBlocks(context.Context, int64) error
}
type BitcoinReaderDataDataGateway interface {
GetLatestBlockHeader(context.Context) (types.BlockHeader, error)
GetBlockHeaderByHeight(ctx context.Context, blockHeight int64) (types.BlockHeader, error)
GetBlocksByHeightRange(ctx context.Context, from int64, to int64) ([]*types.Block, error)
GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error)
}


@@ -0,0 +1,13 @@
package datagateway
import (
"context"
"github.com/gaze-network/indexer-network/common"
)
type IndexerInformationDataGateway interface {
GetCurrentDBVersion(ctx context.Context) (int32, error)
GetLatestIndexerStats(ctx context.Context) (version string, network common.Network, err error)
UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error
}


@@ -0,0 +1,122 @@
package bitcoin
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexers"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/modules/bitcoin/datagateway"
)
// Make sure Processor implements the indexers.BitcoinProcessor interface
var _ indexers.BitcoinProcessor = (*Processor)(nil)
type Processor struct {
config config.Config
bitcoinDg datagateway.BitcoinDataGateway
indexerInfoDg datagateway.IndexerInformationDataGateway
}
func NewProcessor(config config.Config, bitcoinDg datagateway.BitcoinDataGateway, indexerInfoDg datagateway.IndexerInformationDataGateway) *Processor {
return &Processor{
config: config,
bitcoinDg: bitcoinDg,
indexerInfoDg: indexerInfoDg,
}
}
func (p Processor) Name() string {
return "bitcoin"
}
func (p *Processor) Process(ctx context.Context, inputs []*types.Block) error {
if len(inputs) == 0 {
return nil
}
// Process the given blocks before inserting to the database
inputs, err := p.process(ctx, inputs)
if err != nil {
return errors.WithStack(err)
}
// Insert blocks
if err := p.bitcoinDg.InsertBlocks(ctx, inputs); err != nil {
return errors.Wrapf(err, "error during insert blocks, from: %d, to: %d", inputs[0].Header.Height, inputs[len(inputs)-1].Header.Height)
}
return nil
}
func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
b, err := p.bitcoinDg.GetLatestBlockHeader(ctx)
if err != nil {
if errors.Is(err, errs.NotFound) {
return defaultCurrentBlock, nil
}
return types.BlockHeader{}, errors.WithStack(err)
}
return b, nil
}
func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
header, err := p.bitcoinDg.GetBlockHeaderByHeight(ctx, height)
if err != nil {
return types.BlockHeader{}, errors.WithStack(err)
}
return header, nil
}
func (p *Processor) RevertData(ctx context.Context, from int64) error {
// To prevent removing the txin/txout of the duplicated coinbase transactions in blocks 91842 and 91880.
// If you really want to revert data before block `227835`, you should reset the database and reindex the data instead.
if from <= lastV1Block.Height {
return errors.Wrapf(errs.InvalidArgument, "can't revert data before block version 2, height: %d", lastV1Block.Height)
}
if err := p.bitcoinDg.RevertBlocks(ctx, from); err != nil {
return errors.WithStack(err)
}
return nil
}
func (p *Processor) VerifyStates(ctx context.Context) error {
// Check current db version with the required db version
{
dbVersion, err := p.indexerInfoDg.GetCurrentDBVersion(ctx)
if err != nil {
return errors.Wrap(err, "can't get current db version")
}
if dbVersion != DBVersion {
return errors.Wrapf(errs.ConflictSetting, "db version mismatch, please upgrade to version %d", DBVersion)
}
}
// Check if the latest indexed network is mismatched with configured network
{
_, network, err := p.indexerInfoDg.GetLatestIndexerStats(ctx)
if err != nil {
if errors.Is(err, errs.NotFound) {
goto end
}
return errors.Wrap(err, "can't get latest indexer stats")
}
if network != p.config.Network {
return errors.Wrapf(errs.ConflictSetting, "network mismatch, latest indexed network: %q, configured network: %q. If you want to change the network, please reset the database", network, p.config.Network)
}
}
// TODO: Verify the states of the indexed data to ensure the last shutdown was graceful and no data is missing.
end:
if err := p.indexerInfoDg.UpdateIndexerStats(ctx, Version, p.config.Network); err != nil {
return errors.Wrap(err, "can't update indexer stats")
}
return nil
}


@@ -0,0 +1,91 @@
package bitcoin
import (
"cmp"
"context"
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
)
// process is a processing rules for the given blocks before inserting to the database
//
// this function will modify the given data directly.
func (p *Processor) process(ctx context.Context, blocks []*types.Block) ([]*types.Block, error) {
if len(blocks) == 0 {
return blocks, nil
}
// Sort ASC by block height
slices.SortFunc(blocks, func(t1, t2 *types.Block) int {
return cmp.Compare(t1.Header.Height, t2.Header.Height)
})
if !p.isContinueFromLatestIndexedBlock(ctx, blocks[0]) {
return nil, errors.New("given blocks are not continue from the latest indexed block")
}
if !p.isBlocksSequential(blocks) {
return nil, errors.New("given blocks are not in sequence")
}
p.removeDuplicateCoinbaseTxInputsOutputs(blocks)
return blocks, nil
}
// check if the given blocks are in sequence (each height is the previous height + 1)
// to prevent inserting out-of-order or missing blocks
func (p *Processor) isBlocksSequential(blocks []*types.Block) bool {
if len(blocks) == 0 {
return true
}
for i, block := range blocks {
if i == 0 {
continue
}
if block.Header.Height != blocks[i-1].Header.Height+1 {
return false
}
}
return true
}
// check if the given blocks are continue from the latest indexed block
// to prevent inserting out-of-order blocks or duplicate blocks
func (p *Processor) isContinueFromLatestIndexedBlock(ctx context.Context, block *types.Block) bool {
latestBlock, err := p.CurrentBlock(ctx)
if err != nil {
return false
}
return block.Header.Height == latestBlock.Height+1
}
// There are 2 coinbase transactions that are duplicated in blocks 91842 and 91880.
// if the given block version is v1 and height is `91842` or `91880`,
// then remove transaction inputs/outputs to prevent duplicate txin/txout error when inserting to the database.
//
// These duplicated coinbase transactions have the same transaction inputs/outputs, and the
// UTXOs from these 2 duplicated coinbase txs can be redeemed only once, so it's safe to remove them and
// use the inputs/outputs from the previous block.
//
// Duplicate Coinbase Transactions:
// - `d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599` in blocks 91812, 91842
// - `e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468` in blocks 91722, 91880
//
// This function will modify the given data directly.
func (p *Processor) removeDuplicateCoinbaseTxInputsOutputs(blocks []*types.Block) {
for _, block := range blocks {
header := block.Header
if header.Version == 1 && (header.Height == 91842 || header.Height == 91880) {
// remove transaction inputs/outputs from coinbase transaction (first transaction)
block.Transactions[0].TxIn = nil
block.Transactions[0].TxOut = nil
}
}
}


@@ -0,0 +1,144 @@
package bitcoin
import (
"fmt"
"testing"
"github.com/gaze-network/indexer-network/core/types"
"github.com/stretchr/testify/assert"
)
func TestDuplicateCoinbaseTxHashHandling(t *testing.T) {
processor := Processor{}
generator := func() []*types.Block {
return []*types.Block{
{
Header: types.BlockHeader{Height: 91842, Version: 1},
Transactions: []*types.Transaction{
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
},
},
{
Header: types.BlockHeader{Height: 91880, Version: 1},
Transactions: []*types.Transaction{
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
},
},
}
}
t.Run("all_duplicated_txs", func(t *testing.T) {
blocks := generator()
processor.removeDuplicateCoinbaseTxInputsOutputs(blocks)
assert.Len(t, blocks, 2, "should not remove any blocks")
for _, block := range blocks {
assert.Len(t, block.Transactions, 2, "should not remove any transactions")
assert.Len(t, block.Transactions[0].TxIn, 0, "should remove tx inputs from coinbase transaction")
assert.Len(t, block.Transactions[0].TxOut, 0, "should remove tx outputs from coinbase transaction")
}
})
t.Run("not_duplicated_txs", func(t *testing.T) {
blocks := []*types.Block{
{
Header: types.BlockHeader{Height: 91812, Version: 1},
Transactions: []*types.Transaction{
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
},
},
{
Header: types.BlockHeader{Height: 91722, Version: 1},
Transactions: []*types.Transaction{
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
},
},
}
processor.removeDuplicateCoinbaseTxInputsOutputs(blocks)
assert.Len(t, blocks, 2, "should not remove any blocks")
for _, block := range blocks {
assert.Len(t, block.Transactions, 2, "should not remove any transactions")
assert.Len(t, block.Transactions[0].TxIn, 4, "should not remove tx inputs from coinbase transaction")
assert.Len(t, block.Transactions[0].TxOut, 4, "should not remove tx outputs from coinbase transaction")
}
})
t.Run("mixed", func(t *testing.T) {
blocks := []*types.Block{
{
Header: types.BlockHeader{Height: 91812, Version: 1},
Transactions: []*types.Transaction{
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
},
},
}
blocks = append(blocks, generator()...)
blocks = append(blocks, &types.Block{
Header: types.BlockHeader{Height: 91722, Version: 1},
Transactions: []*types.Transaction{
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
{
TxIn: []*types.TxIn{{}, {}, {}, {}},
TxOut: []*types.TxOut{{}, {}, {}, {}},
},
},
})
processor.removeDuplicateCoinbaseTxInputsOutputs(blocks)
assert.Len(t, blocks, 4, "should not remove any blocks")
// only 2nd and 3rd blocks should be modified
for i, block := range blocks {
t.Run(fmt.Sprint(i), func(t *testing.T) {
if i == 1 || i == 2 {
assert.Len(t, block.Transactions, 2, "should not remove any transactions")
assert.Len(t, block.Transactions[0].TxIn, 0, "should remove tx inputs from coinbase transaction")
assert.Len(t, block.Transactions[0].TxOut, 0, "should remove tx outputs from coinbase transaction")
} else {
assert.Len(t, block.Transactions, 2, "should not remove any transactions")
assert.Lenf(t, block.Transactions[0].TxIn, 4, "should not remove tx inputs from coinbase transaction")
assert.Len(t, block.Transactions[0].TxOut, 4, "should not remove tx outputs from coinbase transaction")
}
})
}
})
}


@@ -0,0 +1,169 @@
package postgres
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen"
"github.com/jackc/pgx/v5"
"github.com/samber/lo"
)
func (r *Repository) GetLatestBlockHeader(ctx context.Context) (types.BlockHeader, error) {
model, err := r.queries.GetLatestBlockHeader(ctx)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return types.BlockHeader{}, errors.Join(errs.NotFound, err)
}
return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block header")
}
data, err := mapBlockHeaderModelToType(model)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to map block header model to type")
}
return data, nil
}
func (r *Repository) InsertBlocks(ctx context.Context, blocks []*types.Block) error {
if len(blocks) == 0 {
return nil
}
blockParams, txParams, txoutParams, txinParams := mapBlocksTypeToParams(blocks)
tx, err := r.db.Begin(ctx)
if err != nil {
return errors.Wrap(err, "failed to begin transaction")
}
defer tx.Rollback(ctx)
queries := r.queries.WithTx(tx)
if err := queries.BatchInsertBlocks(ctx, blockParams); err != nil {
return errors.Wrap(err, "failed to batch insert block headers")
}
if err := queries.BatchInsertTransactions(ctx, txParams); err != nil {
return errors.Wrap(err, "failed to batch insert transactions")
}
// Should insert txout first, then txin
// Because txin references txout
if err := queries.BatchInsertTransactionTxOuts(ctx, txoutParams); err != nil {
return errors.Wrap(err, "failed to batch insert transaction txins")
}
if err := queries.BatchInsertTransactionTxIns(ctx, txinParams); err != nil {
return errors.Wrap(err, "failed to batch insert transaction txins")
}
if err := tx.Commit(ctx); err != nil {
return errors.Wrap(err, "failed to commit transaction")
}
return nil
}
func (r *Repository) RevertBlocks(ctx context.Context, from int64) error {
tx, err := r.db.Begin(ctx)
if err != nil {
return errors.Wrap(err, "failed to begin transaction")
}
defer tx.Rollback(ctx)
queries := r.queries.WithTx(tx)
if err := queries.RevertData(ctx, int32(from)); err != nil && !errors.Is(err, pgx.ErrNoRows) {
return errors.Wrap(err, "failed to revert data")
}
if err := tx.Commit(ctx); err != nil {
return errors.Wrap(err, "failed to commit transaction")
}
return nil
}
func (r *Repository) GetBlockHeaderByHeight(ctx context.Context, blockHeight int64) (types.BlockHeader, error) {
blockModel, err := r.queries.GetBlockByHeight(ctx, int32(blockHeight))
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return types.BlockHeader{}, errors.Join(errs.NotFound, err)
}
return types.BlockHeader{}, errors.Wrap(err, "failed to get block by height")
}
data, err := mapBlockHeaderModelToType(blockModel)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to map block header model to type")
}
return data, nil
}
func (r *Repository) GetBlocksByHeightRange(ctx context.Context, from int64, to int64) ([]*types.Block, error) {
blocks, err := r.queries.GetBlocksByHeightRange(ctx, gen.GetBlocksByHeightRangeParams{
FromHeight: int32(from),
ToHeight: int32(to),
})
if err != nil {
return nil, errors.Wrap(err, "failed to get blocks by height range")
}
if len(blocks) == 0 {
return []*types.Block{}, nil
}
txs, err := r.queries.GetTransactionsByHeightRange(ctx, gen.GetTransactionsByHeightRangeParams{
FromHeight: int32(from),
ToHeight: int32(to),
})
if err != nil {
return nil, errors.Wrap(err, "failed to get transactions by height range")
}
txHashes := lo.Map(txs, func(tx gen.BitcoinTransaction, _ int) string { return tx.TxHash })
txOuts, err := r.queries.GetTransactionTxOutsByTxHashes(ctx, txHashes)
if err != nil {
return nil, errors.Wrap(err, "failed to get transaction txouts by tx hashes")
}
txIns, err := r.queries.GetTransactionTxInsByTxHashes(ctx, txHashes)
if err != nil {
return nil, errors.Wrap(err, "failed to get transaction txins by tx hashes")
}
// Grouping result by block height and tx hash
groupedTxs := lo.GroupBy(txs, func(tx gen.BitcoinTransaction) int32 { return tx.BlockHeight })
groupedTxOuts := lo.GroupBy(txOuts, func(txOut gen.BitcoinTransactionTxout) string { return txOut.TxHash })
groupedTxIns := lo.GroupBy(txIns, func(txIn gen.BitcoinTransactionTxin) string { return txIn.TxHash })
var errs []error
result := lo.Map(blocks, func(blockModel gen.BitcoinBlock, _ int) *types.Block {
header, err := mapBlockHeaderModelToType(blockModel)
if err != nil {
errs = append(errs, errors.Wrap(err, "failed to map block header model to type"))
return nil
}
txsModel := groupedTxs[blockModel.BlockHeight]
return &types.Block{
Header: header,
Transactions: lo.Map(txsModel, func(txModel gen.BitcoinTransaction, _ int) *types.Transaction {
tx, err := mapTransactionModelToType(txModel, groupedTxIns[txModel.TxHash], groupedTxOuts[txModel.TxHash])
if err != nil {
errs = append(errs, errors.Wrap(err, "failed to map transaction model to type"))
return nil
}
return &tx
}),
}
})
if len(errs) > 0 {
return nil, errors.Wrap(errors.Join(errs...), "failed while mapping result")
}
return result, nil
}


@@ -0,0 +1,408 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: data.sql
package gen
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
const batchInsertBlocks = `-- name: BatchInsertBlocks :exec
INSERT INTO bitcoin_blocks ("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce")
VALUES (
unnest($1::INT[]),
unnest($2::TEXT[]),
unnest($3::INT[]),
unnest($4::TEXT[]),
unnest($5::TEXT[]),
unnest($6::TIMESTAMP WITH TIME ZONE[]), -- or use TIMESTAMPTZ
unnest($7::BIGINT[]),
unnest($8::BIGINT[])
)
`
type BatchInsertBlocksParams struct {
BlockHeightArr []int32
BlockHashArr []string
VersionArr []int32
MerkleRootArr []string
PrevBlockHashArr []string
TimestampArr []pgtype.Timestamptz
BitsArr []int64
NonceArr []int64
}
func (q *Queries) BatchInsertBlocks(ctx context.Context, arg BatchInsertBlocksParams) error {
_, err := q.db.Exec(ctx, batchInsertBlocks,
arg.BlockHeightArr,
arg.BlockHashArr,
arg.VersionArr,
arg.MerkleRootArr,
arg.PrevBlockHashArr,
arg.TimestampArr,
arg.BitsArr,
arg.NonceArr,
)
return err
}
const batchInsertTransactionTxIns = `-- name: BatchInsertTransactionTxIns :exec
WITH update_txout AS (
UPDATE "bitcoin_transaction_txouts"
SET "is_spent" = true
FROM (SELECT unnest($1::TEXT[]) as tx_hash, unnest($2::INT[]) as tx_idx) as txin
WHERE "bitcoin_transaction_txouts"."tx_hash" = txin.tx_hash AND "bitcoin_transaction_txouts"."tx_idx" = txin.tx_idx AND "is_spent" = false
RETURNING "bitcoin_transaction_txouts"."tx_hash", "bitcoin_transaction_txouts"."tx_idx", "pkscript"
), prepare_insert AS (
SELECT input.tx_hash, input.tx_idx, prevout_tx_hash, prevout_tx_idx, update_txout.pkscript as prevout_pkscript, scriptsig, witness, sequence
FROM (
SELECT
unnest($3::TEXT[]) as tx_hash,
unnest($4::INT[]) as tx_idx,
unnest($1::TEXT[]) as prevout_tx_hash,
unnest($2::INT[]) as prevout_tx_idx,
unnest($5::TEXT[]) as scriptsig,
unnest($6::TEXT[]) as witness,
unnest($7::INT[]) as sequence
) input LEFT JOIN update_txout ON "update_txout"."tx_hash" = "input"."prevout_tx_hash" AND "update_txout"."tx_idx" = "input"."prevout_tx_idx"
)
INSERT INTO bitcoin_transaction_txins ("tx_hash","tx_idx","prevout_tx_hash","prevout_tx_idx", "prevout_pkscript","scriptsig","witness","sequence")
SELECT "tx_hash", "tx_idx", "prevout_tx_hash", "prevout_tx_idx", "prevout_pkscript", "scriptsig", "witness", "sequence" FROM prepare_insert
`
type BatchInsertTransactionTxInsParams struct {
PrevoutTxHashArr []string
PrevoutTxIdxArr []int32
TxHashArr []string
TxIdxArr []int32
ScriptsigArr []string
WitnessArr []string
SequenceArr []int32
}
func (q *Queries) BatchInsertTransactionTxIns(ctx context.Context, arg BatchInsertTransactionTxInsParams) error {
_, err := q.db.Exec(ctx, batchInsertTransactionTxIns,
arg.PrevoutTxHashArr,
arg.PrevoutTxIdxArr,
arg.TxHashArr,
arg.TxIdxArr,
arg.ScriptsigArr,
arg.WitnessArr,
arg.SequenceArr,
)
return err
}
const batchInsertTransactionTxOuts = `-- name: BatchInsertTransactionTxOuts :exec
INSERT INTO bitcoin_transaction_txouts ("tx_hash","tx_idx","pkscript","value")
VALUES (
unnest($1::TEXT[]),
unnest($2::INT[]),
unnest($3::TEXT[]),
unnest($4::BIGINT[])
)
`
type BatchInsertTransactionTxOutsParams struct {
TxHashArr []string
TxIdxArr []int32
PkscriptArr []string
ValueArr []int64
}
func (q *Queries) BatchInsertTransactionTxOuts(ctx context.Context, arg BatchInsertTransactionTxOutsParams) error {
_, err := q.db.Exec(ctx, batchInsertTransactionTxOuts,
arg.TxHashArr,
arg.TxIdxArr,
arg.PkscriptArr,
arg.ValueArr,
)
return err
}
const batchInsertTransactions = `-- name: BatchInsertTransactions :exec
INSERT INTO bitcoin_transactions ("tx_hash","version","locktime","block_height","block_hash","idx")
VALUES (
unnest($1::TEXT[]),
unnest($2::INT[]),
unnest($3::BIGINT[]),
unnest($4::INT[]),
unnest($5::TEXT[]),
unnest($6::INT[])
)
`
type BatchInsertTransactionsParams struct {
TxHashArr []string
VersionArr []int32
LocktimeArr []int64
BlockHeightArr []int32
BlockHashArr []string
IdxArr []int32
}
func (q *Queries) BatchInsertTransactions(ctx context.Context, arg BatchInsertTransactionsParams) error {
_, err := q.db.Exec(ctx, batchInsertTransactions,
arg.TxHashArr,
arg.VersionArr,
arg.LocktimeArr,
arg.BlockHeightArr,
arg.BlockHashArr,
arg.IdxArr,
)
return err
}
const getBlockByHeight = `-- name: GetBlockByHeight :one
SELECT block_height, block_hash, version, merkle_root, prev_block_hash, timestamp, bits, nonce FROM bitcoin_blocks WHERE block_height = $1
`
func (q *Queries) GetBlockByHeight(ctx context.Context, blockHeight int32) (BitcoinBlock, error) {
row := q.db.QueryRow(ctx, getBlockByHeight, blockHeight)
var i BitcoinBlock
err := row.Scan(
&i.BlockHeight,
&i.BlockHash,
&i.Version,
&i.MerkleRoot,
&i.PrevBlockHash,
&i.Timestamp,
&i.Bits,
&i.Nonce,
)
return i, err
}
const getBlocksByHeightRange = `-- name: GetBlocksByHeightRange :many
SELECT block_height, block_hash, version, merkle_root, prev_block_hash, timestamp, bits, nonce FROM bitcoin_blocks WHERE block_height >= $1 AND block_height <= $2 ORDER BY block_height ASC
`
type GetBlocksByHeightRangeParams struct {
FromHeight int32
ToHeight int32
}
func (q *Queries) GetBlocksByHeightRange(ctx context.Context, arg GetBlocksByHeightRangeParams) ([]BitcoinBlock, error) {
rows, err := q.db.Query(ctx, getBlocksByHeightRange, arg.FromHeight, arg.ToHeight)
if err != nil {
return nil, err
}
defer rows.Close()
var items []BitcoinBlock
for rows.Next() {
var i BitcoinBlock
if err := rows.Scan(
&i.BlockHeight,
&i.BlockHash,
&i.Version,
&i.MerkleRoot,
&i.PrevBlockHash,
&i.Timestamp,
&i.Bits,
&i.Nonce,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getLatestBlockHeader = `-- name: GetLatestBlockHeader :one
SELECT block_height, block_hash, version, merkle_root, prev_block_hash, timestamp, bits, nonce FROM bitcoin_blocks ORDER BY block_height DESC LIMIT 1
`
func (q *Queries) GetLatestBlockHeader(ctx context.Context) (BitcoinBlock, error) {
row := q.db.QueryRow(ctx, getLatestBlockHeader)
var i BitcoinBlock
err := row.Scan(
&i.BlockHeight,
&i.BlockHash,
&i.Version,
&i.MerkleRoot,
&i.PrevBlockHash,
&i.Timestamp,
&i.Bits,
&i.Nonce,
)
return i, err
}
const getTransactionByHash = `-- name: GetTransactionByHash :one
SELECT tx_hash, version, locktime, block_height, block_hash, idx FROM bitcoin_transactions WHERE tx_hash = $1
`
func (q *Queries) GetTransactionByHash(ctx context.Context, txHash string) (BitcoinTransaction, error) {
row := q.db.QueryRow(ctx, getTransactionByHash, txHash)
var i BitcoinTransaction
err := row.Scan(
&i.TxHash,
&i.Version,
&i.Locktime,
&i.BlockHeight,
&i.BlockHash,
&i.Idx,
)
return i, err
}
const getTransactionTxInsByTxHashes = `-- name: GetTransactionTxInsByTxHashes :many
SELECT tx_hash, tx_idx, prevout_tx_hash, prevout_tx_idx, prevout_pkscript, scriptsig, witness, sequence FROM bitcoin_transaction_txins WHERE tx_hash = ANY($1::TEXT[])
`
func (q *Queries) GetTransactionTxInsByTxHashes(ctx context.Context, txHashes []string) ([]BitcoinTransactionTxin, error) {
rows, err := q.db.Query(ctx, getTransactionTxInsByTxHashes, txHashes)
if err != nil {
return nil, err
}
defer rows.Close()
var items []BitcoinTransactionTxin
for rows.Next() {
var i BitcoinTransactionTxin
if err := rows.Scan(
&i.TxHash,
&i.TxIdx,
&i.PrevoutTxHash,
&i.PrevoutTxIdx,
&i.PrevoutPkscript,
&i.Scriptsig,
&i.Witness,
&i.Sequence,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getTransactionTxOutsByTxHashes = `-- name: GetTransactionTxOutsByTxHashes :many
SELECT tx_hash, tx_idx, pkscript, value, is_spent FROM bitcoin_transaction_txouts WHERE tx_hash = ANY($1::TEXT[])
`
func (q *Queries) GetTransactionTxOutsByTxHashes(ctx context.Context, txHashes []string) ([]BitcoinTransactionTxout, error) {
rows, err := q.db.Query(ctx, getTransactionTxOutsByTxHashes, txHashes)
if err != nil {
return nil, err
}
defer rows.Close()
var items []BitcoinTransactionTxout
for rows.Next() {
var i BitcoinTransactionTxout
if err := rows.Scan(
&i.TxHash,
&i.TxIdx,
&i.Pkscript,
&i.Value,
&i.IsSpent,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getTransactionsByHeightRange = `-- name: GetTransactionsByHeightRange :many
SELECT tx_hash, version, locktime, block_height, block_hash, idx FROM bitcoin_transactions WHERE block_height >= $1 AND block_height <= $2
`
type GetTransactionsByHeightRangeParams struct {
FromHeight int32
ToHeight int32
}
func (q *Queries) GetTransactionsByHeightRange(ctx context.Context, arg GetTransactionsByHeightRangeParams) ([]BitcoinTransaction, error) {
rows, err := q.db.Query(ctx, getTransactionsByHeightRange, arg.FromHeight, arg.ToHeight)
if err != nil {
return nil, err
}
defer rows.Close()
var items []BitcoinTransaction
for rows.Next() {
var i BitcoinTransaction
if err := rows.Scan(
&i.TxHash,
&i.Version,
&i.Locktime,
&i.BlockHeight,
&i.BlockHash,
&i.Idx,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const insertBlock = `-- name: InsertBlock :exec
INSERT INTO bitcoin_blocks ("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce") VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
`
type InsertBlockParams struct {
BlockHeight int32
BlockHash string
Version int32
MerkleRoot string
PrevBlockHash string
Timestamp pgtype.Timestamptz
Bits int64
Nonce int64
}
func (q *Queries) InsertBlock(ctx context.Context, arg InsertBlockParams) error {
_, err := q.db.Exec(ctx, insertBlock,
arg.BlockHeight,
arg.BlockHash,
arg.Version,
arg.MerkleRoot,
arg.PrevBlockHash,
arg.Timestamp,
arg.Bits,
arg.Nonce,
)
return err
}
const revertData = `-- name: RevertData :exec
WITH delete_tx AS (
DELETE FROM "bitcoin_transactions" WHERE "block_height" >= $1
RETURNING "tx_hash"
), delete_txin AS (
DELETE FROM "bitcoin_transaction_txins" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx)
RETURNING "prevout_tx_hash", "prevout_tx_idx"
), delete_txout AS (
DELETE FROM "bitcoin_transaction_txouts" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx)
RETURNING "tx_hash", "tx_idx"
), revert_txout_spent AS (
UPDATE "bitcoin_transaction_txouts"
SET "is_spent" = false
WHERE
("tx_hash", "tx_idx") IN (SELECT "prevout_tx_hash", "prevout_tx_idx" FROM delete_txin) AND
("tx_hash", "tx_idx") NOT IN (SELECT "tx_hash", "tx_idx" FROM delete_txout) -- avoid to modified same row twice (modified the same row twice in a single statement is not supported)
RETURNING NULL
)
DELETE FROM "bitcoin_blocks" WHERE "bitcoin_blocks"."block_height" >= $1
`
func (q *Queries) RevertData(ctx context.Context, fromHeight int32) error {
_, err := q.db.Exec(ctx, revertData, fromHeight)
return err
}


@@ -0,0 +1,32 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
)
type DBTX interface {
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
QueryRow(context.Context, string, ...interface{}) pgx.Row
}
func New(db DBTX) *Queries {
return &Queries{db: db}
}
type Queries struct {
db DBTX
}
func (q *Queries) WithTx(tx pgx.Tx) *Queries {
return &Queries{
db: tx,
}
}


@@ -0,0 +1,51 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: info.sql
package gen
import (
"context"
)
const getCurrentDBVersion = `-- name: GetCurrentDBVersion :one
SELECT "version" FROM bitcoin_indexer_db_version ORDER BY id DESC LIMIT 1
`
func (q *Queries) GetCurrentDBVersion(ctx context.Context) (int32, error) {
row := q.db.QueryRow(ctx, getCurrentDBVersion)
var version int32
err := row.Scan(&version)
return version, err
}
const getCurrentIndexerStats = `-- name: GetCurrentIndexerStats :one
SELECT "client_version", "network" FROM bitcoin_indexer_stats ORDER BY id DESC LIMIT 1
`
type GetCurrentIndexerStatsRow struct {
ClientVersion string
Network string
}
func (q *Queries) GetCurrentIndexerStats(ctx context.Context) (GetCurrentIndexerStatsRow, error) {
row := q.db.QueryRow(ctx, getCurrentIndexerStats)
var i GetCurrentIndexerStatsRow
err := row.Scan(&i.ClientVersion, &i.Network)
return i, err
}
const updateIndexerStats = `-- name: UpdateIndexerStats :exec
INSERT INTO bitcoin_indexer_stats (client_version, network) VALUES ($1, $2)
`
type UpdateIndexerStatsParams struct {
ClientVersion string
Network string
}
func (q *Queries) UpdateIndexerStats(ctx context.Context, arg UpdateIndexerStatsParams) error {
_, err := q.db.Exec(ctx, updateIndexerStats, arg.ClientVersion, arg.Network)
return err
}


@@ -0,0 +1,61 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"github.com/jackc/pgx/v5/pgtype"
)
type BitcoinBlock struct {
BlockHeight int32
BlockHash string
Version int32
MerkleRoot string
PrevBlockHash string
Timestamp pgtype.Timestamptz
Bits int64
Nonce int64
}
type BitcoinIndexerDbVersion struct {
Id int64
Version int32
CreatedAt pgtype.Timestamptz
}
type BitcoinIndexerStat struct {
Id int64
ClientVersion string
Network string
CreatedAt pgtype.Timestamptz
}
type BitcoinTransaction struct {
TxHash string
Version int32
Locktime int64
BlockHeight int32
BlockHash string
Idx int32
}
type BitcoinTransactionTxin struct {
TxHash string
TxIdx int32
PrevoutTxHash string
PrevoutTxIdx int32
PrevoutPkscript pgtype.Text
Scriptsig string
Witness string
Sequence int64
}
type BitcoinTransactionTxout struct {
TxHash string
TxIdx int32
Pkscript string
Value int64
IsSpent bool
}


@@ -0,0 +1,44 @@
package postgres
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/bitcoin/datagateway"
"github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen"
"github.com/jackc/pgx/v5"
)
// Make sure Repository implements the IndexerInformationDataGateway interface
var _ datagateway.IndexerInformationDataGateway = (*Repository)(nil)
func (r *Repository) GetCurrentDBVersion(ctx context.Context) (int32, error) {
version, err := r.queries.GetCurrentDBVersion(ctx)
if err != nil {
return 0, errors.WithStack(err)
}
return version, nil
}
func (r *Repository) GetLatestIndexerStats(ctx context.Context) (string, common.Network, error) {
stats, err := r.queries.GetCurrentIndexerStats(ctx)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return "", "", errors.Join(errs.NotFound, err)
}
return "", "", errors.WithStack(err)
}
return stats.ClientVersion, common.Network(stats.Network), nil
}
func (r *Repository) UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error {
if err := r.queries.UpdateIndexerStats(ctx, gen.UpdateIndexerStatsParams{
ClientVersion: clientVersion,
Network: network.String(),
}); err != nil {
return errors.WithStack(err)
}
return nil
}


@@ -0,0 +1,197 @@
package postgres
import (
"cmp"
"encoding/hex"
"slices"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen"
"github.com/gaze-network/indexer-network/pkg/btcutils"
"github.com/jackc/pgx/v5/pgtype"
)
func mapBlockHeaderModelToType(src gen.BitcoinBlock) (types.BlockHeader, error) {
hash, err := chainhash.NewHashFromStr(src.BlockHash)
if err != nil {
return types.BlockHeader{}, errors.Join(errors.Wrap(err, "failed to parse block hash"), errs.InternalError)
}
prevHash, err := chainhash.NewHashFromStr(src.PrevBlockHash)
if err != nil {
return types.BlockHeader{}, errors.Join(errors.Wrap(err, "failed to parse prev block hash"), errs.InternalError)
}
merkleRoot, err := chainhash.NewHashFromStr(src.MerkleRoot)
if err != nil {
return types.BlockHeader{}, errors.Join(errors.Wrap(err, "failed to parse merkle root"), errs.InternalError)
}
return types.BlockHeader{
Hash: *hash,
Height: int64(src.BlockHeight),
Version: src.Version,
PrevBlock: *prevHash,
MerkleRoot: *merkleRoot,
Timestamp: src.Timestamp.Time,
Bits: uint32(src.Bits),
Nonce: uint32(src.Nonce),
}, nil
}
func mapBlocksTypeToParams(src []*types.Block) (gen.BatchInsertBlocksParams, gen.BatchInsertTransactionsParams, gen.BatchInsertTransactionTxOutsParams, gen.BatchInsertTransactionTxInsParams) {
blocks := gen.BatchInsertBlocksParams{
BlockHeightArr: make([]int32, 0, len(src)),
BlockHashArr: make([]string, 0, len(src)),
VersionArr: make([]int32, 0, len(src)),
MerkleRootArr: make([]string, 0, len(src)),
PrevBlockHashArr: make([]string, 0, len(src)),
TimestampArr: make([]pgtype.Timestamptz, 0, len(src)),
BitsArr: make([]int64, 0, len(src)),
NonceArr: make([]int64, 0, len(src)),
}
txs := gen.BatchInsertTransactionsParams{
TxHashArr: []string{},
VersionArr: []int32{},
LocktimeArr: []int64{},
BlockHeightArr: []int32{},
BlockHashArr: []string{},
IdxArr: []int32{},
}
txouts := gen.BatchInsertTransactionTxOutsParams{
TxHashArr: []string{},
TxIdxArr: []int32{},
PkscriptArr: []string{},
ValueArr: []int64{},
}
txins := gen.BatchInsertTransactionTxInsParams{
PrevoutTxHashArr: []string{},
PrevoutTxIdxArr: []int32{},
TxHashArr: []string{},
TxIdxArr: []int32{},
ScriptsigArr: []string{},
WitnessArr: []string{},
SequenceArr: []int32{},
}
for _, block := range src {
blockHash := block.Header.Hash.String()
// Batch insert blocks
blocks.BlockHeightArr = append(blocks.BlockHeightArr, int32(block.Header.Height))
blocks.BlockHashArr = append(blocks.BlockHashArr, blockHash)
blocks.VersionArr = append(blocks.VersionArr, block.Header.Version)
blocks.MerkleRootArr = append(blocks.MerkleRootArr, block.Header.MerkleRoot.String())
blocks.PrevBlockHashArr = append(blocks.PrevBlockHashArr, block.Header.PrevBlock.String())
blocks.TimestampArr = append(blocks.TimestampArr, pgtype.Timestamptz{
Time: block.Header.Timestamp,
Valid: true,
})
blocks.BitsArr = append(blocks.BitsArr, int64(block.Header.Bits))
blocks.NonceArr = append(blocks.NonceArr, int64(block.Header.Nonce))
for txIdx, srcTx := range block.Transactions {
txHash := srcTx.TxHash.String()
// Batch insert transactions
txs.TxHashArr = append(txs.TxHashArr, txHash)
txs.VersionArr = append(txs.VersionArr, srcTx.Version)
txs.LocktimeArr = append(txs.LocktimeArr, int64(srcTx.LockTime))
txs.BlockHeightArr = append(txs.BlockHeightArr, int32(block.Header.Height))
txs.BlockHashArr = append(txs.BlockHashArr, blockHash)
txs.IdxArr = append(txs.IdxArr, int32(txIdx))
// Batch insert txins
for idx, txin := range srcTx.TxIn {
var witness string
if len(txin.Witness) > 0 {
witness = btcutils.WitnessToString(txin.Witness)
}
txins.TxHashArr = append(txins.TxHashArr, txHash)
txins.TxIdxArr = append(txins.TxIdxArr, int32(idx))
txins.PrevoutTxHashArr = append(txins.PrevoutTxHashArr, txin.PreviousOutTxHash.String())
txins.PrevoutTxIdxArr = append(txins.PrevoutTxIdxArr, int32(txin.PreviousOutIndex))
txins.ScriptsigArr = append(txins.ScriptsigArr, hex.EncodeToString(txin.SignatureScript))
txins.WitnessArr = append(txins.WitnessArr, witness)
txins.SequenceArr = append(txins.SequenceArr, int32(txin.Sequence))
}
// Batch insert txouts
for idx, txout := range srcTx.TxOut {
txouts.TxHashArr = append(txouts.TxHashArr, txHash)
txouts.TxIdxArr = append(txouts.TxIdxArr, int32(idx))
txouts.PkscriptArr = append(txouts.PkscriptArr, hex.EncodeToString(txout.PkScript))
txouts.ValueArr = append(txouts.ValueArr, txout.Value)
}
}
}
return blocks, txs, txouts, txins
}
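A hedged sketch of how the four parameter structs are meant to be consumed, assuming the sqlc-generated methods implied by the gen.BatchInsert*Params names (each taking its Params struct and returning an error; those generated methods, and the context import, are not shown in this diff):

func insertBlockBatch(ctx context.Context, q *gen.Queries, src []*types.Block) error {
    blocks, txs, txouts, txins := mapBlocksTypeToParams(src)
    if err := q.BatchInsertBlocks(ctx, blocks); err != nil {
        return errors.Wrap(err, "failed to batch insert blocks")
    }
    if err := q.BatchInsertTransactions(ctx, txs); err != nil {
        return errors.Wrap(err, "failed to batch insert transactions")
    }
    if err := q.BatchInsertTransactionTxOuts(ctx, txouts); err != nil {
        return errors.Wrap(err, "failed to batch insert txouts")
    }
    if err := q.BatchInsertTransactionTxIns(ctx, txins); err != nil {
        return errors.Wrap(err, "failed to batch insert txins")
    }
    return nil
}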
func mapTransactionModelToType(src gen.BitcoinTransaction, txInModel []gen.BitcoinTransactionTxin, txOutModels []gen.BitcoinTransactionTxout) (types.Transaction, error) {
blockHash, err := chainhash.NewHashFromStr(src.BlockHash)
if err != nil {
return types.Transaction{}, errors.Wrap(err, "failed to parse block hash")
}
txHash, err := chainhash.NewHashFromStr(src.TxHash)
if err != nil {
return types.Transaction{}, errors.Wrap(err, "failed to parse tx hash")
}
// Sort txins and txouts by index (Asc)
slices.SortFunc(txOutModels, func(i, j gen.BitcoinTransactionTxout) int {
return cmp.Compare(i.TxIdx, j.TxIdx)
})
slices.SortFunc(txInModel, func(i, j gen.BitcoinTransactionTxin) int {
return cmp.Compare(i.TxIdx, j.TxIdx)
})
txIns := make([]*types.TxIn, 0, len(txInModel))
txOuts := make([]*types.TxOut, 0, len(txOutModels))
for _, txInModel := range txInModel {
scriptsig, err := hex.DecodeString(txInModel.Scriptsig)
if err != nil {
return types.Transaction{}, errors.Wrap(err, "failed to decode scriptsig")
}
prevoutTxHash, err := chainhash.NewHashFromStr(txInModel.PrevoutTxHash)
if err != nil {
return types.Transaction{}, errors.Wrap(err, "failed to parse prevout tx hash")
}
witness, err := btcutils.WitnessFromString(txInModel.Witness)
if err != nil {
return types.Transaction{}, errors.Wrap(err, "failed to parse witness from hex string")
}
txIns = append(txIns, &types.TxIn{
SignatureScript: scriptsig,
Witness: witness,
Sequence: uint32(txInModel.Sequence),
PreviousOutIndex: uint32(txInModel.PrevoutTxIdx),
PreviousOutTxHash: *prevoutTxHash,
})
}
for _, txOutModel := range txOutModels {
pkscript, err := hex.DecodeString(txOutModel.Pkscript)
if err != nil {
return types.Transaction{}, errors.Wrap(err, "failed to decode pkscript")
}
txOuts = append(txOuts, &types.TxOut{
PkScript: pkscript,
Value: txOutModel.Value,
})
}
return types.Transaction{
BlockHeight: int64(src.BlockHeight),
BlockHash: *blockHash,
Index: uint32(src.Idx),
TxHash: *txHash,
Version: src.Version,
LockTime: uint32(src.Locktime),
TxIn: txIns,
TxOut: txOuts,
}, nil
}


@@ -0,0 +1,22 @@
package postgres
import (
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/bitcoin/datagateway"
"github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen"
)
// Make sure Repository implements the BitcoinDataGateway interface
var _ datagateway.BitcoinDataGateway = (*Repository)(nil)
type Repository struct {
db postgres.DB
queries *gen.Queries
}
func NewRepository(db postgres.DB) *Repository {
return &Repository{
db: db,
queries: gen.New(db),
}
}


@@ -0,0 +1,35 @@
package postgres
import (
"context"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/jackc/pgx/v5"
)
func (r *Repository) GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) {
model, err := r.queries.GetTransactionByHash(ctx, txHash.String())
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return nil, errors.Join(errs.NotFound, err)
}
return nil, errors.Wrap(err, "failed to get transaction by hash")
}
txIns, err := r.queries.GetTransactionTxInsByTxHashes(ctx, []string{txHash.String()})
if err != nil {
return nil, errors.Wrap(err, "failed to get transaction txins by tx hashes")
}
txOuts, err := r.queries.GetTransactionTxOutsByTxHashes(ctx, []string{txHash.String()})
if err != nil && !errors.Is(err, pgx.ErrNoRows) {
return nil, errors.Wrap(err, "failed to get transaction txouts by tx hashes")
}
tx, err := mapTransactionModelToType(model, txIns, txOuts)
if err != nil {
return nil, errors.Wrap(err, "failed to map transaction model to type")
}
return &tx, nil
}


modules/runes/api/api.go

@@ -0,0 +1,11 @@
package api
import (
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/runes/api/httphandler"
"github.com/gaze-network/indexer-network/modules/runes/usecase"
)
func NewHTTPHandler(network common.Network, usecase *usecase.Usecase) *httphandler.HttpHandler {
return httphandler.New(network, usecase)
}


@@ -0,0 +1,116 @@
package httphandler
import (
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getBalancesByAddressRequest struct {
Wallet string `params:"wallet"`
Id string `query:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getBalancesByAddressRequest) Validate() error {
var errList []error
if r.Wallet == "" {
errList = append(errList, errors.New("'wallet' is required"))
}
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type balance struct {
Amount uint128.Uint128 `json:"amount"`
Id runes.RuneId `json:"id"`
Name runes.SpacedRune `json:"name"`
Symbol string `json:"symbol"`
Decimals uint8 `json:"decimals"`
}
type getBalancesByAddressResult struct {
List []balance `json:"list"`
BlockHeight uint64 `json:"blockHeight"`
}
type getBalancesByAddressResponse = HttpResponse[getBalancesByAddressResult]
func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
var req getBalancesByAddressRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
pkScript, ok := resolvePkScript(h.network, req.Wallet)
if !ok {
return errs.NewPublicError("unable to resolve pkscript from \"wallet\"")
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
balances, err := h.usecase.GetBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetBalancesByPkScript")
}
runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id)
if ok {
// filter out balances that don't match the requested rune id
for key := range balances {
if key != runeId {
delete(balances, key)
}
}
}
balanceRuneIds := lo.Keys(balances)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), balanceRuneIds)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
balanceList := make([]balance, 0, len(balances))
for id, b := range balances {
runeEntry := runeEntries[id]
balanceList = append(balanceList, balance{
Amount: b.Amount,
Id: id,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Decimals: runeEntry.Divisibility,
})
}
slices.SortFunc(balanceList, func(i, j balance) int {
return j.Amount.Cmp(i.Amount)
})
resp := getBalancesByAddressResponse{
Result: &getBalancesByAddressResult{
BlockHeight: blockHeight,
List: balanceList,
},
}
return errors.WithStack(ctx.JSON(resp))
}


@@ -0,0 +1,139 @@
package httphandler
import (
"context"
"fmt"
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
"golang.org/x/sync/errgroup"
)
type getBalanceQuery struct {
Wallet string `json:"wallet"`
Id string `json:"id"`
BlockHeight uint64 `json:"blockHeight"`
}
type getBalancesByAddressBatchRequest struct {
Queries []getBalanceQuery `json:"queries"`
}
func (r getBalancesByAddressBatchRequest) Validate() error {
var errList []error
for i, query := range r.Queries {
if query.Wallet == "" {
errList = append(errList, errors.Errorf("queries[%d]: 'wallet' is required", i))
}
if query.Id != "" && !isRuneIdOrRuneName(query.Id) {
errList = append(errList, errors.Errorf("queries[%d]: 'id' is not valid rune id or rune name", i))
}
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type getBalancesByAddressBatchResult struct {
List []*getBalancesByAddressResult `json:"list"`
}
type getBalancesByAddressBatchResponse = HttpResponse[getBalancesByAddressBatchResult]
func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
var req getBalancesByAddressBatchRequest
if err := ctx.BodyParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
var latestBlockHeight uint64
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
latestBlockHeight = uint64(blockHeader.Height)
processQuery := func(ctx context.Context, query getBalanceQuery, queryIndex int) (*getBalancesByAddressResult, error) {
pkScript, ok := resolvePkScript(h.network, query.Wallet)
if !ok {
return nil, errs.NewPublicError(fmt.Sprintf("unable to resolve pkscript from \"queries[%d].wallet\"", queryIndex))
}
blockHeight := query.BlockHeight
if blockHeight == 0 {
blockHeight = latestBlockHeight
}
balances, err := h.usecase.GetBalancesByPkScript(ctx, pkScript, blockHeight)
if err != nil {
return nil, errors.Wrap(err, "error during GetBalancesByPkScript")
}
runeId, ok := h.resolveRuneId(ctx, query.Id)
if ok {
// filter out balances that don't match the requested rune id
for key := range balances {
if key != runeId {
delete(balances, key)
}
}
}
balanceRuneIds := lo.Keys(balances)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx, balanceRuneIds)
if err != nil {
return nil, errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
balanceList := make([]balance, 0, len(balances))
for id, b := range balances {
runeEntry := runeEntries[id]
balanceList = append(balanceList, balance{
Amount: b.Amount,
Id: id,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Decimals: runeEntry.Divisibility,
})
}
slices.SortFunc(balanceList, func(i, j balance) int {
return j.Amount.Cmp(i.Amount)
})
result := getBalancesByAddressResult{
BlockHeight: blockHeight,
List: balanceList,
}
return &result, nil
}
results := make([]*getBalancesByAddressResult, len(req.Queries))
eg, ectx := errgroup.WithContext(ctx.UserContext())
for i, query := range req.Queries {
i := i
query := query
eg.Go(func() error {
result, err := processQuery(ectx, query, i)
if err != nil {
return errors.Wrapf(err, "error during processQuery for query %d", i)
}
results[i] = result
return nil
})
}
if err := eg.Wait(); err != nil {
return errors.WithStack(err)
}
resp := getBalancesByAddressBatchResponse{
Result: &getBalancesByAddressBatchResult{
List: results,
},
}
return errors.WithStack(ctx.JSON(resp))
}


@@ -0,0 +1,50 @@
package httphandler
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gofiber/fiber/v2"
)
var startingBlockHeader = map[common.Network]types.BlockHeader{
common.NetworkMainnet: {
Height: 839999,
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")),
},
common.NetworkTestnet: {
Height: 2583200,
Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")),
},
}
type getCurrentBlockResult struct {
Hash string `json:"hash"`
Height int64 `json:"height"`
}
type getCurrentBlockResponse = HttpResponse[getCurrentBlockResult]
func (h *HttpHandler) GetCurrentBlock(ctx *fiber.Ctx) (err error) {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeader = startingBlockHeader[h.network]
}
resp := getCurrentBlockResponse{
Result: &getCurrentBlockResult{
Hash: blockHeader.Hash.String(),
Height: blockHeader.Height,
},
}
return errors.WithStack(ctx.JSON(resp))
}


@@ -0,0 +1,114 @@
package httphandler
import (
"encoding/hex"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/shopspring/decimal"
)
type getHoldersRequest struct {
Id string `params:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getHoldersRequest) Validate() error {
var errList []error
if !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type holdingBalance struct {
Address string `json:"address"`
PkScript string `json:"pkScript"`
Amount uint128.Uint128 `json:"amount"`
Percent float64 `json:"percent"`
}
type getHoldersResult struct {
BlockHeight uint64 `json:"blockHeight"`
TotalSupply uint128.Uint128 `json:"totalSupply"`
MintedAmount uint128.Uint128 `json:"mintedAmount"`
List []holdingBalance `json:"list"`
}
type getHoldersResponse = HttpResponse[getHoldersResult]
func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
var req getHoldersRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
var runeId runes.RuneId
if req.Id != "" {
var ok bool
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
if !ok {
return errs.NewPublicError("unable to resolve rune id from \"id\"")
}
}
runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetHoldersByHeight")
}
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetBalancesByRuneId")
}
totalSupply, err := runeEntry.Supply()
if err != nil {
return errors.Wrap(err, "cannot get total supply of rune")
}
mintedAmount, err := runeEntry.MintedAmount()
if err != nil {
return errors.Wrap(err, "cannot get minted amount of rune")
}
list := make([]holdingBalance, 0, len(holdingBalances))
for _, balance := range holdingBalances {
address := addressFromPkScript(balance.PkScript, h.network)
amount := decimal.NewFromBigInt(balance.Amount.Big(), 0)
percent := amount.Div(decimal.NewFromBigInt(totalSupply.Big(), 0))
list = append(list, holdingBalance{
Address: address,
PkScript: hex.EncodeToString(balance.PkScript),
Amount: balance.Amount,
Percent: percent.InexactFloat64(),
})
}
resp := getHoldersResponse{
Result: &getHoldersResult{
BlockHeight: blockHeight,
TotalSupply: totalSupply,
MintedAmount: mintedAmount,
List: list,
},
}
return errors.WithStack(ctx.JSON(resp))
}


@@ -0,0 +1,165 @@
package httphandler
import (
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getTokenInfoRequest struct {
Id string `params:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getTokenInfoRequest) Validate() error {
var errList []error
if !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type entryTerms struct {
Amount uint128.Uint128 `json:"amount"`
Cap uint128.Uint128 `json:"cap"`
HeightStart *uint64 `json:"heightStart"`
HeightEnd *uint64 `json:"heightEnd"`
OffsetStart *uint64 `json:"offsetStart"`
OffsetEnd *uint64 `json:"offsetEnd"`
}
type entry struct {
Divisibility uint8 `json:"divisibility"`
Premine uint128.Uint128 `json:"premine"`
Rune runes.Rune `json:"rune"`
Spacers uint32 `json:"spacers"`
Symbol string `json:"symbol"`
Terms entryTerms `json:"terms"`
Turbo bool `json:"turbo"`
}
type tokenInfoExtend struct {
Entry entry `json:"entry"`
}
type getTokenInfoResult struct {
Id runes.RuneId `json:"id"`
Name runes.SpacedRune `json:"name"` // rune name
Symbol string `json:"symbol"`
TotalSupply uint128.Uint128 `json:"totalSupply"`
CirculatingSupply uint128.Uint128 `json:"circulatingSupply"`
MintedAmount uint128.Uint128 `json:"mintedAmount"`
BurnedAmount uint128.Uint128 `json:"burnedAmount"`
Decimals uint8 `json:"decimals"`
DeployedAt uint64 `json:"deployedAt"` // unix timestamp
DeployedAtHeight uint64 `json:"deployedAtHeight"`
CompletedAt *uint64 `json:"completedAt"` // unix timestamp
CompletedAtHeight *uint64 `json:"completedAtHeight"`
HoldersCount int `json:"holdersCount"`
Extend tokenInfoExtend `json:"extend"`
}
type getTokenInfoResponse = HttpResponse[getTokenInfoResult]
func (h *HttpHandler) GetTokenInfo(ctx *fiber.Ctx) (err error) {
var req getTokenInfoRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
var runeId runes.RuneId
if req.Id != "" {
var ok bool
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
if !ok {
return errs.NewPublicError("unable to resolve rune id from \"id\"")
}
}
runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetTokenInfoByHeight")
}
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetBalancesByRuneId")
}
holdingBalances = lo.Filter(holdingBalances, func(b *entity.Balance, _ int) bool {
return !b.Amount.IsZero()
})
// sort by amount descending
slices.SortFunc(holdingBalances, func(i, j *entity.Balance) int {
return j.Amount.Cmp(i.Amount)
})
totalSupply, err := runeEntry.Supply()
if err != nil {
return errors.Wrap(err, "cannot get total supply of rune")
}
mintedAmount, err := runeEntry.MintedAmount()
if err != nil {
return errors.Wrap(err, "cannot get minted amount of rune")
}
circulatingSupply := mintedAmount.Sub(runeEntry.BurnedAmount)
terms := lo.FromPtr(runeEntry.Terms)
resp := getTokenInfoResponse{
Result: &getTokenInfoResult{
Id: runeId,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
TotalSupply: totalSupply,
CirculatingSupply: circulatingSupply,
MintedAmount: mintedAmount,
BurnedAmount: runeEntry.BurnedAmount,
Decimals: runeEntry.Divisibility,
DeployedAt: uint64(runeEntry.EtchedAt.Unix()),
DeployedAtHeight: runeEntry.EtchingBlock,
CompletedAt: lo.Ternary(runeEntry.CompletedAt.IsZero(), nil, lo.ToPtr(uint64(runeEntry.CompletedAt.Unix()))),
CompletedAtHeight: runeEntry.CompletedAtHeight,
HoldersCount: len(holdingBalances),
Extend: tokenInfoExtend{
Entry: entry{
Divisibility: runeEntry.Divisibility,
Premine: runeEntry.Premine,
Rune: runeEntry.SpacedRune.Rune,
Spacers: runeEntry.SpacedRune.Spacers,
Symbol: string(runeEntry.Symbol),
Terms: entryTerms{
Amount: lo.FromPtr(terms.Amount),
Cap: lo.FromPtr(terms.Cap),
HeightStart: terms.HeightStart,
HeightEnd: terms.HeightEnd,
OffsetStart: terms.OffsetStart,
OffsetEnd: terms.OffsetEnd,
},
Turbo: runeEntry.Turbo,
},
},
},
}
return errors.WithStack(ctx.JSON(resp))
}


@@ -0,0 +1,274 @@
package httphandler
import (
"encoding/hex"
"slices"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getTransactionsRequest struct {
Wallet string `query:"wallet"`
Id string `query:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getTransactionsRequest) Validate() error {
var errList []error
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type txInputOutput struct {
PkScript string `json:"pkScript"`
Address string `json:"address"`
Id runes.RuneId `json:"id"`
Amount uint128.Uint128 `json:"amount"`
Decimals uint8 `json:"decimals"`
Index uint32 `json:"index"`
}
type terms struct {
Amount *uint128.Uint128 `json:"amount"`
Cap *uint128.Uint128 `json:"cap"`
HeightStart *uint64 `json:"heightStart"`
HeightEnd *uint64 `json:"heightEnd"`
OffsetStart *uint64 `json:"offsetStart"`
OffsetEnd *uint64 `json:"offsetEnd"`
}
type etching struct {
Divisibility *uint8 `json:"divisibility"`
Premine *uint128.Uint128 `json:"premine"`
Rune *runes.Rune `json:"rune"`
Spacers *uint32 `json:"spacers"`
Symbol *string `json:"symbol"`
Terms *terms `json:"terms"`
Turbo bool `json:"turbo"`
}
type edict struct {
Id runes.RuneId `json:"id"`
Amount uint128.Uint128 `json:"amount"`
Output int `json:"output"`
}
type runestone struct {
Cenotaph bool `json:"cenotaph"`
Flaws []string `json:"flaws"`
Etching *etching `json:"etching"`
Edicts []edict `json:"edicts"`
Mint *runes.RuneId `json:"mint"`
Pointer *uint64 `json:"pointer"`
}
type runeTransactionExtend struct {
RuneEtched bool `json:"runeEtched"`
Runestone *runestone `json:"runestone"`
}
type amountWithDecimal struct {
Amount uint128.Uint128 `json:"amount"`
Decimals uint8 `json:"decimals"`
}
type transaction struct {
TxHash chainhash.Hash `json:"txHash"`
BlockHeight uint64 `json:"blockHeight"`
Index uint32 `json:"index"`
Timestamp int64 `json:"timestamp"`
Inputs []txInputOutput `json:"inputs"`
Outputs []txInputOutput `json:"outputs"`
Mints map[string]amountWithDecimal `json:"mints"`
Burns map[string]amountWithDecimal `json:"burns"`
Extend runeTransactionExtend `json:"extend"`
}
type getTransactionsResult struct {
List []transaction `json:"list"`
}
type getTransactionsResponse = HttpResponse[getTransactionsResult]
func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
var req getTransactionsRequest
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
var pkScript []byte
if req.Wallet != "" {
var ok bool
pkScript, ok = resolvePkScript(h.network, req.Wallet)
if !ok {
return errs.NewPublicError("unable to resolve pkscript from \"wallet\"")
}
}
var runeId runes.RuneId
if req.Id != "" {
var ok bool
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
if !ok {
return errs.NewPublicError("unable to resolve rune id from \"id\"")
}
}
blockHeight := req.BlockHeight
// set blockHeight to the latest block height if blockHeight, pkScript, and runeId are not provided
if blockHeight == 0 && pkScript == nil && runeId == (runes.RuneId{}) {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
txs, err := h.usecase.GetRuneTransactions(ctx.UserContext(), pkScript, runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetRuneTransactions")
}
var allRuneIds []runes.RuneId
for _, tx := range txs {
for id := range tx.Mints {
allRuneIds = append(allRuneIds, id)
}
for id := range tx.Burns {
allRuneIds = append(allRuneIds, id)
}
for _, input := range tx.Inputs {
allRuneIds = append(allRuneIds, input.RuneId)
}
for _, output := range tx.Outputs {
allRuneIds = append(allRuneIds, output.RuneId)
}
}
allRuneIds = lo.Uniq(allRuneIds)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), allRuneIds)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
txList := make([]transaction, 0, len(txs))
for _, tx := range txs {
respTx := transaction{
TxHash: tx.Hash,
BlockHeight: tx.BlockHeight,
Index: tx.Index,
Timestamp: tx.Timestamp.Unix(),
Inputs: make([]txInputOutput, 0, len(tx.Inputs)),
Outputs: make([]txInputOutput, 0, len(tx.Outputs)),
Mints: make(map[string]amountWithDecimal, len(tx.Mints)),
Burns: make(map[string]amountWithDecimal, len(tx.Burns)),
Extend: runeTransactionExtend{
RuneEtched: tx.RuneEtched,
Runestone: nil,
},
}
for _, input := range tx.Inputs {
address := addressFromPkScript(input.PkScript, h.network)
respTx.Inputs = append(respTx.Inputs, txInputOutput{
PkScript: hex.EncodeToString(input.PkScript),
Address: address,
Id: input.RuneId,
Amount: input.Amount,
Decimals: runeEntries[input.RuneId].Divisibility,
Index: input.Index,
})
}
for _, output := range tx.Outputs {
address := addressFromPkScript(output.PkScript, h.network)
respTx.Outputs = append(respTx.Outputs, txInputOutput{
PkScript: hex.EncodeToString(output.PkScript),
Address: address,
Id: output.RuneId,
Amount: output.Amount,
Decimals: runeEntries[output.RuneId].Divisibility,
Index: output.Index,
})
}
for id, amount := range tx.Mints {
respTx.Mints[id.String()] = amountWithDecimal{
Amount: amount,
Decimals: runeEntries[id].Divisibility,
}
}
for id, amount := range tx.Burns {
respTx.Burns[id.String()] = amountWithDecimal{
Amount: amount,
Decimals: runeEntries[id].Divisibility,
}
}
if tx.Runestone != nil {
var e *etching
if tx.Runestone.Etching != nil {
var symbol *string
if tx.Runestone.Etching.Symbol != nil {
symbol = lo.ToPtr(string(*tx.Runestone.Etching.Symbol))
}
var t *terms
if tx.Runestone.Etching.Terms != nil {
t = &terms{
Amount: tx.Runestone.Etching.Terms.Amount,
Cap: tx.Runestone.Etching.Terms.Cap,
HeightStart: tx.Runestone.Etching.Terms.HeightStart,
HeightEnd: tx.Runestone.Etching.Terms.HeightEnd,
OffsetStart: tx.Runestone.Etching.Terms.OffsetStart,
OffsetEnd: tx.Runestone.Etching.Terms.OffsetEnd,
}
}
e = &etching{
Divisibility: tx.Runestone.Etching.Divisibility,
Premine: tx.Runestone.Etching.Premine,
Rune: tx.Runestone.Etching.Rune,
Spacers: tx.Runestone.Etching.Spacers,
Symbol: symbol,
Terms: t,
Turbo: tx.Runestone.Etching.Turbo,
}
}
respTx.Extend.Runestone = &runestone{
Cenotaph: tx.Runestone.Cenotaph,
Flaws: lo.Ternary(tx.Runestone.Cenotaph, tx.Runestone.Flaws.CollectAsString(), nil),
Etching: e,
Edicts: lo.Map(tx.Runestone.Edicts, func(ed runes.Edict, _ int) edict {
return edict{
Id: ed.Id,
Amount: ed.Amount,
Output: ed.Output,
}
}),
Mint: tx.Runestone.Mint,
Pointer: tx.Runestone.Pointer,
}
}
txList = append(txList, respTx)
}
// sort by block height ASC, then index ASC
slices.SortFunc(txList, func(t1, t2 transaction) int {
// convert before subtracting to avoid unsigned wrap-around when the first value is smaller
if t1.BlockHeight != t2.BlockHeight {
return int(t1.BlockHeight) - int(t2.BlockHeight)
}
return int(t1.Index) - int(t2.Index)
})
resp := getTransactionsResponse{
Result: &getTransactionsResult{
List: txList,
},
}
return errors.WithStack(ctx.JSON(resp))
}


@@ -0,0 +1,146 @@
package httphandler
import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getUTXOsByAddressRequest struct {
Wallet string `params:"wallet"`
Id string `query:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getUTXOsByAddressRequest) Validate() error {
var errList []error
if r.Wallet == "" {
errList = append(errList, errors.New("'wallet' is required"))
}
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type runeBalance struct {
RuneId runes.RuneId `json:"runeId"`
Rune runes.SpacedRune `json:"rune"`
Symbol string `json:"symbol"`
Amount uint128.Uint128 `json:"amount"`
Divisibility uint8 `json:"divisibility"`
}
type utxoExtend struct {
Runes []runeBalance `json:"runes"`
}
type utxo struct {
TxHash chainhash.Hash `json:"txHash"`
OutputIndex uint32 `json:"outputIndex"`
Extend utxoExtend `json:"extend"`
}
type getUTXOsByAddressResult struct {
List []utxo `json:"list"`
BlockHeight uint64 `json:"blockHeight"`
}
type getUTXOsByAddressResponse = HttpResponse[getUTXOsByAddressResult]
func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
var req getUTXOsByAddressRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
pkScript, ok := resolvePkScript(h.network, req.Wallet)
if !ok {
return errs.NewPublicError("unable to resolve pkscript from \"wallet\"")
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
outPointBalances, err := h.usecase.GetUnspentOutPointBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetBalancesByPkScript")
}
outPointBalanceRuneIds := lo.Map(outPointBalances, func(outPointBalance *entity.OutPointBalance, _ int) runes.RuneId {
return outPointBalance.RuneId
})
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), outPointBalanceRuneIds)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
groupedBalances := lo.GroupBy(outPointBalances, func(outPointBalance *entity.OutPointBalance) wire.OutPoint {
return outPointBalance.OutPoint
})
utxoList := make([]utxo, 0, len(groupedBalances))
for outPoint, balances := range groupedBalances {
runeBalances := make([]runeBalance, 0, len(balances))
for _, balance := range balances {
runeEntry := runeEntries[balance.RuneId]
runeBalances = append(runeBalances, runeBalance{
RuneId: balance.RuneId,
Rune: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Amount: balance.Amount,
Divisibility: runeEntry.Divisibility,
})
}
utxoList = append(utxoList, utxo{
TxHash: outPoint.Hash,
OutputIndex: outPoint.Index,
Extend: utxoExtend{
Runes: runeBalances,
},
})
}
// filter by req.Id if exists
{
runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id)
if ok {
utxoList = lo.Filter(utxoList, func(u utxo, _ int) bool {
for _, runeBalance := range u.Extend.Runes {
if runeBalance.RuneId == runeId {
return true
}
}
return false
})
}
}
resp := getUTXOsByAddressResponse{
Result: &getUTXOsByAddressResult{
BlockHeight: blockHeight,
List: utxoList,
},
}
return errors.WithStack(ctx.JSON(resp))
}


@@ -0,0 +1,114 @@
package httphandler
import (
"context"
"encoding/hex"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/modules/runes/usecase"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
)
type HttpHandler struct {
usecase *usecase.Usecase
network common.Network
}
func New(network common.Network, usecase *usecase.Usecase) *HttpHandler {
return &HttpHandler{
usecase: usecase,
network: network,
}
}
type HttpResponse[T any] struct {
Error *string `json:"error"`
Result *T `json:"result,omitempty"`
}
func resolvePkScript(network common.Network, wallet string) ([]byte, bool) {
if wallet == "" {
return nil, false
}
defaultNet := func() *chaincfg.Params {
switch network {
case common.NetworkMainnet:
return &chaincfg.MainNetParams
case common.NetworkTestnet:
return &chaincfg.TestNet3Params
}
panic("invalid network")
}()
// attempt to parse as address
address, err := btcutil.DecodeAddress(wallet, defaultNet)
if err == nil {
pkScript, err := txscript.PayToAddrScript(address)
if err != nil {
return nil, false
}
return pkScript, true
}
// attempt to parse as pkscript
pkScript, err := hex.DecodeString(wallet)
if err != nil {
return nil, false
}
return pkScript, true
}
// TODO: extract this function somewhere else
// addressFromPkScript returns the address from the given pkScript. If the pkScript is invalid or not standard, it returns empty string.
func addressFromPkScript(pkScript []byte, network common.Network) string {
_, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, network.ChainParams())
if err != nil {
logger.Debug("unable to extract address from pkscript", slogx.Error(err))
return ""
}
if len(addrs) != 1 {
logger.Debug("invalid number of addresses extracted from pkscript. Expected only 1.", slogx.Int("numAddresses", len(addrs)))
return ""
}
return addrs[0].EncodeAddress()
}
func (h *HttpHandler) resolveRuneId(ctx context.Context, id string) (runes.RuneId, bool) {
if id == "" {
return runes.RuneId{}, false
}
// attempt to parse as rune id
runeId, err := runes.NewRuneIdFromString(id)
if err == nil {
return runeId, true
}
// attempt to parse as rune
rune, err := runes.NewRuneFromString(id)
if err == nil {
runeId, err := h.usecase.GetRuneIdFromRune(ctx, rune)
if err != nil {
return runes.RuneId{}, false
}
return runeId, true
}
return runes.RuneId{}, false
}
func isRuneIdOrRuneName(id string) bool {
if _, err := runes.NewRuneIdFromString(id); err == nil {
return true
}
if _, err := runes.NewRuneFromString(id); err == nil {
return true
}
return false
}
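As an aside, resolvePkScript accepts either an encoded address or a raw hex pkScript, and both forms resolve to the same output script. A small illustrative sketch in the same package (exampleResolve is hypothetical; the genesis-block address is used only as a well-known valid mainnet address):

func exampleResolve() {
    // Address form.
    pkScript, ok := resolvePkScript(common.NetworkMainnet, "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa")
    if !ok {
        return
    }
    // Raw pkScript form: the hex encoding of the same script resolves to identical bytes.
    fromHex, _ := resolvePkScript(common.NetworkMainnet, hex.EncodeToString(pkScript))
    _ = fromHex
}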


@@ -0,0 +1,18 @@
package httphandler
import (
"github.com/gofiber/fiber/v2"
)
func (h *HttpHandler) Mount(router fiber.Router) error {
r := router.Group("/v2/runes")
r.Post("/balances/wallet/batch", h.GetBalancesByAddressBatch)
r.Get("/balances/wallet/:wallet", h.GetBalancesByAddress)
r.Get("/transactions", h.GetTransactions)
r.Get("/holders/:id", h.GetHolders)
r.Get("/info/:id", h.GetTokenInfo)
r.Get("/utxos/wallet/:wallet", h.GetUTXOsByAddress)
r.Get("/block", h.GetCurrentBlock)
return nil
}
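A hedged wiring sketch for Mount; the fiber app and usecase construction are assumed to happen elsewhere and are not part of this diff:

func mountRunesRoutes(app *fiber.App, uc *usecase.Usecase) error {
    // *fiber.App satisfies fiber.Router, so the routes above are registered directly on the app.
    handler := api.NewHTTPHandler(common.NetworkMainnet, uc)
    return handler.Mount(app)
}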


@@ -0,0 +1,10 @@
package config
import "github.com/gaze-network/indexer-network/internal/postgres"
type Config struct {
Datasource string `mapstructure:"datasource"` // Datasource to fetch bitcoin data for Meta-Protocol e.g. `bitcoin-node` | `database`
Database string `mapstructure:"database"` // Database to store runes data.
APIHandlers []string `mapstructure:"api_handlers"` // List of API handlers to enable. (e.g. `http`)
Postgres postgres.Config `mapstructure:"postgres"`
}


@@ -0,0 +1,27 @@
package runes
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/types"
)
const (
Version = "v0.0.1"
DBVersion = 1
EventHashVersion = 1
)
var startingBlockHeader = map[common.Network]types.BlockHeader{
common.NetworkMainnet: {
Height: 839999,
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")),
},
common.NetworkTestnet: {
Height: 2583200,
Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")),
},
}


@@ -0,0 +1,14 @@
BEGIN;
DROP TABLE IF EXISTS "runes_indexer_stats";
DROP TABLE IF EXISTS "runes_indexer_db_version";
DROP TABLE IF EXISTS "runes_processor_state";
DROP TABLE IF EXISTS "runes_indexed_blocks";
DROP TABLE IF EXISTS "runes_entries";
DROP TABLE IF EXISTS "runes_entry_states";
DROP TABLE IF EXISTS "runes_transactions";
DROP TABLE IF EXISTS "runes_runestones";
DROP TABLE IF EXISTS "runes_outpoint_balances";
DROP TABLE IF EXISTS "runes_balances";
COMMIT;


@@ -0,0 +1,122 @@
BEGIN;
-- Indexer Client Information
CREATE TABLE IF NOT EXISTS "runes_indexer_stats" (
"id" BIGSERIAL PRIMARY KEY,
"client_version" TEXT NOT NULL,
"network" TEXT NOT NULL,
"created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS "runes_indexer_state" (
"id" BIGSERIAL PRIMARY KEY,
"db_version" INT NOT NULL,
"event_hash_version" INT NOT NULL,
"created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS runes_indexer_state_created_at_idx ON "runes_indexer_state" USING BTREE ("created_at" DESC);
-- Runes data
CREATE TABLE IF NOT EXISTS "runes_indexed_blocks" (
"height" INT NOT NULL PRIMARY KEY,
"hash" TEXT NOT NULL,
"prev_hash" TEXT NOT NULL,
"event_hash" TEXT NOT NULL,
"cumulative_event_hash" TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS "runes_entries" (
"rune_id" TEXT NOT NULL PRIMARY KEY,
"number" BIGINT NOT NULL, -- sequential number of the rune starting from 0
"rune" TEXT NOT NULL,
"spacers" INT NOT NULL,
"premine" DECIMAL NOT NULL,
"symbol" INT NOT NULL,
"divisibility" SMALLINT NOT NULL,
"terms" BOOLEAN NOT NULL, -- if true, then minting term exists for this entry
"terms_amount" DECIMAL,
"terms_cap" DECIMAL,
"terms_height_start" INT,
"terms_height_end" INT,
"terms_offset_start" INT,
"terms_offset_end" INT,
"turbo" BOOLEAN NOT NULL,
"etching_block" INT NOT NULL,
"etching_tx_hash" TEXT NOT NULL,
"etched_at" TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS runes_entries_rune_idx ON "runes_entries" USING BTREE ("rune");
CREATE UNIQUE INDEX IF NOT EXISTS runes_entries_number_idx ON "runes_entries" USING BTREE ("number");
CREATE TABLE IF NOT EXISTS "runes_entry_states" (
"rune_id" TEXT NOT NULL,
"block_height" INT NOT NULL,
"mints" DECIMAL NOT NULL,
"burned_amount" DECIMAL NOT NULL,
"completed_at" TIMESTAMP,
"completed_at_height" INT,
PRIMARY KEY ("rune_id", "block_height")
);
CREATE TABLE IF NOT EXISTS "runes_transactions" (
"hash" TEXT NOT NULL PRIMARY KEY,
"block_height" INT NOT NULL,
"index" INT NOT NULL,
"timestamp" TIMESTAMP NOT NULL,
"inputs" JSONB NOT NULL,
"outputs" JSONB NOT NULL,
"mints" JSONB NOT NULL,
"burns" JSONB NOT NULL,
"rune_etched" BOOLEAN NOT NULL
);
CREATE INDEX IF NOT EXISTS runes_transactions_block_height_idx ON "runes_transactions" USING BTREE ("block_height");
CREATE INDEX IF NOT EXISTS runes_transactions_jsonb_idx ON "runes_transactions" USING GIN ("inputs", "outputs", "mints", "burns");
CREATE TABLE IF NOT EXISTS "runes_runestones" (
"tx_hash" TEXT NOT NULL PRIMARY KEY,
"block_height" INT NOT NULL,
"etching" BOOLEAN NOT NULL,
"etching_divisibility" SMALLINT,
"etching_premine" DECIMAL,
"etching_rune" TEXT,
"etching_spacers" INT,
"etching_symbol" INT,
"etching_terms" BOOLEAN,
"etching_terms_amount" DECIMAL,
"etching_terms_cap" DECIMAL,
"etching_terms_height_start" INT,
"etching_terms_height_end" INT,
"etching_terms_offset_start" INT,
"etching_terms_offset_end" INT,
"etching_turbo" BOOLEAN,
"edicts" JSONB NOT NULL DEFAULT '[]',
"mint" TEXT,
"pointer" INT,
"cenotaph" BOOLEAN NOT NULL,
"flaws" INT NOT NULL
);
CREATE TABLE IF NOT EXISTS "runes_outpoint_balances" (
"rune_id" TEXT NOT NULL,
"pkscript" TEXT NOT NULL,
"tx_hash" TEXT NOT NULL,
"tx_idx" INT NOT NULL, -- output index
"amount" DECIMAL NOT NULL,
"block_height" INT NOT NULL, -- block height when this output was created
"spent_height" INT, -- block height when this output was spent
PRIMARY KEY ("rune_id", "tx_hash", "tx_idx")
);
CREATE INDEX IF NOT EXISTS runes_outpoint_balances_tx_hash_tx_idx_idx ON "runes_outpoint_balances" USING BTREE ("tx_hash", "tx_idx");
CREATE INDEX IF NOT EXISTS runes_outpoint_balances_pkscript_block_height_spent_height_idx ON "runes_outpoint_balances" USING BTREE ("pkscript", "block_height", "spent_height");
CREATE TABLE IF NOT EXISTS "runes_balances" (
"pkscript" TEXT NOT NULL,
"block_height" INT NOT NULL,
"rune_id" TEXT NOT NULL,
"amount" DECIMAL NOT NULL,
PRIMARY KEY ("pkscript", "rune_id", "block_height")
);
COMMIT;


@@ -0,0 +1,118 @@
-- name: GetBalancesByPkScript :many
WITH balances AS (
SELECT DISTINCT ON (rune_id) * FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC
)
SELECT * FROM balances WHERE amount > 0;
-- name: GetBalancesByRuneId :many
WITH balances AS (
SELECT DISTINCT ON (pkscript) * FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, block_height DESC
)
SELECT * FROM balances WHERE amount > 0;
-- name: GetBalanceByPkScriptAndRuneId :one
SELECT * FROM runes_balances WHERE pkscript = $1 AND rune_id = $2 AND block_height <= $3 ORDER BY block_height DESC LIMIT 1;
-- name: GetOutPointBalancesAtOutPoint :many
SELECT * FROM runes_outpoint_balances WHERE tx_hash = $1 AND tx_idx = $2;
-- name: GetUnspentOutPointBalancesByPkScript :many
SELECT * FROM runes_outpoint_balances WHERE pkscript = @pkScript AND block_height <= @block_height AND (spent_height IS NULL OR spent_height > @block_height);
-- name: GetRuneEntriesByRuneIds :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) * FROM runes_entry_states WHERE rune_id = ANY(@rune_ids::text[]) ORDER BY rune_id, block_height DESC
)
SELECT * FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE runes_entries.rune_id = ANY(@rune_ids::text[]);
-- name: GetRuneEntriesByRuneIdsAndHeight :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) * FROM runes_entry_states WHERE rune_id = ANY(@rune_ids::text[]) AND block_height <= @height ORDER BY rune_id, block_height DESC
)
SELECT * FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE runes_entries.rune_id = ANY(@rune_ids::text[]) AND etching_block <= @height;
-- name: GetRuneIdFromRune :one
SELECT rune_id FROM runes_entries WHERE rune = $1;
-- name: GetRuneTransactions :many
SELECT * FROM runes_transactions
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
WHERE (
@filter_pk_script::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
OR runes_transactions.outputs @> @pk_script_param::JSONB
OR runes_transactions.inputs @> @pk_script_param::JSONB
) AND (
@filter_rune_id::BOOLEAN = FALSE -- if @filter_rune_id is TRUE, apply rune_id filter
OR runes_transactions.outputs @> @rune_id_param::JSONB
OR runes_transactions.inputs @> @rune_id_param::JSONB
OR runes_transactions.mints ? @rune_id
OR runes_transactions.burns ? @rune_id
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = @rune_id_block_height AND runes_transactions.index = @rune_id_tx_index)
) AND (
@block_height::INT = 0 OR runes_transactions.block_height = @block_height::INT -- if @block_height > 0, apply block_height filter
);
-- name: CountRuneEntries :one
SELECT COUNT(*) FROM runes_entries;
-- name: CreateRuneEntry :exec
INSERT INTO runes_entries (rune_id, rune, number, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18);
-- name: CreateRuneEntryState :exec
INSERT INTO runes_entry_states (rune_id, block_height, mints, burned_amount, completed_at, completed_at_height) VALUES ($1, $2, $3, $4, $5, $6);
-- name: CreateRuneTransaction :exec
INSERT INTO runes_transactions (hash, block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9);
-- name: CreateRunestone :exec
INSERT INTO runes_runestones (tx_hash, block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21);
-- name: CreateOutPointBalances :batchexec
INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7);
-- name: SpendOutPointBalances :exec
UPDATE runes_outpoint_balances SET spent_height = $1 WHERE tx_hash = $2 AND tx_idx = $3;
-- name: CreateRuneBalanceAtBlock :batchexec
INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4);
-- name: GetLatestIndexedBlock :one
SELECT * FROM runes_indexed_blocks ORDER BY height DESC LIMIT 1;
-- name: GetIndexedBlockByHeight :one
SELECT * FROM runes_indexed_blocks WHERE height = $1;
-- name: CreateIndexedBlock :exec
INSERT INTO runes_indexed_blocks (hash, height, prev_hash, event_hash, cumulative_event_hash) VALUES ($1, $2, $3, $4, $5);
-- name: DeleteIndexedBlockSinceHeight :exec
DELETE FROM runes_indexed_blocks WHERE height >= $1;
-- name: DeleteRuneEntriesSinceHeight :exec
DELETE FROM runes_entries WHERE etching_block >= $1;
-- name: DeleteRuneEntryStatesSinceHeight :exec
DELETE FROM runes_entry_states WHERE block_height >= $1;
-- name: DeleteRuneTransactionsSinceHeight :exec
DELETE FROM runes_transactions WHERE block_height >= $1;
-- name: DeleteRunestonesSinceHeight :exec
DELETE FROM runes_runestones WHERE block_height >= $1;
-- name: DeleteOutPointBalancesSinceHeight :exec
DELETE FROM runes_outpoint_balances WHERE block_height >= $1;
-- name: UnspendOutPointBalancesSinceHeight :exec
UPDATE runes_outpoint_balances SET spent_height = NULL WHERE spent_height >= $1;
-- name: DeleteRuneBalancesSinceHeight :exec
DELETE FROM runes_balances WHERE block_height >= $1;


@@ -0,0 +1,11 @@
-- name: GetLatestIndexerState :one
SELECT * FROM runes_indexer_state ORDER BY created_at DESC LIMIT 1;
-- name: SetIndexerState :exec
INSERT INTO runes_indexer_state (db_version, event_hash_version) VALUES ($1, $2);
-- name: GetLatestIndexerStats :one
SELECT "client_version", "network" FROM runes_indexer_stats ORDER BY id DESC LIMIT 1;
-- name: UpdateIndexerStats :exec
INSERT INTO runes_indexer_stats (client_version, network) VALUES ($1, $2);


@@ -0,0 +1,15 @@
package datagateway
import (
"context"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
)
type IndexerInfoDataGateway interface {
GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error)
GetLatestIndexerStats(ctx context.Context) (version string, network common.Network, err error)
SetIndexerState(ctx context.Context, state entity.IndexerState) error
UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error
}


@@ -0,0 +1,81 @@
package datagateway
import (
"context"
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
)
type RunesDataGateway interface {
RunesReaderDataGateway
RunesWriterDataGateway
// BeginRunesTx returns a new RunesDataGateway with transaction enabled. All write operations performed in this datagateway must be committed to persist changes.
BeginRunesTx(ctx context.Context) (RunesDataGatewayWithTx, error)
}
type RunesDataGatewayWithTx interface {
RunesDataGateway
Tx
}
type RunesReaderDataGateway interface {
GetLatestBlock(ctx context.Context) (types.BlockHeader, error)
GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error)
// GetRuneTransactions returns the runes transactions, filterable by pkScript, runeId, and height. If pkScript, runeId, or height is a zero value, that filter is ignored.
GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, height uint64) ([]*entity.RuneTransaction, error)
GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error)
GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error)
// GetRuneIdFromRune returns the RuneId for the given rune. Returns errs.NotFound if the rune entry is not found.
GetRuneIdFromRune(ctx context.Context, rune runes.Rune) (runes.RuneId, error)
// GetRuneEntryByRuneId returns the RuneEntry for the given runeId. Returns errs.NotFound if the rune entry is not found.
GetRuneEntryByRuneId(ctx context.Context, runeId runes.RuneId) (*runes.RuneEntry, error)
// GetRuneEntryByRuneIdBatch returns the RuneEntries for the given runeIds.
GetRuneEntryByRuneIdBatch(ctx context.Context, runeIds []runes.RuneId) (map[runes.RuneId]*runes.RuneEntry, error)
// GetRuneEntryByRuneIdAndHeight returns the RuneEntry for the given runeId and block height. Returns errs.NotFound if the rune entry is not found.
GetRuneEntryByRuneIdAndHeight(ctx context.Context, runeId runes.RuneId, blockHeight uint64) (*runes.RuneEntry, error)
// GetRuneEntryByRuneIdAndHeightBatch returns the RuneEntries for the given runeIds and block height.
GetRuneEntryByRuneIdAndHeightBatch(ctx context.Context, runeIds []runes.RuneId, blockHeight uint64) (map[runes.RuneId]*runes.RuneEntry, error)
// CountRuneEntries returns the number of existing rune entries.
CountRuneEntries(ctx context.Context) (uint64, error)
// GetBalancesByPkScript returns the balances for the given pkScript at the given blockHeight.
GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error)
// GetBalancesByRuneId returns the balances for the given runeId at the given blockHeight.
// The result would naturally be keyed by pkScript, but []byte cannot be used as a map key, so a slice is returned instead.
GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error)
// GetBalancesByPkScriptAndRuneId returns the balance for the given pkScript and runeId at the given blockHeight.
GetBalanceByPkScriptAndRuneId(ctx context.Context, pkScript []byte, runeId runes.RuneId, blockHeight uint64) (*entity.Balance, error)
}
type RunesWriterDataGateway interface {
CreateRuneEntry(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error
CreateRuneEntryState(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error
CreateOutPointBalances(ctx context.Context, outPointBalances []*entity.OutPointBalance) error
SpendOutPointBalances(ctx context.Context, outPoint wire.OutPoint, blockHeight uint64) error
CreateRuneBalances(ctx context.Context, params []CreateRuneBalancesParams) error
CreateRuneTransaction(ctx context.Context, tx *entity.RuneTransaction) error
CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error
// TODO: collapse these into a single function (ResetStateToHeight)?
DeleteIndexedBlockSinceHeight(ctx context.Context, height uint64) error
DeleteRuneEntriesSinceHeight(ctx context.Context, height uint64) error
DeleteRuneEntryStatesSinceHeight(ctx context.Context, height uint64) error
DeleteRuneTransactionsSinceHeight(ctx context.Context, height uint64) error
DeleteRunestonesSinceHeight(ctx context.Context, height uint64) error
DeleteOutPointBalancesSinceHeight(ctx context.Context, height uint64) error
UnspendOutPointBalancesSinceHeight(ctx context.Context, height uint64) error
DeleteRuneBalancesSinceHeight(ctx context.Context, height uint64) error
}
type CreateRuneBalancesParams struct {
PkScript []byte
RuneId runes.RuneId
Balance uint128.Uint128
BlockHeight uint64
}


@@ -0,0 +1,12 @@
package datagateway
import "context"
type Tx interface {
// Commit commits the DB transaction. All changes made after Begin() will be persisted. Calling Commit() will close the current transaction.
// If Commit() is called without a prior Begin(), it must be a no-op.
Commit(ctx context.Context) error
// Rollback rolls back the DB transaction. All changes made after Begin() will be discarded.
// Rollback() must be safe to call even if no transaction is active, so a deferred Rollback() is safe even after Commit() has already succeeded.
Rollback(ctx context.Context) error
}
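// Typical usage (illustrative sketch; BeginTx stands in for whichever Begin
// method the concrete data gateway exposes):
//
//	tx, err := dg.BeginTx(ctx)
//	if err != nil {
//		return err
//	}
//	defer tx.Rollback(ctx) // safe no-op after a successful Commit
//	// ... perform writes ...
//	return tx.Commit(ctx)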

372
modules/runes/event_hash.go Normal file

@@ -0,0 +1,372 @@
package runes
import (
"bytes"
"encoding/hex"
"slices"
"strconv"
"strings"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
// TODO: implement test to ensure that the event hash is calculated the same way for same version
func (p *Processor) calculateEventHash(header types.BlockHeader) (chainhash.Hash, error) {
payload, err := p.getHashPayload(header)
if err != nil {
return chainhash.Hash{}, errors.Wrap(err, "failed to get hash payload")
}
return chainhash.DoubleHashH(payload), nil
}
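// A minimal sketch of the determinism test mentioned in the TODO above
// (illustrative only; the fixed state and goldenHash are assumptions):
//
//	func TestEventHashDeterminism(t *testing.T) {
//		p := &Processor{} // populate with a fixed, hand-built block state
//		header := types.BlockHeader{ /* fixed header */ }
//		got, err := p.calculateEventHash(header)
//		require.NoError(t, err)
//		// must match the hash recorded for this EventHashVersion
//		require.Equal(t, goldenHash, got.String())
//	}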
func (p *Processor) getHashPayload(header types.BlockHeader) ([]byte, error) {
var sb strings.Builder
sb.WriteString("payload:v" + strconv.Itoa(EventHashVersion) + ":")
sb.WriteString("blockHash:")
sb.Write(header.Hash[:])
// serialize new rune entries
{
runeEntries := lo.Values(p.newRuneEntries)
slices.SortFunc(runeEntries, func(t1, t2 *runes.RuneEntry) int {
return int(t1.Number) - int(t2.Number)
})
for _, entry := range runeEntries {
sb.Write(serializeNewRuneEntry(entry))
}
}
// serialize new rune entry states
{
runeIds := lo.Keys(p.newRuneEntryStates)
slices.SortFunc(runeIds, func(t1, t2 runes.RuneId) int {
return t1.Cmp(t2)
})
for _, runeId := range runeIds {
sb.Write(serializeNewRuneEntryState(p.newRuneEntryStates[runeId]))
}
}
// serialize new out point balances
sb.Write(serializeNewOutPointBalances(p.newOutPointBalances))
// serialize spend out points
sb.Write(serializeSpendOutPoints(p.newSpendOutPoints))
// serialize new balances
{
bytes, err := serializeNewBalances(p.newBalances)
if err != nil {
return nil, errors.Wrap(err, "failed to serialize new balances")
}
sb.Write(bytes)
}
// serialize new txs
// sort txs by block height and index
{
bytes, err := serializeRuneTxs(p.newRuneTxs)
if err != nil {
return nil, errors.Wrap(err, "failed to serialize new rune txs")
}
sb.Write(bytes)
}
return []byte(sb.String()), nil
}
func serializeNewRuneEntry(entry *runes.RuneEntry) []byte {
var sb strings.Builder
sb.WriteString("newRuneEntry:")
// nolint:goconst
sb.WriteString("runeId:" + entry.RuneId.String())
sb.WriteString("number:" + strconv.Itoa(int(entry.Number)))
sb.WriteString("divisibility:" + strconv.Itoa(int(entry.Divisibility)))
sb.WriteString("premine:" + entry.Premine.String())
sb.WriteString("rune:" + entry.SpacedRune.Rune.String())
sb.WriteString("spacers:" + strconv.Itoa(int(entry.SpacedRune.Spacers)))
sb.WriteString("symbol:" + string(entry.Symbol))
if entry.Terms != nil {
sb.WriteString("terms:")
terms := entry.Terms
if terms.Amount != nil {
// nolint:goconst
sb.WriteString("amount:" + terms.Amount.String())
}
if terms.Cap != nil {
sb.WriteString("cap:" + terms.Cap.String())
}
if terms.HeightStart != nil {
sb.WriteString("heightStart:" + strconv.Itoa(int(*terms.HeightStart)))
}
if terms.HeightEnd != nil {
sb.WriteString("heightEnd:" + strconv.Itoa(int(*terms.HeightEnd)))
}
if terms.OffsetStart != nil {
sb.WriteString("offsetStart:" + strconv.Itoa(int(*terms.OffsetStart)))
}
if terms.OffsetEnd != nil {
sb.WriteString("offsetEnd:" + strconv.Itoa(int(*terms.OffsetEnd)))
}
}
sb.WriteString("turbo:" + strconv.FormatBool(entry.Turbo))
sb.WriteString("etchingBlock:" + strconv.Itoa(int(entry.EtchingBlock)))
sb.WriteString("etchingTxHash:" + entry.EtchingTxHash.String())
sb.WriteString("etchedAt:" + strconv.Itoa(int(entry.EtchedAt.Unix())))
sb.WriteString(";")
return []byte(sb.String())
}
func serializeNewRuneEntryState(entry *runes.RuneEntry) []byte {
var sb strings.Builder
sb.WriteString("newRuneEntryState:")
// write only mutable states
sb.WriteString("runeId:" + entry.RuneId.String())
sb.WriteString("mints:" + entry.Mints.String())
sb.WriteString("burnedAmount:" + entry.BurnedAmount.String())
if entry.CompletedAtHeight != nil {
sb.WriteString("completedAtHeight:" + strconv.Itoa(int(*entry.CompletedAtHeight)))
sb.WriteString("completedAt:" + strconv.Itoa(int(entry.CompletedAt.Unix())))
}
sb.WriteString(";")
return []byte(sb.String())
}
func serializeNewOutPointBalances(outPointBalances map[wire.OutPoint][]*entity.OutPointBalance) []byte {
var sb strings.Builder
sb.WriteString("newOutPointBalances:")
// collect balance values
newBalances := make([]*entity.OutPointBalance, 0)
for _, balances := range outPointBalances {
newBalances = append(newBalances, balances...)
}
// sort balances to ensure order
slices.SortFunc(newBalances, func(t1, t2 *entity.OutPointBalance) int {
// sort by outpoint first
if t1.OutPoint != t2.OutPoint {
if t1.OutPoint.Hash != t2.OutPoint.Hash {
return bytes.Compare(t1.OutPoint.Hash[:], t2.OutPoint.Hash[:])
}
return int(t1.OutPoint.Index) - int(t2.OutPoint.Index)
}
// sort by runeId
return t1.RuneId.Cmp(t2.RuneId)
})
for _, balance := range newBalances {
sb.WriteString("outPoint:")
sb.WriteString("hash:")
sb.Write(balance.OutPoint.Hash[:])
sb.WriteString("index:" + strconv.Itoa(int(balance.OutPoint.Index)))
sb.WriteString("pkScript:")
sb.Write(balance.PkScript)
sb.WriteString("runeId:" + balance.RuneId.String())
sb.WriteString("amount:" + balance.Amount.String())
sb.WriteString(";")
}
return []byte(sb.String())
}
func serializeSpendOutPoints(spendOutPoints []wire.OutPoint) []byte {
var sb strings.Builder
sb.WriteString("spendOutPoints:")
// sort outpoints to ensure order
slices.SortFunc(spendOutPoints, func(t1, t2 wire.OutPoint) int {
if t1.Hash != t2.Hash {
return bytes.Compare(t1.Hash[:], t2.Hash[:])
}
return int(t1.Index) - int(t2.Index)
})
for _, outPoint := range spendOutPoints {
sb.WriteString("hash:")
sb.Write(outPoint.Hash[:])
sb.WriteString("index:" + strconv.Itoa(int(outPoint.Index)))
sb.WriteString(";")
}
return []byte(sb.String())
}
func serializeNewBalances(balances map[string]map[runes.RuneId]uint128.Uint128) ([]byte, error) {
var sb strings.Builder
sb.WriteString("newBalances:")
pkScriptStrs := lo.Keys(balances)
// sort pkScripts to ensure order
slices.SortFunc(pkScriptStrs, func(t1, t2 string) int {
return strings.Compare(t1, t2)
})
for _, pkScriptStr := range pkScriptStrs {
runeIds := lo.Keys(balances[pkScriptStr])
// sort runeIds to ensure order
slices.SortFunc(runeIds, func(t1, t2 runes.RuneId) int {
return t1.Cmp(t2)
})
pkScript, err := hex.DecodeString(pkScriptStr)
if err != nil {
return nil, errors.Wrap(err, "failed to decode pkScript")
}
for _, runeId := range runeIds {
sb.WriteString("pkScript:")
sb.Write(pkScript)
sb.WriteString("runeId:" + runeId.String())
sb.WriteString("amount:" + balances[pkScriptStr][runeId].String())
sb.WriteString(";")
}
}
return []byte(sb.String()), nil
}
func serializeRuneTxs(txs []*entity.RuneTransaction) ([]byte, error) {
var sb strings.Builder
slices.SortFunc(txs, func(t1, t2 *entity.RuneTransaction) int {
if t1.BlockHeight != t2.BlockHeight {
return int(t1.BlockHeight) - int(t2.BlockHeight)
}
return int(t1.Index) - int(t2.Index)
})
sb.WriteString("txs:")
for _, tx := range txs {
sb.WriteString("hash:")
sb.Write(tx.Hash[:])
sb.WriteString("blockHeight:" + strconv.Itoa(int(tx.BlockHeight)))
sb.WriteString("index:" + strconv.Itoa(int(tx.Index)))
writeOutPointBalance := func(ob *entity.TxInputOutput) {
sb.WriteString("pkScript:")
sb.Write(ob.PkScript)
sb.WriteString("runeId:" + ob.RuneId.String())
sb.WriteString("amount:" + ob.Amount.String())
sb.WriteString("index:" + strconv.Itoa(int(ob.Index)))
sb.WriteString("txHash:")
sb.Write(ob.TxHash[:])
sb.WriteString("txOutIndex:" + strconv.Itoa(int(ob.TxOutIndex)))
sb.WriteString(";")
}
// sort inputs to ensure order
slices.SortFunc(tx.Inputs, func(t1, t2 *entity.TxInputOutput) int {
if t1.Index != t2.Index {
return int(t1.Index) - int(t2.Index)
}
return t1.RuneId.Cmp(t2.RuneId)
})
sb.WriteString("in:")
for _, in := range tx.Inputs {
writeOutPointBalance(in)
}
// sort outputs to ensure order
slices.SortFunc(tx.Outputs, func(t1, t2 *entity.TxInputOutput) int {
if t1.Index != t2.Index {
return int(t1.Index) - int(t2.Index)
}
return t1.RuneId.Cmp(t2.RuneId)
})
sb.WriteString("out:")
for _, out := range tx.Outputs {
writeOutPointBalance(out)
}
mintsKeys := lo.Keys(tx.Mints)
slices.SortFunc(mintsKeys, func(t1, t2 runes.RuneId) int {
return t1.Cmp(t2)
})
sb.WriteString("mints:")
for _, runeId := range mintsKeys {
amount := tx.Mints[runeId]
sb.WriteString(runeId.String())
sb.WriteString(amount.String())
sb.WriteString(";")
}
burnsKeys := lo.Keys(tx.Burns)
slices.SortFunc(burnsKeys, func(t1, t2 runes.RuneId) int {
return t1.Cmp(t2)
})
sb.WriteString("burns:")
for _, runeId := range burnsKeys {
amount := tx.Burns[runeId]
sb.WriteString(runeId.String())
sb.WriteString(amount.String())
sb.WriteString(";")
}
sb.WriteString("runeEtched:" + strconv.FormatBool(tx.RuneEtched))
sb.Write(serializeRunestoneForEventHash(tx.Runestone))
sb.WriteString(";")
}
return []byte(sb.String()), nil
}
func serializeRunestoneForEventHash(r *runes.Runestone) []byte {
if r == nil {
return []byte("rune:nil")
}
var sb strings.Builder
sb.WriteString("rune:")
if r.Etching != nil {
etching := r.Etching
sb.WriteString("etching:")
if etching.Divisibility != nil {
sb.WriteString("divisibility:" + strconv.Itoa(int(*etching.Divisibility)))
}
if etching.Premine != nil {
sb.WriteString("premine:" + etching.Premine.String())
}
if etching.Rune != nil {
sb.WriteString("rune:" + etching.Rune.String())
}
if etching.Spacers != nil {
sb.WriteString("spacers:" + strconv.Itoa(int(*etching.Spacers)))
}
if etching.Symbol != nil {
sb.WriteString("symbol:" + string(*etching.Symbol))
}
if etching.Terms != nil {
terms := etching.Terms
if terms.Amount != nil {
sb.WriteString("amount:" + terms.Amount.String())
}
if terms.Cap != nil {
sb.WriteString("cap:" + terms.Cap.String())
}
if terms.HeightStart != nil {
sb.WriteString("heightStart:" + strconv.Itoa(int(*terms.HeightStart)))
}
if terms.HeightEnd != nil {
sb.WriteString("heightEnd:" + strconv.Itoa(int(*terms.HeightEnd)))
}
if terms.OffsetStart != nil {
sb.WriteString("offsetStart:" + strconv.Itoa(int(*terms.OffsetStart)))
}
if terms.OffsetEnd != nil {
sb.WriteString("offsetEnd:" + strconv.Itoa(int(*terms.OffsetEnd)))
}
}
if etching.Turbo {
sb.WriteString("turbo:" + strconv.FormatBool(etching.Turbo))
}
}
if len(r.Edicts) > 0 {
sb.WriteString("edicts:")
// don't sort edicts, order must be kept the same because of delta encoding
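// (on the wire, each edict's rune ID is delta-encoded against the previous edict's ID,
// so the decoded order mirrors the encoded order and must be preserved here)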
for _, edict := range r.Edicts {
sb.WriteString(edict.Id.String() + edict.Amount.String() + strconv.Itoa(edict.Output) + ";")
}
}
if r.Mint != nil {
sb.WriteString("mint:" + r.Mint.String())
}
if r.Pointer != nil {
sb.WriteString("pointer:" + strconv.Itoa(int(*r.Pointer)))
}
sb.WriteString("cenotaph:" + strconv.FormatBool(r.Cenotaph))
sb.WriteString("flaws:" + strconv.Itoa(int(r.Flaws)))
return []byte(sb.String())
}


@@ -0,0 +1,14 @@
package entity
import (
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
)
type Balance struct {
PkScript []byte
Amount uint128.Uint128
RuneId runes.RuneId
// BlockHeight is the block height at which this balance was last updated
BlockHeight uint64
}


@@ -0,0 +1,11 @@
package entity
import "github.com/btcsuite/btcd/chaincfg/chainhash"
type IndexedBlock struct {
Height int64
Hash chainhash.Hash
PrevHash chainhash.Hash
EventHash chainhash.Hash
CumulativeEventHash chainhash.Hash
}


@@ -0,0 +1,9 @@
package entity
import "time"
type IndexerState struct {
CreatedAt time.Time
DBVersion int32
EventHashVersion int32
}


@@ -0,0 +1,16 @@
package entity
import (
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
)
type OutPointBalance struct {
RuneId runes.RuneId
PkScript []byte
OutPoint wire.OutPoint
Amount uint128.Uint128
BlockHeight uint64
SpentHeight *uint64
}


@@ -0,0 +1,76 @@
package entity
import (
"encoding/hex"
"encoding/json"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
)
type TxInputOutput struct {
PkScript []byte
RuneId runes.RuneId
Amount uint128.Uint128
Index uint32
TxHash chainhash.Hash
TxOutIndex uint32
}
type txInputOutputJSON struct {
PkScript string `json:"pkScript"`
RuneId runes.RuneId `json:"runeId"`
Amount uint128.Uint128 `json:"amount"`
Index uint32 `json:"index"`
TxHash chainhash.Hash `json:"txHash"`
TxOutIndex uint32 `json:"txOutIndex"`
}
func (o TxInputOutput) MarshalJSON() ([]byte, error) {
bytes, err := json.Marshal(txInputOutputJSON{
PkScript: hex.EncodeToString(o.PkScript),
RuneId: o.RuneId,
Amount: o.Amount,
Index: o.Index,
TxHash: o.TxHash,
TxOutIndex: o.TxOutIndex,
})
if err != nil {
return nil, errors.WithStack(err)
}
return bytes, nil
}
func (o *TxInputOutput) UnmarshalJSON(data []byte) error {
var aux txInputOutputJSON
if err := json.Unmarshal(data, &aux); err != nil {
return errors.WithStack(err)
}
pkScript, err := hex.DecodeString(aux.PkScript)
if err != nil {
return errors.WithStack(err)
}
o.PkScript = pkScript
o.RuneId = aux.RuneId
o.Amount = aux.Amount
o.Index = aux.Index
o.TxHash = aux.TxHash
o.TxOutIndex = aux.TxOutIndex
return nil
}
type RuneTransaction struct {
Hash chainhash.Hash
BlockHeight uint64
Index uint32
Timestamp time.Time
Inputs []*TxInputOutput
Outputs []*TxInputOutput
Mints map[runes.RuneId]uint128.Uint128
Burns map[runes.RuneId]uint128.Uint128
Runestone *runes.Runestone
RuneEtched bool
}


@@ -0,0 +1,32 @@
package entity
import (
"encoding/hex"
"encoding/json"
"testing"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/stretchr/testify/assert"
)
func TestTxInputOutputJSON(t *testing.T) {
ob := TxInputOutput{
PkScript: utils.Must(hex.DecodeString("51203daaca9b82a51aca960c1491588246029d7e0fc49e0abdbcc8fd17574be5c74b")),
RuneId: runes.RuneId{BlockHeight: 1, TxIndex: 2},
Amount: uint128.From64(100),
Index: 1,
TxHash: *utils.Must(chainhash.NewHashFromStr("3ea1b497b25993adf3f2c8dae1470721316a45c82600798c14d0425039c410ad")),
TxOutIndex: 2,
}
bytes, err := json.Marshal(ob)
assert.NoError(t, err)
t.Log(string(bytes))
var parsedOB TxInputOutput
err = json.Unmarshal(bytes, &parsedOB)
assert.NoError(t, err)
assert.Equal(t, ob, parsedOB)
}

231
modules/runes/processor.go Normal file

@@ -0,0 +1,231 @@
package runes
import (
"context"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexers"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/bitcoin/btcclient"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
var _ indexers.BitcoinProcessor = (*Processor)(nil)
type Processor struct {
runesDg datagateway.RunesDataGateway
indexerInfoDg datagateway.IndexerInfoDataGateway
bitcoinClient btcclient.Contract
bitcoinDataSource indexers.BitcoinDatasource
network common.Network
reportingClient *reportingclient.ReportingClient
newRuneEntries map[runes.RuneId]*runes.RuneEntry
newRuneEntryStates map[runes.RuneId]*runes.RuneEntry
newOutPointBalances map[wire.OutPoint][]*entity.OutPointBalance
newSpendOutPoints []wire.OutPoint
newBalances map[string]map[runes.RuneId]uint128.Uint128 // pkScript(hex) -> runeId -> amount
newRuneTxs []*entity.RuneTransaction
}
func NewProcessor(runesDg datagateway.RunesDataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, bitcoinClient btcclient.Contract, bitcoinDataSource indexers.BitcoinDatasource, network common.Network, reportingClient *reportingclient.ReportingClient) *Processor {
return &Processor{
runesDg: runesDg,
indexerInfoDg: indexerInfoDg,
bitcoinClient: bitcoinClient,
bitcoinDataSource: bitcoinDataSource,
network: network,
reportingClient: reportingClient,
newRuneEntries: make(map[runes.RuneId]*runes.RuneEntry),
newRuneEntryStates: make(map[runes.RuneId]*runes.RuneEntry),
newOutPointBalances: make(map[wire.OutPoint][]*entity.OutPointBalance),
newSpendOutPoints: make([]wire.OutPoint, 0),
newBalances: make(map[string]map[runes.RuneId]uint128.Uint128),
newRuneTxs: make([]*entity.RuneTransaction, 0),
}
}
var (
ErrDBVersionMismatch = errors.New("db version mismatch: please migrate db")
ErrEventHashVersionMismatch = errors.New("event hash version mismatch: please reset db and reindex")
)
func (p *Processor) VerifyStates(ctx context.Context) error {
// TODO: ensure db is migrated
if err := p.ensureValidState(ctx); err != nil {
return errors.Wrap(err, "error during ensureValidState")
}
if p.network == common.NetworkMainnet {
if err := p.ensureGenesisRune(ctx); err != nil {
return errors.Wrap(err, "error during ensureGenesisRune")
}
}
if p.reportingClient != nil {
if err := p.reportingClient.SubmitNodeReport(ctx, "runes", p.network); err != nil {
return errors.Wrap(err, "failed to submit node report")
}
}
return nil
}
func (p *Processor) ensureValidState(ctx context.Context) error {
indexerState, err := p.indexerInfoDg.GetLatestIndexerState(ctx)
if err != nil && !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "failed to get latest indexer state")
}
// if not found, set indexer state
if errors.Is(err, errs.NotFound) {
if err := p.indexerInfoDg.SetIndexerState(ctx, entity.IndexerState{
DBVersion: DBVersion,
EventHashVersion: EventHashVersion,
}); err != nil {
return errors.Wrap(err, "failed to set indexer state")
}
} else {
if indexerState.DBVersion != DBVersion {
return errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, DBVersion)
}
if indexerState.EventHashVersion != EventHashVersion {
return errors.Wrapf(errs.ConflictSetting, "event version mismatch: current version is %d. Please reset rune's db first.", indexerState.EventHashVersion, EventHashVersion)
}
}
_, network, err := p.indexerInfoDg.GetLatestIndexerStats(ctx)
if err != nil && !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "failed to get latest indexer stats")
}
// if found, verify indexer stats
if err == nil {
if network != p.network {
return errors.Wrapf(errs.ConflictSetting, "network mismatch: latest indexed network is %d, configured network is %d. If you want to change the network, please reset the database", network, p.network)
}
}
if err := p.indexerInfoDg.UpdateIndexerStats(ctx, p.network.String(), p.network); err != nil {
return errors.Wrap(err, "failed to update indexer stats")
}
return nil
}
var genesisRuneId = runes.RuneId{BlockHeight: 1, TxIndex: 0}
func (p *Processor) ensureGenesisRune(ctx context.Context) error {
_, err := p.runesDg.GetRuneEntryByRuneId(ctx, genesisRuneId)
if err != nil && !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "failed to get genesis rune entry")
}
if errors.Is(err, errs.NotFound) {
runeEntry := &runes.RuneEntry{
RuneId: genesisRuneId,
Number: 0,
Divisibility: 0,
Premine: uint128.Zero,
SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
Symbol: '\u29c9',
Terms: &runes.Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: &uint128.Max,
HeightStart: lo.ToPtr(uint64(common.HalvingInterval * 4)),
HeightEnd: lo.ToPtr(uint64(common.HalvingInterval * 5)),
OffsetStart: nil,
OffsetEnd: nil,
},
Turbo: true,
Mints: uint128.Zero,
BurnedAmount: uint128.Zero,
CompletedAt: time.Time{},
CompletedAtHeight: nil,
EtchingBlock: 1,
EtchingTxHash: chainhash.Hash{},
EtchedAt: time.Time{},
}
if err := p.runesDg.CreateRuneEntry(ctx, runeEntry, genesisRuneId.BlockHeight); err != nil {
return errors.Wrap(err, "failed to create genesis rune entry")
}
}
return nil
}
func (p *Processor) Name() string {
return "runes"
}
func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
blockHeader, err := p.runesDg.GetLatestBlock(ctx)
if err != nil {
if errors.Is(err, errs.NotFound) {
return startingBlockHeader[p.network], nil
}
return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block")
}
return blockHeader, nil
}
// Warning: GetIndexedBlock currently returns a types.BlockHeader with only the Height and Hash fields populated,
// because all current usages of this function require only these fields. In the future, we may want to populate all fields for type safety.
func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
block, err := p.runesDg.GetIndexedBlockByHeight(ctx, height)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to get indexed block")
}
return types.BlockHeader{
Height: block.Height,
Hash: block.Hash,
}, nil
}
func (p *Processor) RevertData(ctx context.Context, from int64) error {
runesDgTx, err := p.runesDg.BeginRunesTx(ctx)
if err != nil {
return errors.Wrap(err, "failed to begin transaction")
}
defer func() {
if err := runesDgTx.Rollback(ctx); err != nil {
logger.WarnContext(ctx, "failed to rollback transaction",
slogx.Error(err),
slogx.String("event", "rollback_runes_revert"),
)
}
}()
if err := runesDgTx.DeleteIndexedBlockSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete indexed blocks")
}
if err := runesDgTx.DeleteRuneEntriesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete rune entries")
}
if err := runesDgTx.DeleteRuneEntryStatesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete rune entry states")
}
if err := runesDgTx.DeleteRuneTransactionsSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete rune transactions")
}
if err := runesDgTx.DeleteRunestonesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete runestones")
}
if err := runesDgTx.DeleteOutPointBalancesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete outpoint balances")
}
if err := runesDgTx.UnspendOutPointBalancesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to unspend outpoint balances")
}
if err := runesDgTx.DeleteRuneBalancesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete rune balances")
}
if err := runesDgTx.Commit(ctx); err != nil {
return errors.Wrap(err, "failed to commit transaction")
}
return nil
}


@@ -0,0 +1,807 @@
package runes
import (
"bytes"
"context"
"encoding/hex"
"log/slog"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
func (p *Processor) Process(ctx context.Context, blocks []*types.Block) error {
for _, block := range blocks {
ctx := logger.WithContext(ctx, slog.Int64("height", block.Header.Height))
logger.DebugContext(ctx, "Processing new block", slog.Int("txs", len(block.Transactions)))
for _, tx := range block.Transactions {
if err := p.processTx(ctx, tx, block.Header); err != nil {
return errors.Wrap(err, "failed to process tx")
}
}
if err := p.flushBlock(ctx, block.Header); err != nil {
return errors.Wrap(err, "failed to flush block")
}
logger.DebugContext(ctx, "Inserted new block")
}
return nil
}
func (p *Processor) processTx(ctx context.Context, tx *types.Transaction, blockHeader types.BlockHeader) error {
if tx.BlockHeight < int64(runes.FirstRuneHeight(p.network)) {
// prevent processing txs before the activation height
return nil
}
runestone, err := runes.DecipherRunestone(tx)
if err != nil {
return errors.Wrap(err, "failed to decipher runestone")
}
inputBalances, err := p.getInputBalances(ctx, tx.TxIn)
if err != nil {
return errors.Wrap(err, "failed to get input balances")
}
if runestone == nil && len(inputBalances) == 0 {
// no runes involved in this tx
return nil
}
unallocated := make(map[runes.RuneId]uint128.Uint128)
allocated := make(map[int]map[runes.RuneId]uint128.Uint128)
for _, balances := range inputBalances {
for runeId, balance := range balances {
unallocated[runeId] = unallocated[runeId].Add(balance.Amount)
p.newSpendOutPoints = append(p.newSpendOutPoints, balance.OutPoint)
}
}
allocate := func(output int, runeId runes.RuneId, amount uint128.Uint128) {
if _, ok := unallocated[runeId]; !ok {
return
}
// cap amount to unallocated amount
if amount.Cmp(unallocated[runeId]) > 0 {
amount = unallocated[runeId]
}
if amount.IsZero() {
return
}
if _, ok := allocated[output]; !ok {
allocated[output] = make(map[runes.RuneId]uint128.Uint128)
}
allocated[output][runeId] = allocated[output][runeId].Add(amount)
unallocated[runeId] = unallocated[runeId].Sub(amount)
}
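// e.g. with unallocated[r] = 10, allocate(0, r, 15) caps the amount at 10:
// allocated[0][r] becomes 10 and unallocated[r] drops to 0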
mints := make(map[runes.RuneId]uint128.Uint128)
var runeEtched bool
if runestone != nil {
if runestone.Mint != nil {
mintRuneId := *runestone.Mint
amount, err := p.mint(ctx, mintRuneId, blockHeader)
if err != nil {
return errors.Wrap(err, "error during mint")
}
if !amount.IsZero() {
unallocated[mintRuneId] = unallocated[mintRuneId].Add(amount)
mints[mintRuneId] = amount
}
}
etching, etchedRuneId, etchedRune, err := p.getEtchedRune(ctx, tx, runestone)
if err != nil {
return errors.Wrap(err, "error during getting etched rune")
}
if etching != nil {
runeEtched = true
}
if !runestone.Cenotaph {
// include premine in unallocated, if exists
if etching != nil {
premine := lo.FromPtr(etching.Premine)
if !premine.IsZero() {
unallocated[etchedRuneId] = unallocated[etchedRuneId].Add(premine)
mints[etchedRuneId] = mints[etchedRuneId].Add(premine)
}
}
// allocate runes
for _, edict := range runestone.Edicts {
// sanity check, should not happen since it is already checked in runes.MessageFromIntegers
if edict.Output > len(tx.TxOut) {
return errors.New("edict output index is out of range")
}
var emptyRuneId runes.RuneId
// if rune id is empty, then use etched rune id
if edict.Id == emptyRuneId {
// empty rune id is only allowed for runestones with etching
if etching == nil {
continue
}
edict.Id = etchedRuneId
}
if edict.Output == len(tx.TxOut) {
// if output == len(tx.TxOut), then allocate the amount to all outputs
// find all non-OP_RETURN outputs
var destinations []int
for i, txOut := range tx.TxOut {
if !txOut.IsOpReturn() {
destinations = append(destinations, i)
}
}
if len(destinations) > 0 {
if edict.Amount.IsZero() {
// if amount is zero, divide ALL unallocated amount to all destinations
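// e.g. 10 unallocated over 3 destinations: QuoRem64 gives amount=3, remainder=1, so the destinations receive 4, 3 and 3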
amount, remainder := unallocated[edict.Id].QuoRem64(uint64(len(destinations)))
for i, dest := range destinations {
// if i < remainder, then add 1 to amount
allocate(dest, edict.Id, lo.Ternary(i < int(remainder), amount.Add64(1), amount))
}
} else {
// if amount is not zero, allocate the amount to each destination sequentially.
// Once the unallocated amount runs out, the remaining destinations receive nothing.
for _, dest := range destinations {
allocate(dest, edict.Id, edict.Amount)
}
}
}
} else {
// allocate amount to specific output
var amount uint128.Uint128
if edict.Amount.IsZero() {
// if amount is zero, allocate the whole unallocated amount
amount = unallocated[edict.Id]
} else {
amount = edict.Amount
}
allocate(edict.Output, edict.Id, amount)
}
}
}
if etching != nil {
if err := p.createRuneEntry(ctx, runestone, etchedRuneId, etchedRune, tx, blockHeader); err != nil {
return errors.Wrap(err, "failed to create rune entry")
}
}
}
burns := make(map[runes.RuneId]uint128.Uint128)
if runestone != nil && runestone.Cenotaph {
// all input runes and minted runes in a tx with cenotaph are burned
for runeId, amount := range unallocated {
burns[runeId] = burns[runeId].Add(amount)
}
} else {
// assign all unallocated runes to the default output (pointer), or to the first
// non-OP_RETURN output if there is no pointer or if the pointer exceeds the number of outputs
var pointer *uint64
if runestone != nil && !runestone.Cenotaph && runestone.Pointer != nil && *runestone.Pointer < uint64(len(tx.TxOut)) {
pointer = runestone.Pointer
}
// if no pointer is provided, use the first non-OP_RETURN output
if pointer == nil {
for i, txOut := range tx.TxOut {
if !txOut.IsOpReturn() {
pointer = lo.ToPtr(uint64(i))
break
}
}
}
if pointer != nil {
// allocate all unallocated runes to the pointer
output := int(*pointer)
for runeId, amount := range unallocated {
allocate(output, runeId, amount)
}
} else {
// if pointer is still nil, then no output is available. Burn all unallocated runes.
for runeId, amount := range unallocated {
burns[runeId] = burns[runeId].Add(amount)
}
}
}
// update outpoint balances
for output, balances := range allocated {
if tx.TxOut[output].IsOpReturn() {
// burn all allocated runes to OP_RETURN outputs
for runeId, amount := range balances {
burns[runeId] = burns[runeId].Add(amount)
}
continue
}
outPoint := wire.OutPoint{
Hash: tx.TxHash,
Index: uint32(output),
}
for runeId, amount := range balances {
p.newOutPointBalances[outPoint] = append(p.newOutPointBalances[outPoint], &entity.OutPointBalance{
RuneId: runeId,
PkScript: tx.TxOut[output].PkScript,
OutPoint: outPoint,
Amount: amount,
BlockHeight: uint64(tx.BlockHeight),
SpentHeight: nil,
})
}
}
if err := p.updateNewBalances(ctx, tx, inputBalances, allocated); err != nil {
return errors.Wrap(err, "failed to update new balances")
}
// increment burned amounts in rune entries
if err := p.incrementBurnedAmount(ctx, burns); err != nil {
return errors.Wrap(err, "failed to update burned amount")
}
// construct RuneTransaction
runeTx := entity.RuneTransaction{
Hash: tx.TxHash,
BlockHeight: uint64(blockHeader.Height),
Index: tx.Index,
Timestamp: blockHeader.Timestamp,
Inputs: make([]*entity.TxInputOutput, 0),
Outputs: make([]*entity.TxInputOutput, 0),
Mints: mints,
Burns: burns,
Runestone: runestone,
RuneEtched: runeEtched,
}
for inputIndex, balances := range inputBalances {
for runeId, balance := range balances {
runeTx.Inputs = append(runeTx.Inputs, &entity.TxInputOutput{
PkScript: balance.PkScript,
RuneId: runeId,
Amount: balance.Amount,
Index: uint32(inputIndex),
TxHash: tx.TxIn[inputIndex].PreviousOutTxHash,
TxOutIndex: tx.TxIn[inputIndex].PreviousOutIndex,
})
}
}
for outputIndex, balances := range allocated {
pkScript := tx.TxOut[outputIndex].PkScript
for runeId, amount := range balances {
runeTx.Outputs = append(runeTx.Outputs, &entity.TxInputOutput{
PkScript: pkScript,
RuneId: runeId,
Amount: amount,
Index: uint32(outputIndex),
TxHash: tx.TxHash,
TxOutIndex: uint32(outputIndex),
})
}
}
p.newRuneTxs = append(p.newRuneTxs, &runeTx)
return nil
}
func (p *Processor) getInputBalances(ctx context.Context, txInputs []*types.TxIn) (map[int]map[runes.RuneId]*entity.OutPointBalance, error) {
inputBalances := make(map[int]map[runes.RuneId]*entity.OutPointBalance)
for i, txIn := range txInputs {
balances, err := p.getRunesBalancesAtOutPoint(ctx, wire.OutPoint{
Hash: txIn.PreviousOutTxHash,
Index: txIn.PreviousOutIndex,
})
if err != nil {
return nil, errors.Wrap(err, "failed to get runes balances at outpoint")
}
if len(balances) > 0 {
inputBalances[i] = balances
}
}
return inputBalances, nil
}
func (p *Processor) updateNewBalances(ctx context.Context, tx *types.Transaction, inputBalances map[int]map[runes.RuneId]*entity.OutPointBalance, allocated map[int]map[runes.RuneId]uint128.Uint128) error {
// getBalanceFromDg returns the balance of the pkScript and runeId as of the previous block (the latest flushed state)
getBalanceFromDg := func(ctx context.Context, pkScript []byte, runeId runes.RuneId) (uint128.Uint128, error) {
balance, err := p.runesDg.GetBalanceByPkScriptAndRuneId(ctx, pkScript, runeId, uint64(tx.BlockHeight-1))
if err != nil {
if errors.Is(err, errs.NotFound) {
return uint128.Zero, nil
}
return uint128.Uint128{}, errors.Wrap(err, "failed to get balance by pk script and rune id")
}
return balance.Amount, nil
}
// deduct balances used in inputs
for _, balances := range inputBalances {
for runeId, balance := range balances {
pkScript := balance.PkScript
pkScriptStr := hex.EncodeToString(pkScript)
if _, ok := p.newBalances[pkScriptStr]; !ok {
p.newBalances[pkScriptStr] = make(map[runes.RuneId]uint128.Uint128)
}
if _, ok := p.newBalances[pkScriptStr][runeId]; !ok {
balance, err := getBalanceFromDg(ctx, pkScript, runeId)
if err != nil {
return errors.WithStack(err)
}
p.newBalances[pkScriptStr][runeId] = balance
}
if p.newBalances[pkScriptStr][runeId].Cmp(balance.Amount) < 0 {
// the pkScript's total balance is less than the balance in the input. This should be impossible; something is wrong.
return errors.Errorf("current balance is less than balance in input: %s", runeId)
}
p.newBalances[pkScriptStr][runeId] = p.newBalances[pkScriptStr][runeId].Sub(balance.Amount)
}
}
// add balances allocated in outputs
for outputIndex, balances := range allocated {
pkScript := tx.TxOut[outputIndex].PkScript
pkScriptStr := hex.EncodeToString(pkScript)
for runeId, amount := range balances {
if _, ok := p.newBalances[pkScriptStr]; !ok {
p.newBalances[pkScriptStr] = make(map[runes.RuneId]uint128.Uint128)
}
if _, ok := p.newBalances[pkScriptStr][runeId]; !ok {
balance, err := getBalanceFromDg(ctx, pkScript, runeId)
if err != nil {
return errors.WithStack(err)
}
p.newBalances[pkScriptStr][runeId] = balance
}
p.newBalances[pkScriptStr][runeId] = p.newBalances[pkScriptStr][runeId].Add(amount)
}
}
return nil
}
func (p *Processor) mint(ctx context.Context, runeId runes.RuneId, blockHeader types.BlockHeader) (uint128.Uint128, error) {
runeEntry, err := p.getRuneEntryByRuneId(ctx, runeId)
if err != nil {
if errors.Is(err, errs.NotFound) {
return uint128.Zero, nil
}
return uint128.Uint128{}, errors.Wrap(err, "failed to get rune entry by rune id")
}
amount, err := runeEntry.GetMintableAmount(uint64(blockHeader.Height))
if err != nil {
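// not mintable at this height (e.g. mint terms not satisfied or cap reached); treat as a zero mint instead of propagating the error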
return uint128.Zero, nil
}
if err := p.incrementMintCount(ctx, runeId, blockHeader); err != nil {
return uint128.Zero, errors.Wrap(err, "failed to increment mint count")
}
return amount, nil
}
func (p *Processor) getEtchedRune(ctx context.Context, tx *types.Transaction, runestone *runes.Runestone) (*runes.Etching, runes.RuneId, runes.Rune, error) {
if runestone.Etching == nil {
return nil, runes.RuneId{}, runes.Rune{}, nil
}
rune := runestone.Etching.Rune
if rune != nil {
minimumRune := runes.MinimumRuneAtHeight(p.network, uint64(tx.BlockHeight))
if rune.Cmp(minimumRune) < 0 {
return nil, runes.RuneId{}, runes.Rune{}, nil
}
if rune.IsReserved() {
return nil, runes.RuneId{}, runes.Rune{}, nil
}
ok, err := p.isRuneExists(ctx, *rune)
if err != nil {
return nil, runes.RuneId{}, runes.Rune{}, errors.Wrap(err, "error during check rune existence")
}
if ok {
return nil, runes.RuneId{}, runes.Rune{}, nil
}
// check if tx commits to the rune
commit, err := p.txCommitsToRune(ctx, tx, *rune)
if err != nil {
return nil, runes.RuneId{}, runes.Rune{}, errors.Wrap(err, "error during check tx commits to rune")
}
if !commit {
return nil, runes.RuneId{}, runes.Rune{}, nil
}
} else {
rune = lo.ToPtr(runes.GetReservedRune(uint64(tx.BlockHeight), tx.Index))
}
runeId, err := runes.NewRuneId(uint64(tx.BlockHeight), tx.Index)
if err != nil {
return nil, runes.RuneId{}, runes.Rune{}, errors.Wrap(err, "failed to create rune id")
}
return runestone.Etching, runeId, *rune, nil
}
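// txCommitsToRune reports whether any input of tx commits to the given rune:
// some tapscript in an input's witness must push the rune's commitment (its
// numeric value as little-endian bytes, per the Runes protocol), the spent
// output must be P2TR, and the input must have at least RUNE_COMMIT_BLOCKS
// confirmations.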
func (p *Processor) txCommitsToRune(ctx context.Context, tx *types.Transaction, rune runes.Rune) (bool, error) {
commitment := rune.Commitment()
for i, txIn := range tx.TxIn {
tapscript, ok := extractTapScript(txIn.Witness)
if !ok {
continue
}
for tapscript.Next() {
// ignore errors and continue to next input
if tapscript.Err() != nil {
break
}
// check opcode is valid
if !runes.IsDataPushOpCode(tapscript.Opcode()) {
continue
}
// tapscript must contain commitment of the rune
if !bytes.Equal(tapscript.Data(), commitment) {
continue
}
// We cannot verify that the input utxo is a P2TR output from the input alone;
// we need to check the spent output's pk script.
prevTx, err := p.bitcoinClient.GetTransactionByHash(ctx, txIn.PreviousOutTxHash)
if err != nil && errors.Is(err, errs.NotFound) {
continue
}
if err != nil {
return false, errors.Wrapf(err, "can't get previous txout for txin `%v:%v`", tx.TxHash.String(), i)
}
pkScript := prevTx.TxOut[txIn.PreviousOutIndex].PkScript
// input utxo must be P2TR
if !txscript.IsPayToTaproot(pkScript) {
break
}
// input must be mature enough
confirmations := tx.BlockHeight - prevTx.BlockHeight + 1
if confirmations < runes.RUNE_COMMIT_BLOCKS {
continue
}
return true, nil
}
}
return false, nil
}
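// extractTapScript returns a tokenizer over the leaf script of a taproot
// script-path spend. After the optional annex is stripped, such a witness is
// [stack elements..., leaf script, control block], so the script is the
// second-to-last element (BIP 341).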
func extractTapScript(witness [][]byte) (txscript.ScriptTokenizer, bool) {
witness = removeAnnexFromWitness(witness)
if len(witness) < 2 {
return txscript.ScriptTokenizer{}, false
}
script := witness[len(witness)-2]
return txscript.MakeScriptTokenizer(0, script), true
}
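// removeAnnexFromWitness drops the optional annex, which BIP 341 defines as a
// final witness element starting with 0x50 when the witness has at least two elements.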
func removeAnnexFromWitness(witness [][]byte) [][]byte {
if len(witness) >= 2 && len(witness[len(witness)-1]) > 0 && witness[len(witness)-1][0] == txscript.TaprootAnnexTag {
return witness[:len(witness)-1]
}
return witness
}
func (p *Processor) createRuneEntry(ctx context.Context, runestone *runes.Runestone, runeId runes.RuneId, rune runes.Rune, tx *types.Transaction, blockHeader types.BlockHeader) error {
count, err := p.countRuneEntries(ctx)
if err != nil {
return errors.Wrap(err, "failed to count rune entries")
}
var runeEntry *runes.RuneEntry
if runestone.Cenotaph {
runeEntry = &runes.RuneEntry{
RuneId: runeId,
Number: count,
SpacedRune: runes.NewSpacedRune(rune, 0),
Mints: uint128.Zero,
BurnedAmount: uint128.Zero,
Premine: uint128.Zero,
Symbol: '¤',
Divisibility: 0,
Terms: nil,
Turbo: false,
CompletedAt: time.Time{},
CompletedAtHeight: nil,
EtchingBlock: uint64(tx.BlockHeight),
EtchingTxHash: tx.TxHash,
EtchedAt: blockHeader.Timestamp,
}
} else {
etching := runestone.Etching
runeEntry = &runes.RuneEntry{
RuneId: runeId,
Number: count,
SpacedRune: runes.NewSpacedRune(rune, lo.FromPtr(etching.Spacers)),
Mints: uint128.Zero,
BurnedAmount: uint128.Zero,
Premine: lo.FromPtr(etching.Premine),
Symbol: lo.FromPtrOr(etching.Symbol, '¤'),
Divisibility: lo.FromPtr(etching.Divisibility),
Terms: etching.Terms,
Turbo: etching.Turbo,
CompletedAt: time.Time{},
CompletedAtHeight: nil,
EtchingBlock: uint64(tx.BlockHeight),
EtchingTxHash: tx.TxHash,
EtchedAt: blockHeader.Timestamp,
}
}
p.newRuneEntries[runeId] = runeEntry
p.newRuneEntryStates[runeId] = runeEntry
return nil
}
func (p *Processor) incrementMintCount(ctx context.Context, runeId runes.RuneId, blockHeader types.BlockHeader) (err error) {
runeEntry, err := p.getRuneEntryByRuneId(ctx, runeId)
if err != nil {
return errors.Wrap(err, "failed to get rune entry by rune id")
}
runeEntry.Mints = runeEntry.Mints.Add64(1)
if runeEntry.Mints == lo.FromPtr(runeEntry.Terms.Cap) {
runeEntry.CompletedAt = blockHeader.Timestamp
runeEntry.CompletedAtHeight = lo.ToPtr(uint64(blockHeader.Height))
}
p.newRuneEntryStates[runeId] = runeEntry
return nil
}
func (p *Processor) incrementBurnedAmount(ctx context.Context, burned map[runes.RuneId]uint128.Uint128) (err error) {
runeEntries := make(map[runes.RuneId]*runes.RuneEntry)
runeIdsToFetch := make([]runes.RuneId, 0)
for runeId, amount := range burned {
if amount.IsZero() {
// ignore zero burn amount
continue
}
runeEntry, ok := p.newRuneEntryStates[runeId]
if !ok {
runeIdsToFetch = append(runeIdsToFetch, runeId)
} else {
runeEntries[runeId] = runeEntry
}
}
if len(runeIdsToFetch) > 0 {
for _, runeId := range runeIdsToFetch {
runeEntry, err := p.getRuneEntryByRuneId(ctx, runeId)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "rune entry not found")
}
return errors.Wrap(err, "failed to get rune entry by rune id")
}
runeEntries[runeId] = runeEntry
}
}
// update rune entries
for runeId, amount := range burned {
runeEntry, ok := runeEntries[runeId]
if !ok {
continue
}
runeEntry.BurnedAmount = runeEntry.BurnedAmount.Add(amount)
p.newRuneEntryStates[runeId] = runeEntry
}
return nil
}
func (p *Processor) countRuneEntries(ctx context.Context) (uint64, error) {
runeCountInDB, err := p.runesDg.CountRuneEntries(ctx)
if err != nil {
return 0, errors.Wrap(err, "failed to count rune entries in db")
}
return runeCountInDB + uint64(len(p.newRuneEntries)), nil
}
func (p *Processor) getRuneEntryByRuneId(ctx context.Context, runeId runes.RuneId) (*runes.RuneEntry, error) {
runeEntry, ok := p.newRuneEntryStates[runeId]
if ok {
return runeEntry, nil
}
// not checking from p.newRuneEntries since new rune entries add to p.newRuneEntryStates as well
runeEntry, err := p.runesDg.GetRuneEntryByRuneId(ctx, runeId)
if err != nil {
return nil, errors.Wrap(err, "failed to get rune entry by rune id")
}
return runeEntry, nil
}
func (p *Processor) isRuneExists(ctx context.Context, rune runes.Rune) (bool, error) {
for _, runeEntry := range p.newRuneEntries {
if runeEntry.SpacedRune.Rune == rune {
return true, nil
}
}
_, err := p.runesDg.GetRuneIdFromRune(ctx, rune)
if err != nil {
if errors.Is(err, errs.NotFound) {
return false, nil
}
return false, errors.Wrap(err, "failed to get rune id from rune")
}
return true, nil
}
func (p *Processor) getRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error) {
if outPointBalances, ok := p.newOutPointBalances[outPoint]; ok {
balances := make(map[runes.RuneId]*entity.OutPointBalance)
for _, outPointBalance := range outPointBalances {
balances[outPointBalance.RuneId] = outPointBalance
}
return balances, nil
}
balances, err := p.runesDg.GetRunesBalancesAtOutPoint(ctx, outPoint)
if err != nil {
return nil, errors.Wrap(err, "failed to get runes balances at outpoint")
}
return balances, nil
}
func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeader) error {
runesDgTx, err := p.runesDg.BeginRunesTx(ctx)
if err != nil {
return errors.Wrap(err, "failed to begin runes tx")
}
defer func() {
if err := runesDgTx.Rollback(ctx); err != nil {
logger.WarnContext(ctx, "failed to rollback transaction",
slogx.Error(err),
slogx.String("event", "rollback_runes_insertion"),
)
}
}()
// CreateIndexedBlock must be performed before other flush methods to correctly calculate event hash
eventHash, err := p.calculateEventHash(blockHeader)
if err != nil {
return errors.Wrap(err, "failed to calculate event hash")
}
prevIndexedBlock, err := runesDgTx.GetIndexedBlockByHeight(ctx, blockHeader.Height-1)
if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == startingBlockHeader[p.network].Height {
prevIndexedBlock = &entity.IndexedBlock{
Height: startingBlockHeader[p.network].Height,
Hash: startingBlockHeader[p.network].Hash,
EventHash: chainhash.Hash{},
CumulativeEventHash: chainhash.Hash{},
}
err = nil
}
if err != nil {
if errors.Is(err, errs.NotFound) {
return errors.Errorf("indexed block not found for height %d. Indexed block must be created for every Bitcoin block", blockHeader.Height)
}
return errors.Wrap(err, "failed to get indexed block by height")
}
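// chain this block's event hash onto the previous block's cumulative hash:
// cumulativeEventHash = DoubleSHA256(prevCumulativeEventHash || eventHash)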
cumulativeEventHash := chainhash.DoubleHashH(append(prevIndexedBlock.CumulativeEventHash[:], eventHash[:]...))
if err := runesDgTx.CreateIndexedBlock(ctx, &entity.IndexedBlock{
Height: blockHeader.Height,
Hash: blockHeader.Hash,
PrevHash: blockHeader.PrevBlock,
EventHash: eventHash,
CumulativeEventHash: cumulativeEventHash,
}); err != nil {
return errors.Wrap(err, "failed to create indexed block")
}
// flush new rune entries
{
for _, runeEntry := range p.newRuneEntries {
if err := runesDgTx.CreateRuneEntry(ctx, runeEntry, uint64(blockHeader.Height)); err != nil {
return errors.Wrap(err, "failed to create rune entry")
}
}
p.newRuneEntries = make(map[runes.RuneId]*runes.RuneEntry)
}
// flush new rune entry states
{
for _, runeEntry := range p.newRuneEntryStates {
if err := runesDgTx.CreateRuneEntryState(ctx, runeEntry, uint64(blockHeader.Height)); err != nil {
return errors.Wrap(err, "failed to create rune entry state")
}
}
p.newRuneEntryStates = make(map[runes.RuneId]*runes.RuneEntry)
}
// flush new outpoint balances
{
newBalances := make([]*entity.OutPointBalance, 0)
for _, balances := range p.newOutPointBalances {
newBalances = append(newBalances, balances...)
}
if err := runesDgTx.CreateOutPointBalances(ctx, newBalances); err != nil {
return errors.Wrap(err, "failed to create outpoint balances")
}
p.newOutPointBalances = make(map[wire.OutPoint][]*entity.OutPointBalance)
}
// flush new spend outpoints
{
for _, outPoint := range p.newSpendOutPoints {
if err := runesDgTx.SpendOutPointBalances(ctx, outPoint, uint64(blockHeader.Height)); err != nil {
return errors.Wrap(err, "failed to create spend outpoint")
}
}
p.newSpendOutPoints = make([]wire.OutPoint, 0)
}
// flush new balances
{
params := make([]datagateway.CreateRuneBalancesParams, 0)
for pkScriptStr, balances := range p.newBalances {
pkScript, err := hex.DecodeString(pkScriptStr)
if err != nil {
return errors.Wrap(err, "failed to decode pk script")
}
for runeId, balance := range balances {
params = append(params, datagateway.CreateRuneBalancesParams{
PkScript: pkScript,
RuneId: runeId,
Balance: balance,
BlockHeight: uint64(blockHeader.Height),
})
}
}
if err := runesDgTx.CreateRuneBalances(ctx, params); err != nil {
return errors.Wrap(err, "failed to create balances at block")
}
p.newBalances = make(map[string]map[runes.RuneId]uint128.Uint128)
}
// flush new rune transactions
{
for _, runeTx := range p.newRuneTxs {
if err := runesDgTx.CreateRuneTransaction(ctx, runeTx); err != nil {
return errors.Wrap(err, "failed to create rune transaction")
}
}
p.newRuneTxs = make([]*entity.RuneTransaction, 0)
}
if err := runesDgTx.Commit(ctx); err != nil {
return errors.Wrap(err, "failed to commit runes tx")
}
// submit event to reporting system
if p.reportingClient != nil {
if err := p.reportingClient.SubmitBlockReport(ctx, reportingclient.SubmitBlockReportPayload{
Type: "runes",
ClientVersion: Version,
DBVersion: DBVersion,
EventHashVersion: EventHashVersion,
Network: p.network,
BlockHeight: uint64(blockHeader.Height),
BlockHash: blockHeader.Hash,
EventHash: eventHash,
CumulativeEventHash: cumulativeEventHash,
}); err != nil {
return errors.Wrap(err, "failed to submit block report")
}
}
return nil
}


@@ -0,0 +1,130 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: batch.go
package gen
import (
"context"
"errors"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"
)
var (
ErrBatchAlreadyClosed = errors.New("batch already closed")
)
const createOutPointBalances = `-- name: CreateOutPointBalances :batchexec
INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7)
`
type CreateOutPointBalancesBatchResults struct {
br pgx.BatchResults
tot int
closed bool
}
type CreateOutPointBalancesParams struct {
RuneID string
Pkscript string
TxHash string
TxIdx int32
Amount pgtype.Numeric
BlockHeight int32
SpentHeight pgtype.Int4
}
func (q *Queries) CreateOutPointBalances(ctx context.Context, arg []CreateOutPointBalancesParams) *CreateOutPointBalancesBatchResults {
batch := &pgx.Batch{}
for _, a := range arg {
vals := []interface{}{
a.RuneID,
a.Pkscript,
a.TxHash,
a.TxIdx,
a.Amount,
a.BlockHeight,
a.SpentHeight,
}
batch.Queue(createOutPointBalances, vals...)
}
br := q.db.SendBatch(ctx, batch)
return &CreateOutPointBalancesBatchResults{br, len(arg), false}
}
func (b *CreateOutPointBalancesBatchResults) Exec(f func(int, error)) {
defer b.br.Close()
for t := 0; t < b.tot; t++ {
if b.closed {
if f != nil {
f(t, ErrBatchAlreadyClosed)
}
continue
}
_, err := b.br.Exec()
if f != nil {
f(t, err)
}
}
}
func (b *CreateOutPointBalancesBatchResults) Close() error {
b.closed = true
return b.br.Close()
}
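// Illustrative use of the generated batch API (a sketch, not generated code):
//
//	br := q.CreateOutPointBalances(ctx, params)
//	br.Exec(func(i int, err error) {
//		if err != nil {
//			// handle the failure for params[i]
//		}
//	})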
const createRuneBalanceAtBlock = `-- name: CreateRuneBalanceAtBlock :batchexec
INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4)
`
type CreateRuneBalanceAtBlockBatchResults struct {
br pgx.BatchResults
tot int
closed bool
}
type CreateRuneBalanceAtBlockParams struct {
Pkscript string
BlockHeight int32
RuneID string
Amount pgtype.Numeric
}
func (q *Queries) CreateRuneBalanceAtBlock(ctx context.Context, arg []CreateRuneBalanceAtBlockParams) *CreateRuneBalanceAtBlockBatchResults {
batch := &pgx.Batch{}
for _, a := range arg {
vals := []interface{}{
a.Pkscript,
a.BlockHeight,
a.RuneID,
a.Amount,
}
batch.Queue(createRuneBalanceAtBlock, vals...)
}
br := q.db.SendBatch(ctx, batch)
return &CreateRuneBalanceAtBlockBatchResults{br, len(arg), false}
}
func (b *CreateRuneBalanceAtBlockBatchResults) Exec(f func(int, error)) {
defer b.br.Close()
for t := 0; t < b.tot; t++ {
if b.closed {
if f != nil {
f(t, ErrBatchAlreadyClosed)
}
continue
}
_, err := b.br.Exec()
if f != nil {
f(t, err)
}
}
}
func (b *CreateRuneBalanceAtBlockBatchResults) Close() error {
b.closed = true
return b.br.Close()
}


@@ -0,0 +1,816 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: data.sql
package gen
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
const countRuneEntries = `-- name: CountRuneEntries :one
SELECT COUNT(*) FROM runes_entries
`
func (q *Queries) CountRuneEntries(ctx context.Context) (int64, error) {
row := q.db.QueryRow(ctx, countRuneEntries)
var count int64
err := row.Scan(&count)
return count, err
}
const createIndexedBlock = `-- name: CreateIndexedBlock :exec
INSERT INTO runes_indexed_blocks (hash, height, prev_hash, event_hash, cumulative_event_hash) VALUES ($1, $2, $3, $4, $5)
`
type CreateIndexedBlockParams struct {
Hash string
Height int32
PrevHash string
EventHash string
CumulativeEventHash string
}
func (q *Queries) CreateIndexedBlock(ctx context.Context, arg CreateIndexedBlockParams) error {
_, err := q.db.Exec(ctx, createIndexedBlock,
arg.Hash,
arg.Height,
arg.PrevHash,
arg.EventHash,
arg.CumulativeEventHash,
)
return err
}
const createRuneEntry = `-- name: CreateRuneEntry :exec
INSERT INTO runes_entries (rune_id, rune, number, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18)
`
type CreateRuneEntryParams struct {
RuneID string
Rune string
Number int64
Spacers int32
Premine pgtype.Numeric
Symbol int32
Divisibility int16
Terms bool
TermsAmount pgtype.Numeric
TermsCap pgtype.Numeric
TermsHeightStart pgtype.Int4
TermsHeightEnd pgtype.Int4
TermsOffsetStart pgtype.Int4
TermsOffsetEnd pgtype.Int4
Turbo bool
EtchingBlock int32
EtchingTxHash string
EtchedAt pgtype.Timestamp
}
func (q *Queries) CreateRuneEntry(ctx context.Context, arg CreateRuneEntryParams) error {
_, err := q.db.Exec(ctx, createRuneEntry,
arg.RuneID,
arg.Rune,
arg.Number,
arg.Spacers,
arg.Premine,
arg.Symbol,
arg.Divisibility,
arg.Terms,
arg.TermsAmount,
arg.TermsCap,
arg.TermsHeightStart,
arg.TermsHeightEnd,
arg.TermsOffsetStart,
arg.TermsOffsetEnd,
arg.Turbo,
arg.EtchingBlock,
arg.EtchingTxHash,
arg.EtchedAt,
)
return err
}
const createRuneEntryState = `-- name: CreateRuneEntryState :exec
INSERT INTO runes_entry_states (rune_id, block_height, mints, burned_amount, completed_at, completed_at_height) VALUES ($1, $2, $3, $4, $5, $6)
`
type CreateRuneEntryStateParams struct {
RuneID string
BlockHeight int32
Mints pgtype.Numeric
BurnedAmount pgtype.Numeric
CompletedAt pgtype.Timestamp
CompletedAtHeight pgtype.Int4
}
func (q *Queries) CreateRuneEntryState(ctx context.Context, arg CreateRuneEntryStateParams) error {
_, err := q.db.Exec(ctx, createRuneEntryState,
arg.RuneID,
arg.BlockHeight,
arg.Mints,
arg.BurnedAmount,
arg.CompletedAt,
arg.CompletedAtHeight,
)
return err
}
const createRuneTransaction = `-- name: CreateRuneTransaction :exec
INSERT INTO runes_transactions (hash, block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
`
type CreateRuneTransactionParams struct {
Hash string
BlockHeight int32
Index int32
Timestamp pgtype.Timestamp
Inputs []byte
Outputs []byte
Mints []byte
Burns []byte
RuneEtched bool
}
func (q *Queries) CreateRuneTransaction(ctx context.Context, arg CreateRuneTransactionParams) error {
_, err := q.db.Exec(ctx, createRuneTransaction,
arg.Hash,
arg.BlockHeight,
arg.Index,
arg.Timestamp,
arg.Inputs,
arg.Outputs,
arg.Mints,
arg.Burns,
arg.RuneEtched,
)
return err
}
const createRunestone = `-- name: CreateRunestone :exec
INSERT INTO runes_runestones (tx_hash, block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21)
`
type CreateRunestoneParams struct {
TxHash string
BlockHeight int32
Etching bool
EtchingDivisibility pgtype.Int2
EtchingPremine pgtype.Numeric
EtchingRune pgtype.Text
EtchingSpacers pgtype.Int4
EtchingSymbol pgtype.Int4
EtchingTerms pgtype.Bool
EtchingTermsAmount pgtype.Numeric
EtchingTermsCap pgtype.Numeric
EtchingTermsHeightStart pgtype.Int4
EtchingTermsHeightEnd pgtype.Int4
EtchingTermsOffsetStart pgtype.Int4
EtchingTermsOffsetEnd pgtype.Int4
EtchingTurbo pgtype.Bool
Edicts []byte
Mint pgtype.Text
Pointer pgtype.Int4
Cenotaph bool
Flaws int32
}
func (q *Queries) CreateRunestone(ctx context.Context, arg CreateRunestoneParams) error {
_, err := q.db.Exec(ctx, createRunestone,
arg.TxHash,
arg.BlockHeight,
arg.Etching,
arg.EtchingDivisibility,
arg.EtchingPremine,
arg.EtchingRune,
arg.EtchingSpacers,
arg.EtchingSymbol,
arg.EtchingTerms,
arg.EtchingTermsAmount,
arg.EtchingTermsCap,
arg.EtchingTermsHeightStart,
arg.EtchingTermsHeightEnd,
arg.EtchingTermsOffsetStart,
arg.EtchingTermsOffsetEnd,
arg.EtchingTurbo,
arg.Edicts,
arg.Mint,
arg.Pointer,
arg.Cenotaph,
arg.Flaws,
)
return err
}
const deleteIndexedBlockSinceHeight = `-- name: DeleteIndexedBlockSinceHeight :exec
DELETE FROM runes_indexed_blocks WHERE height >= $1
`
func (q *Queries) DeleteIndexedBlockSinceHeight(ctx context.Context, height int32) error {
_, err := q.db.Exec(ctx, deleteIndexedBlockSinceHeight, height)
return err
}
const deleteOutPointBalancesSinceHeight = `-- name: DeleteOutPointBalancesSinceHeight :exec
DELETE FROM runes_outpoint_balances WHERE block_height >= $1
`
func (q *Queries) DeleteOutPointBalancesSinceHeight(ctx context.Context, blockHeight int32) error {
_, err := q.db.Exec(ctx, deleteOutPointBalancesSinceHeight, blockHeight)
return err
}
const deleteRuneBalancesSinceHeight = `-- name: DeleteRuneBalancesSinceHeight :exec
DELETE FROM runes_balances WHERE block_height >= $1
`
func (q *Queries) DeleteRuneBalancesSinceHeight(ctx context.Context, blockHeight int32) error {
_, err := q.db.Exec(ctx, deleteRuneBalancesSinceHeight, blockHeight)
return err
}
const deleteRuneEntriesSinceHeight = `-- name: DeleteRuneEntriesSinceHeight :exec
DELETE FROM runes_entries WHERE etching_block >= $1
`
func (q *Queries) DeleteRuneEntriesSinceHeight(ctx context.Context, etchingBlock int32) error {
_, err := q.db.Exec(ctx, deleteRuneEntriesSinceHeight, etchingBlock)
return err
}
const deleteRuneEntryStatesSinceHeight = `-- name: DeleteRuneEntryStatesSinceHeight :exec
DELETE FROM runes_entry_states WHERE block_height >= $1
`
func (q *Queries) DeleteRuneEntryStatesSinceHeight(ctx context.Context, blockHeight int32) error {
_, err := q.db.Exec(ctx, deleteRuneEntryStatesSinceHeight, blockHeight)
return err
}
const deleteRuneTransactionsSinceHeight = `-- name: DeleteRuneTransactionsSinceHeight :exec
DELETE FROM runes_transactions WHERE block_height >= $1
`
func (q *Queries) DeleteRuneTransactionsSinceHeight(ctx context.Context, blockHeight int32) error {
_, err := q.db.Exec(ctx, deleteRuneTransactionsSinceHeight, blockHeight)
return err
}
const deleteRunestonesSinceHeight = `-- name: DeleteRunestonesSinceHeight :exec
DELETE FROM runes_runestones WHERE block_height >= $1
`
func (q *Queries) DeleteRunestonesSinceHeight(ctx context.Context, blockHeight int32) error {
_, err := q.db.Exec(ctx, deleteRunestonesSinceHeight, blockHeight)
return err
}
const getBalanceByPkScriptAndRuneId = `-- name: GetBalanceByPkScriptAndRuneId :one
SELECT pkscript, block_height, rune_id, amount FROM runes_balances WHERE pkscript = $1 AND rune_id = $2 AND block_height <= $3 ORDER BY block_height DESC LIMIT 1
`
type GetBalanceByPkScriptAndRuneIdParams struct {
Pkscript string
RuneID string
BlockHeight int32
}
func (q *Queries) GetBalanceByPkScriptAndRuneId(ctx context.Context, arg GetBalanceByPkScriptAndRuneIdParams) (RunesBalance, error) {
row := q.db.QueryRow(ctx, getBalanceByPkScriptAndRuneId, arg.Pkscript, arg.RuneID, arg.BlockHeight)
var i RunesBalance
err := row.Scan(
&i.Pkscript,
&i.BlockHeight,
&i.RuneID,
&i.Amount,
)
return i, err
}
const getBalancesByPkScript = `-- name: GetBalancesByPkScript :many
WITH balances AS (
SELECT DISTINCT ON (rune_id) pkscript, block_height, rune_id, amount FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC
)
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0
`
type GetBalancesByPkScriptParams struct {
Pkscript string
BlockHeight int32
}
type GetBalancesByPkScriptRow struct {
Pkscript string
BlockHeight int32
RuneID string
Amount pgtype.Numeric
}
func (q *Queries) GetBalancesByPkScript(ctx context.Context, arg GetBalancesByPkScriptParams) ([]GetBalancesByPkScriptRow, error) {
rows, err := q.db.Query(ctx, getBalancesByPkScript, arg.Pkscript, arg.BlockHeight)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetBalancesByPkScriptRow
for rows.Next() {
var i GetBalancesByPkScriptRow
if err := rows.Scan(
&i.Pkscript,
&i.BlockHeight,
&i.RuneID,
&i.Amount,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getBalancesByRuneId = `-- name: GetBalancesByRuneId :many
WITH balances AS (
SELECT DISTINCT ON (pkscript) pkscript, block_height, rune_id, amount FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, block_height DESC
)
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0
`
type GetBalancesByRuneIdParams struct {
RuneID string
BlockHeight int32
}
type GetBalancesByRuneIdRow struct {
Pkscript string
BlockHeight int32
RuneID string
Amount pgtype.Numeric
}
func (q *Queries) GetBalancesByRuneId(ctx context.Context, arg GetBalancesByRuneIdParams) ([]GetBalancesByRuneIdRow, error) {
rows, err := q.db.Query(ctx, getBalancesByRuneId, arg.RuneID, arg.BlockHeight)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetBalancesByRuneIdRow
for rows.Next() {
var i GetBalancesByRuneIdRow
if err := rows.Scan(
&i.Pkscript,
&i.BlockHeight,
&i.RuneID,
&i.Amount,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getIndexedBlockByHeight = `-- name: GetIndexedBlockByHeight :one
SELECT height, hash, prev_hash, event_hash, cumulative_event_hash FROM runes_indexed_blocks WHERE height = $1
`
func (q *Queries) GetIndexedBlockByHeight(ctx context.Context, height int32) (RunesIndexedBlock, error) {
row := q.db.QueryRow(ctx, getIndexedBlockByHeight, height)
var i RunesIndexedBlock
err := row.Scan(
&i.Height,
&i.Hash,
&i.PrevHash,
&i.EventHash,
&i.CumulativeEventHash,
)
return i, err
}
const getLatestIndexedBlock = `-- name: GetLatestIndexedBlock :one
SELECT height, hash, prev_hash, event_hash, cumulative_event_hash FROM runes_indexed_blocks ORDER BY height DESC LIMIT 1
`
func (q *Queries) GetLatestIndexedBlock(ctx context.Context) (RunesIndexedBlock, error) {
row := q.db.QueryRow(ctx, getLatestIndexedBlock)
var i RunesIndexedBlock
err := row.Scan(
&i.Height,
&i.Hash,
&i.PrevHash,
&i.EventHash,
&i.CumulativeEventHash,
)
return i, err
}
const getOutPointBalancesAtOutPoint = `-- name: GetOutPointBalancesAtOutPoint :many
SELECT rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height FROM runes_outpoint_balances WHERE tx_hash = $1 AND tx_idx = $2
`
type GetOutPointBalancesAtOutPointParams struct {
TxHash string
TxIdx int32
}
func (q *Queries) GetOutPointBalancesAtOutPoint(ctx context.Context, arg GetOutPointBalancesAtOutPointParams) ([]RunesOutpointBalance, error) {
rows, err := q.db.Query(ctx, getOutPointBalancesAtOutPoint, arg.TxHash, arg.TxIdx)
if err != nil {
return nil, err
}
defer rows.Close()
var items []RunesOutpointBalance
for rows.Next() {
var i RunesOutpointBalance
if err := rows.Scan(
&i.RuneID,
&i.Pkscript,
&i.TxHash,
&i.TxIdx,
&i.Amount,
&i.BlockHeight,
&i.SpentHeight,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getRuneEntriesByRuneIds = `-- name: GetRuneEntriesByRuneIds :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entry_states WHERE rune_id = ANY($1::text[]) ORDER BY rune_id, block_height DESC
)
SELECT runes_entries.rune_id, number, rune, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at, states.rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE runes_entries.rune_id = ANY($1::text[])
`
type GetRuneEntriesByRuneIdsRow struct {
RuneID string
Number int64
Rune string
Spacers int32
Premine pgtype.Numeric
Symbol int32
Divisibility int16
Terms bool
TermsAmount pgtype.Numeric
TermsCap pgtype.Numeric
TermsHeightStart pgtype.Int4
TermsHeightEnd pgtype.Int4
TermsOffsetStart pgtype.Int4
TermsOffsetEnd pgtype.Int4
Turbo bool
EtchingBlock int32
EtchingTxHash string
EtchedAt pgtype.Timestamp
RuneID_2 pgtype.Text
BlockHeight pgtype.Int4
Mints pgtype.Numeric
BurnedAmount pgtype.Numeric
CompletedAt pgtype.Timestamp
CompletedAtHeight pgtype.Int4
}
func (q *Queries) GetRuneEntriesByRuneIds(ctx context.Context, runeIds []string) ([]GetRuneEntriesByRuneIdsRow, error) {
rows, err := q.db.Query(ctx, getRuneEntriesByRuneIds, runeIds)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetRuneEntriesByRuneIdsRow
for rows.Next() {
var i GetRuneEntriesByRuneIdsRow
if err := rows.Scan(
&i.RuneID,
&i.Number,
&i.Rune,
&i.Spacers,
&i.Premine,
&i.Symbol,
&i.Divisibility,
&i.Terms,
&i.TermsAmount,
&i.TermsCap,
&i.TermsHeightStart,
&i.TermsHeightEnd,
&i.TermsOffsetStart,
&i.TermsOffsetEnd,
&i.Turbo,
&i.EtchingBlock,
&i.EtchingTxHash,
&i.EtchedAt,
&i.RuneID_2,
&i.BlockHeight,
&i.Mints,
&i.BurnedAmount,
&i.CompletedAt,
&i.CompletedAtHeight,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getRuneEntriesByRuneIdsAndHeight = `-- name: GetRuneEntriesByRuneIdsAndHeight :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entry_states WHERE rune_id = ANY($1::text[]) AND block_height <= $2 ORDER BY rune_id, block_height DESC
)
SELECT runes_entries.rune_id, number, rune, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at, states.rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE runes_entries.rune_id = ANY($1::text[]) AND etching_block <= $2
`
type GetRuneEntriesByRuneIdsAndHeightParams struct {
RuneIds []string
Height int32
}
type GetRuneEntriesByRuneIdsAndHeightRow struct {
RuneID string
Number int64
Rune string
Spacers int32
Premine pgtype.Numeric
Symbol int32
Divisibility int16
Terms bool
TermsAmount pgtype.Numeric
TermsCap pgtype.Numeric
TermsHeightStart pgtype.Int4
TermsHeightEnd pgtype.Int4
TermsOffsetStart pgtype.Int4
TermsOffsetEnd pgtype.Int4
Turbo bool
EtchingBlock int32
EtchingTxHash string
EtchedAt pgtype.Timestamp
RuneID_2 pgtype.Text
BlockHeight pgtype.Int4
Mints pgtype.Numeric
BurnedAmount pgtype.Numeric
CompletedAt pgtype.Timestamp
CompletedAtHeight pgtype.Int4
}
func (q *Queries) GetRuneEntriesByRuneIdsAndHeight(ctx context.Context, arg GetRuneEntriesByRuneIdsAndHeightParams) ([]GetRuneEntriesByRuneIdsAndHeightRow, error) {
rows, err := q.db.Query(ctx, getRuneEntriesByRuneIdsAndHeight, arg.RuneIds, arg.Height)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetRuneEntriesByRuneIdsAndHeightRow
for rows.Next() {
var i GetRuneEntriesByRuneIdsAndHeightRow
if err := rows.Scan(
&i.RuneID,
&i.Number,
&i.Rune,
&i.Spacers,
&i.Premine,
&i.Symbol,
&i.Divisibility,
&i.Terms,
&i.TermsAmount,
&i.TermsCap,
&i.TermsHeightStart,
&i.TermsHeightEnd,
&i.TermsOffsetStart,
&i.TermsOffsetEnd,
&i.Turbo,
&i.EtchingBlock,
&i.EtchingTxHash,
&i.EtchedAt,
&i.RuneID_2,
&i.BlockHeight,
&i.Mints,
&i.BurnedAmount,
&i.CompletedAt,
&i.CompletedAtHeight,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getRuneIdFromRune = `-- name: GetRuneIdFromRune :one
SELECT rune_id FROM runes_entries WHERE rune = $1
`
func (q *Queries) GetRuneIdFromRune(ctx context.Context, rune string) (string, error) {
row := q.db.QueryRow(ctx, getRuneIdFromRune, rune)
var rune_id string
err := row.Scan(&rune_id)
return rune_id, err
}
const getRuneTransactions = `-- name: GetRuneTransactions :many
SELECT hash, runes_transactions.block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched, tx_hash, runes_runestones.block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws FROM runes_transactions
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
WHERE (
$1::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
OR runes_transactions.outputs @> $2::JSONB
OR runes_transactions.inputs @> $2::JSONB
) AND (
$3::BOOLEAN = FALSE -- if @filter_rune_id is TRUE, apply rune_id filter
OR runes_transactions.outputs @> $4::JSONB
OR runes_transactions.inputs @> $4::JSONB
OR runes_transactions.mints ? $5
OR runes_transactions.burns ? $5
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = $6 AND runes_transactions.index = $7)
) AND (
$8::INT = 0 OR runes_transactions.block_height = $8::INT -- if @block_height > 0, apply block_height filter
)
`
type GetRuneTransactionsParams struct {
FilterPkScript bool
PkScriptParam []byte
FilterRuneID bool
RuneIDParam []byte
RuneID []byte
RuneIDBlockHeight int32
RuneIDTxIndex int32
BlockHeight int32
}
type GetRuneTransactionsRow struct {
Hash string
BlockHeight int32
Index int32
Timestamp pgtype.Timestamp
Inputs []byte
Outputs []byte
Mints []byte
Burns []byte
RuneEtched bool
TxHash pgtype.Text
BlockHeight_2 pgtype.Int4
Etching pgtype.Bool
EtchingDivisibility pgtype.Int2
EtchingPremine pgtype.Numeric
EtchingRune pgtype.Text
EtchingSpacers pgtype.Int4
EtchingSymbol pgtype.Int4
EtchingTerms pgtype.Bool
EtchingTermsAmount pgtype.Numeric
EtchingTermsCap pgtype.Numeric
EtchingTermsHeightStart pgtype.Int4
EtchingTermsHeightEnd pgtype.Int4
EtchingTermsOffsetStart pgtype.Int4
EtchingTermsOffsetEnd pgtype.Int4
EtchingTurbo pgtype.Bool
Edicts []byte
Mint pgtype.Text
Pointer pgtype.Int4
Cenotaph pgtype.Bool
Flaws pgtype.Int4
}
func (q *Queries) GetRuneTransactions(ctx context.Context, arg GetRuneTransactionsParams) ([]GetRuneTransactionsRow, error) {
rows, err := q.db.Query(ctx, getRuneTransactions,
arg.FilterPkScript,
arg.PkScriptParam,
arg.FilterRuneID,
arg.RuneIDParam,
arg.RuneID,
arg.RuneIDBlockHeight,
arg.RuneIDTxIndex,
arg.BlockHeight,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetRuneTransactionsRow
for rows.Next() {
var i GetRuneTransactionsRow
if err := rows.Scan(
&i.Hash,
&i.BlockHeight,
&i.Index,
&i.Timestamp,
&i.Inputs,
&i.Outputs,
&i.Mints,
&i.Burns,
&i.RuneEtched,
&i.TxHash,
&i.BlockHeight_2,
&i.Etching,
&i.EtchingDivisibility,
&i.EtchingPremine,
&i.EtchingRune,
&i.EtchingSpacers,
&i.EtchingSymbol,
&i.EtchingTerms,
&i.EtchingTermsAmount,
&i.EtchingTermsCap,
&i.EtchingTermsHeightStart,
&i.EtchingTermsHeightEnd,
&i.EtchingTermsOffsetStart,
&i.EtchingTermsOffsetEnd,
&i.EtchingTurbo,
&i.Edicts,
&i.Mint,
&i.Pointer,
&i.Cenotaph,
&i.Flaws,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getUnspentOutPointBalancesByPkScript = `-- name: GetUnspentOutPointBalancesByPkScript :many
SELECT rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height FROM runes_outpoint_balances WHERE pkscript = $1 AND block_height <= $2 AND (spent_height IS NULL OR spent_height > $2)
`
type GetUnspentOutPointBalancesByPkScriptParams struct {
Pkscript string
BlockHeight int32
}
func (q *Queries) GetUnspentOutPointBalancesByPkScript(ctx context.Context, arg GetUnspentOutPointBalancesByPkScriptParams) ([]RunesOutpointBalance, error) {
rows, err := q.db.Query(ctx, getUnspentOutPointBalancesByPkScript, arg.Pkscript, arg.BlockHeight)
if err != nil {
return nil, err
}
defer rows.Close()
var items []RunesOutpointBalance
for rows.Next() {
var i RunesOutpointBalance
if err := rows.Scan(
&i.RuneID,
&i.Pkscript,
&i.TxHash,
&i.TxIdx,
&i.Amount,
&i.BlockHeight,
&i.SpentHeight,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const spendOutPointBalances = `-- name: SpendOutPointBalances :exec
UPDATE runes_outpoint_balances SET spent_height = $1 WHERE tx_hash = $2 AND tx_idx = $3
`
type SpendOutPointBalancesParams struct {
SpentHeight pgtype.Int4
TxHash string
TxIdx int32
}
func (q *Queries) SpendOutPointBalances(ctx context.Context, arg SpendOutPointBalancesParams) error {
_, err := q.db.Exec(ctx, spendOutPointBalances, arg.SpentHeight, arg.TxHash, arg.TxIdx)
return err
}
const unspendOutPointBalancesSinceHeight = `-- name: UnspendOutPointBalancesSinceHeight :exec
UPDATE runes_outpoint_balances SET spent_height = NULL WHERE spent_height >= $1
`
func (q *Queries) UnspendOutPointBalancesSinceHeight(ctx context.Context, spentHeight pgtype.Int4) error {
_, err := q.db.Exec(ctx, unspendOutPointBalancesSinceHeight, spentHeight)
return err
}


@@ -0,0 +1,33 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
)
type DBTX interface {
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
QueryRow(context.Context, string, ...interface{}) pgx.Row
SendBatch(context.Context, *pgx.Batch) pgx.BatchResults
}
func New(db DBTX) *Queries {
return &Queries{db: db}
}
type Queries struct {
db DBTX
}
func (q *Queries) WithTx(tx pgx.Tx) *Queries {
return &Queries{
db: tx,
}
}
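
For readers unfamiliar with sqlc's output, here is a minimal usage sketch (not part of this diff) showing how the generated Queries type can be bound to a pgx connection pool and, for atomic writes, to a transaction via WithTx. The connection string, the main function, and the choice of SetIndexerState (defined in a later file of this diff) are illustrative assumptions.

package main

import (
	"context"
	"log"

	"github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen"
	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	ctx := context.Background()

	// Placeholder DSN; *pgxpool.Pool satisfies the DBTX interface declared above.
	pool, err := pgxpool.New(ctx, "postgres://user:pass@localhost:5432/runes")
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	queries := gen.New(pool)

	// Plain reads can go through the pool directly.
	latest, err := queries.GetLatestIndexedBlock(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("latest indexed block: %d", latest.Height)

	// Writes that must be atomic are bound to a transaction via WithTx.
	tx, err := pool.Begin(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback(ctx)

	qtx := queries.WithTx(tx)
	if err := qtx.SetIndexerState(ctx, gen.SetIndexerStateParams{DbVersion: 1, EventHashVersion: 1}); err != nil {
		log.Fatal(err)
	}
	if err := tx.Commit(ctx); err != nil {
		log.Fatal(err)
	}
}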


@@ -0,0 +1,70 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: info.sql
package gen
import (
"context"
)
const getLatestIndexerState = `-- name: GetLatestIndexerState :one
SELECT id, db_version, event_hash_version, created_at FROM runes_indexer_state ORDER BY created_at DESC LIMIT 1
`
func (q *Queries) GetLatestIndexerState(ctx context.Context) (RunesIndexerState, error) {
row := q.db.QueryRow(ctx, getLatestIndexerState)
var i RunesIndexerState
err := row.Scan(
&i.Id,
&i.DbVersion,
&i.EventHashVersion,
&i.CreatedAt,
)
return i, err
}
const getLatestIndexerStats = `-- name: GetLatestIndexerStats :one
SELECT "client_version", "network" FROM runes_indexer_stats ORDER BY id DESC LIMIT 1
`
type GetLatestIndexerStatsRow struct {
ClientVersion string
Network string
}
func (q *Queries) GetLatestIndexerStats(ctx context.Context) (GetLatestIndexerStatsRow, error) {
row := q.db.QueryRow(ctx, getLatestIndexerStats)
var i GetLatestIndexerStatsRow
err := row.Scan(&i.ClientVersion, &i.Network)
return i, err
}
const setIndexerState = `-- name: SetIndexerState :exec
INSERT INTO runes_indexer_state (db_version, event_hash_version) VALUES ($1, $2)
`
type SetIndexerStateParams struct {
DbVersion int32
EventHashVersion int32
}
func (q *Queries) SetIndexerState(ctx context.Context, arg SetIndexerStateParams) error {
_, err := q.db.Exec(ctx, setIndexerState, arg.DbVersion, arg.EventHashVersion)
return err
}
const updateIndexerStats = `-- name: UpdateIndexerStats :exec
INSERT INTO runes_indexer_stats (client_version, network) VALUES ($1, $2)
`
type UpdateIndexerStatsParams struct {
ClientVersion string
Network string
}
func (q *Queries) UpdateIndexerStats(ctx context.Context, arg UpdateIndexerStatsParams) error {
_, err := q.db.Exec(ctx, updateIndexerStats, arg.ClientVersion, arg.Network)
return err
}


@@ -0,0 +1,114 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"github.com/jackc/pgx/v5/pgtype"
)
type RunesBalance struct {
Pkscript string
BlockHeight int32
RuneID string
Amount pgtype.Numeric
}
type RunesEntry struct {
RuneID string
Number int64
Rune string
Spacers int32
Premine pgtype.Numeric
Symbol int32
Divisibility int16
Terms bool
TermsAmount pgtype.Numeric
TermsCap pgtype.Numeric
TermsHeightStart pgtype.Int4
TermsHeightEnd pgtype.Int4
TermsOffsetStart pgtype.Int4
TermsOffsetEnd pgtype.Int4
Turbo bool
EtchingBlock int32
EtchingTxHash string
EtchedAt pgtype.Timestamp
}
type RunesEntryState struct {
RuneID string
BlockHeight int32
Mints pgtype.Numeric
BurnedAmount pgtype.Numeric
CompletedAt pgtype.Timestamp
CompletedAtHeight pgtype.Int4
}
type RunesIndexedBlock struct {
Height int32
Hash string
PrevHash string
EventHash string
CumulativeEventHash string
}
type RunesIndexerStat struct {
Id int64
ClientVersion string
Network string
CreatedAt pgtype.Timestamptz
}
type RunesIndexerState struct {
Id int64
DbVersion int32
EventHashVersion int32
CreatedAt pgtype.Timestamptz
}
type RunesOutpointBalance struct {
RuneID string
Pkscript string
TxHash string
TxIdx int32
Amount pgtype.Numeric
BlockHeight int32
SpentHeight pgtype.Int4
}
type RunesRunestone struct {
TxHash string
BlockHeight int32
Etching bool
EtchingDivisibility pgtype.Int2
EtchingPremine pgtype.Numeric
EtchingRune pgtype.Text
EtchingSpacers pgtype.Int4
EtchingSymbol pgtype.Int4
EtchingTerms pgtype.Bool
EtchingTermsAmount pgtype.Numeric
EtchingTermsCap pgtype.Numeric
EtchingTermsHeightStart pgtype.Int4
EtchingTermsHeightEnd pgtype.Int4
EtchingTermsOffsetStart pgtype.Int4
EtchingTermsOffsetEnd pgtype.Int4
EtchingTurbo pgtype.Bool
Edicts []byte
Mint pgtype.Text
Pointer pgtype.Int4
Cenotaph bool
Flaws int32
}
type RunesTransaction struct {
Hash string
BlockHeight int32
Index int32
Timestamp pgtype.Timestamp
Inputs []byte
Outputs []byte
Mints []byte
Burns []byte
RuneEtched bool
}


@@ -0,0 +1,56 @@
package postgres
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen"
"github.com/jackc/pgx/v5"
)
var _ datagateway.IndexerInfoDataGateway = (*Repository)(nil)
func (r *Repository) GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error) {
indexerStateModel, err := r.queries.GetLatestIndexerState(ctx)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return entity.IndexerState{}, errors.WithStack(errs.NotFound)
}
return entity.IndexerState{}, errors.Wrap(err, "error during query")
}
indexerState := mapIndexerStateModelToType(indexerStateModel)
return indexerState, nil
}
func (r *Repository) GetLatestIndexerStats(ctx context.Context) (string, common.Network, error) {
stats, err := r.queries.GetLatestIndexerStats(ctx)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return "", "", errors.WithStack(errs.NotFound)
}
return "", "", errors.Wrap(err, "error during query")
}
return stats.ClientVersion, common.Network(stats.Network), nil
}
func (r *Repository) SetIndexerState(ctx context.Context, state entity.IndexerState) error {
params := mapIndexerStateTypeToParams(state)
if err := r.queries.SetIndexerState(ctx, params); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error {
if err := r.queries.UpdateIndexerStats(ctx, gen.UpdateIndexerStatsParams{
ClientVersion: clientVersion,
Network: string(network),
}); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}


@@ -0,0 +1,693 @@
package postgres
import (
"encoding/hex"
"encoding/json"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/jackc/pgx/v5/pgtype"
"github.com/samber/lo"
)
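// uint128FromNumeric converts a pgtype.Numeric into a *uint128.Uint128; a NULL numeric maps to nil.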
func uint128FromNumeric(src pgtype.Numeric) (*uint128.Uint128, error) {
if !src.Valid {
return nil, nil
}
bytes, err := src.MarshalJSON()
if err != nil {
return nil, errors.WithStack(err)
}
result, err := uint128.FromString(string(bytes))
if err != nil {
return nil, errors.WithStack(err)
}
return &result, nil
}
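// numericFromUint128 converts a *uint128.Uint128 into a pgtype.Numeric; a nil input maps to a NULL (invalid) numeric.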
func numericFromUint128(src *uint128.Uint128) (pgtype.Numeric, error) {
if src == nil {
return pgtype.Numeric{}, nil
}
bytes := []byte(src.String())
var result pgtype.Numeric
err := result.UnmarshalJSON(bytes)
if err != nil {
return pgtype.Numeric{}, errors.WithStack(err)
}
return result, nil
}
func mapIndexerStateModelToType(src gen.RunesIndexerState) entity.IndexerState {
var createdAt time.Time
if src.CreatedAt.Valid {
createdAt = src.CreatedAt.Time
}
return entity.IndexerState{
DBVersion: src.DbVersion,
EventHashVersion: src.EventHashVersion,
CreatedAt: createdAt,
}
}
func mapIndexerStateTypeToParams(src entity.IndexerState) gen.SetIndexerStateParams {
return gen.SetIndexerStateParams{
DbVersion: src.DBVersion,
EventHashVersion: src.EventHashVersion,
}
}
func mapRuneEntryModelToType(src gen.GetRuneEntriesByRuneIdsRow) (runes.RuneEntry, error) {
runeId, err := runes.NewRuneIdFromString(src.RuneID)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse rune id")
}
burnedAmount, err := uint128FromNumeric(src.BurnedAmount)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse burned amount")
}
rune, err := runes.NewRuneFromString(src.Rune)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse rune")
}
mints, err := uint128FromNumeric(src.Mints)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse mints")
}
premine, err := uint128FromNumeric(src.Premine)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse premine")
}
var completedAt time.Time
if src.CompletedAt.Valid {
completedAt = src.CompletedAt.Time
}
var completedAtHeight *uint64
if src.CompletedAtHeight.Valid {
completedAtHeight = lo.ToPtr(uint64(src.CompletedAtHeight.Int32))
}
var terms *runes.Terms
if src.Terms {
terms = &runes.Terms{}
if src.TermsAmount.Valid {
amount, err := uint128FromNumeric(src.TermsAmount)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse terms amount")
}
terms.Amount = amount
}
if src.TermsCap.Valid {
cap, err := uint128FromNumeric(src.TermsCap)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse terms cap")
}
terms.Cap = cap
}
if src.TermsHeightStart.Valid {
heightStart := uint64(src.TermsHeightStart.Int32)
terms.HeightStart = &heightStart
}
if src.TermsHeightEnd.Valid {
heightEnd := uint64(src.TermsHeightEnd.Int32)
terms.HeightEnd = &heightEnd
}
if src.TermsOffsetStart.Valid {
offsetStart := uint64(src.TermsOffsetStart.Int32)
terms.OffsetStart = &offsetStart
}
if src.TermsOffsetEnd.Valid {
offsetEnd := uint64(src.TermsOffsetEnd.Int32)
terms.OffsetEnd = &offsetEnd
}
}
etchingTxHash, err := chainhash.NewHashFromStr(src.EtchingTxHash)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse etching tx hash")
}
var etchedAt time.Time
if src.EtchedAt.Valid {
etchedAt = src.EtchedAt.Time
}
return runes.RuneEntry{
RuneId: runeId,
Number: uint64(src.Number),
Divisibility: uint8(src.Divisibility),
Premine: lo.FromPtr(premine),
SpacedRune: runes.NewSpacedRune(rune, uint32(src.Spacers)),
Symbol: src.Symbol,
Terms: terms,
Turbo: src.Turbo,
Mints: lo.FromPtr(mints),
BurnedAmount: lo.FromPtr(burnedAmount),
CompletedAt: completedAt,
CompletedAtHeight: completedAtHeight,
EtchingBlock: uint64(src.EtchingBlock),
EtchingTxHash: *etchingTxHash,
EtchedAt: etchedAt,
}, nil
}
func mapRuneEntryTypeToParams(src runes.RuneEntry, blockHeight uint64) (gen.CreateRuneEntryParams, gen.CreateRuneEntryStateParams, error) {
runeId := src.RuneId.String()
rune := src.SpacedRune.Rune.String()
spacers := int32(src.SpacedRune.Spacers)
mints, err := numericFromUint128(&src.Mints)
if err != nil {
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse mints")
}
burnedAmount, err := numericFromUint128(&src.BurnedAmount)
if err != nil {
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse burned amount")
}
premine, err := numericFromUint128(&src.Premine)
if err != nil {
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse premine")
}
var completedAt pgtype.Timestamp
if !src.CompletedAt.IsZero() {
completedAt.Time = src.CompletedAt
completedAt.Valid = true
}
var completedAtHeight pgtype.Int4
if src.CompletedAtHeight != nil {
completedAtHeight.Int32 = int32(*src.CompletedAtHeight)
completedAtHeight.Valid = true
}
var terms bool
var termsAmount, termsCap pgtype.Numeric
var termsHeightStart, termsHeightEnd, termsOffsetStart, termsOffsetEnd pgtype.Int4
if src.Terms != nil {
terms = true
if src.Terms.Amount != nil {
termsAmount, err = numericFromUint128(src.Terms.Amount)
if err != nil {
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse terms amount")
}
}
if src.Terms.Cap != nil {
termsCap, err = numericFromUint128(src.Terms.Cap)
if err != nil {
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse terms cap")
}
}
if src.Terms.HeightStart != nil {
termsHeightStart = pgtype.Int4{
Int32: int32(*src.Terms.HeightStart),
Valid: true,
}
}
if src.Terms.HeightEnd != nil {
termsHeightEnd = pgtype.Int4{
Int32: int32(*src.Terms.HeightEnd),
Valid: true,
}
}
if src.Terms.OffsetStart != nil {
termsOffsetStart = pgtype.Int4{
Int32: int32(*src.Terms.OffsetStart),
Valid: true,
}
}
if src.Terms.OffsetEnd != nil {
termsOffsetEnd = pgtype.Int4{
Int32: int32(*src.Terms.OffsetEnd),
Valid: true,
}
}
}
etchedAt := pgtype.Timestamp{Time: src.EtchedAt, Valid: true}
return gen.CreateRuneEntryParams{
RuneID: runeId,
Rune: rune,
Number: int64(src.Number),
Spacers: spacers,
Premine: premine,
Symbol: src.Symbol,
Divisibility: int16(src.Divisibility),
Terms: terms,
TermsAmount: termsAmount,
TermsCap: termsCap,
TermsHeightStart: termsHeightStart,
TermsHeightEnd: termsHeightEnd,
TermsOffsetStart: termsOffsetStart,
TermsOffsetEnd: termsOffsetEnd,
Turbo: src.Turbo,
EtchingBlock: int32(src.EtchingBlock),
EtchingTxHash: src.EtchingTxHash.String(),
EtchedAt: etchedAt,
}, gen.CreateRuneEntryStateParams{
BlockHeight: int32(blockHeight),
RuneID: runeId,
Mints: mints,
BurnedAmount: burnedAmount,
CompletedAt: completedAt,
CompletedAtHeight: completedAtHeight,
}, nil
}
// mapRuneTransactionTypeToParams returns params for creating a new rune transaction and (optionally) a runestone.
func mapRuneTransactionTypeToParams(src entity.RuneTransaction) (gen.CreateRuneTransactionParams, *gen.CreateRunestoneParams, error) {
var timestamp pgtype.Timestamp
if !src.Timestamp.IsZero() {
timestamp.Time = src.Timestamp
timestamp.Valid = true
}
inputsBytes, err := json.Marshal(src.Inputs)
if err != nil {
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal inputs")
}
outputsBytes, err := json.Marshal(src.Outputs)
if err != nil {
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal outputs")
}
mints := make(map[string]uint128.Uint128)
for key, value := range src.Mints {
mints[key.String()] = value
}
mintsBytes, err := json.Marshal(mints)
if err != nil {
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal mints")
}
burns := make(map[string]uint128.Uint128)
for key, value := range src.Burns {
burns[key.String()] = value
}
burnsBytes, err := json.Marshal(burns)
if err != nil {
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal burns")
}
var runestoneParams *gen.CreateRunestoneParams
if src.Runestone != nil {
params, err := mapRunestoneTypeToParams(*src.Runestone, src.Hash, src.BlockHeight)
if err != nil {
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to map runestone to params")
}
runestoneParams = &params
}
return gen.CreateRuneTransactionParams{
Hash: src.Hash.String(),
BlockHeight: int32(src.BlockHeight),
Index: int32(src.Index),
Timestamp: timestamp,
Inputs: inputsBytes,
Outputs: outputsBytes,
Mints: mintsBytes,
Burns: burnsBytes,
RuneEtched: src.RuneEtched,
}, runestoneParams, nil
}
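// extractModelRuneTxAndRunestone splits a joined GetRuneTransactionsRow into its transaction model and, if the row carries one, its runestone model.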
func extractModelRuneTxAndRunestone(src gen.GetRuneTransactionsRow) (gen.RunesTransaction, *gen.RunesRunestone, error) {
var runestone *gen.RunesRunestone
if src.TxHash.Valid {
// these fields should never be null
if !src.Cenotaph.Valid {
return gen.RunesTransaction{}, nil, errors.New("runestone cenotaph is null")
}
if !src.Flaws.Valid {
return gen.RunesTransaction{}, nil, errors.New("runestone flaws is null")
}
runestone = &gen.RunesRunestone{
TxHash: src.TxHash.String,
BlockHeight: src.BlockHeight,
Etching: src.Etching.Bool,
EtchingDivisibility: src.EtchingDivisibility,
EtchingPremine: src.EtchingPremine,
EtchingRune: src.EtchingRune,
EtchingSpacers: src.EtchingSpacers,
EtchingSymbol: src.EtchingSymbol,
EtchingTerms: src.EtchingTerms,
EtchingTermsAmount: src.EtchingTermsAmount,
EtchingTermsCap: src.EtchingTermsCap,
EtchingTermsHeightStart: src.EtchingTermsHeightStart,
EtchingTermsHeightEnd: src.EtchingTermsHeightEnd,
EtchingTermsOffsetStart: src.EtchingTermsOffsetStart,
EtchingTermsOffsetEnd: src.EtchingTermsOffsetEnd,
EtchingTurbo: src.EtchingTurbo,
Edicts: src.Edicts,
Mint: src.Mint,
Pointer: src.Pointer,
Cenotaph: src.Cenotaph.Bool,
Flaws: src.Flaws.Int32,
}
}
return gen.RunesTransaction{
Hash: src.Hash,
BlockHeight: src.BlockHeight,
Index: src.Index,
Timestamp: src.Timestamp,
Inputs: src.Inputs,
Outputs: src.Outputs,
Mints: src.Mints,
Burns: src.Burns,
RuneEtched: src.RuneEtched,
}, runestone, nil
}
func mapRuneTransactionModelToType(src gen.RunesTransaction) (entity.RuneTransaction, error) {
hash, err := chainhash.NewHashFromStr(src.Hash)
if err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to parse transaction hash")
}
var timestamp time.Time
if src.Timestamp.Valid {
timestamp = src.Timestamp.Time
}
inputs := make([]*entity.TxInputOutput, 0)
if err := json.Unmarshal(src.Inputs, &inputs); err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal inputs")
}
outputs := make([]*entity.TxInputOutput, 0)
if err := json.Unmarshal(src.Outputs, &outputs); err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal outputs")
}
mintsRaw := make(map[string]uint128.Uint128)
if err := json.Unmarshal(src.Mints, &mintsRaw); err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal mints")
}
mints := make(map[runes.RuneId]uint128.Uint128)
for key, value := range mintsRaw {
runeId, err := runes.NewRuneIdFromString(key)
if err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to parse rune id")
}
mints[runeId] = value
}
burnsRaw := make(map[string]uint128.Uint128)
if err := json.Unmarshal(src.Burns, &burnsRaw); err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal burns")
}
burns := make(map[runes.RuneId]uint128.Uint128)
for key, value := range burnsRaw {
runeId, err := runes.NewRuneIdFromString(key)
if err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to parse rune id")
}
burns[runeId] = value
}
return entity.RuneTransaction{
Hash: *hash,
BlockHeight: uint64(src.BlockHeight),
Index: uint32(src.Index),
Timestamp: timestamp,
Inputs: inputs,
Outputs: outputs,
Mints: mints,
Burns: burns,
RuneEtched: src.RuneEtched,
}, nil
}
func mapRunestoneTypeToParams(src runes.Runestone, txHash chainhash.Hash, blockHeight uint64) (gen.CreateRunestoneParams, error) {
var runestoneParams gen.CreateRunestoneParams
// TODO: optimize serialized edicts
edictsBytes, err := json.Marshal(src.Edicts)
if err != nil {
return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to marshal runestone edicts")
}
runestoneParams = gen.CreateRunestoneParams{
TxHash: txHash.String(),
BlockHeight: int32(blockHeight),
Edicts: edictsBytes,
Cenotaph: src.Cenotaph,
Flaws: int32(src.Flaws),
}
if src.Etching != nil {
runestoneParams.Etching = true
etching := *src.Etching
if etching.Divisibility != nil {
runestoneParams.EtchingDivisibility = pgtype.Int2{Int16: int16(*etching.Divisibility), Valid: true}
}
if etching.Premine != nil {
premine, err := numericFromUint128(etching.Premine)
if err != nil {
return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to parse etching premine")
}
runestoneParams.EtchingPremine = premine
}
if etching.Rune != nil {
runestoneParams.EtchingRune = pgtype.Text{String: etching.Rune.String(), Valid: true}
}
if etching.Spacers != nil {
runestoneParams.EtchingSpacers = pgtype.Int4{Int32: int32(*etching.Spacers), Valid: true}
}
if etching.Symbol != nil {
runestoneParams.EtchingSymbol = pgtype.Int4{Int32: *etching.Symbol, Valid: true}
}
if etching.Terms != nil {
runestoneParams.EtchingTerms = pgtype.Bool{Bool: true, Valid: true}
terms := *etching.Terms
if terms.Amount != nil {
amount, err := numericFromUint128(terms.Amount)
if err != nil {
return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to parse etching terms amount")
}
runestoneParams.EtchingTermsAmount = amount
}
if terms.Cap != nil {
cap, err := numericFromUint128(terms.Cap)
if err != nil {
return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to parse etching terms cap")
}
runestoneParams.EtchingTermsCap = cap
}
if terms.HeightStart != nil {
runestoneParams.EtchingTermsHeightStart = pgtype.Int4{Int32: int32(*terms.HeightStart), Valid: true}
}
if terms.HeightEnd != nil {
runestoneParams.EtchingTermsHeightEnd = pgtype.Int4{Int32: int32(*terms.HeightEnd), Valid: true}
}
if terms.OffsetStart != nil {
runestoneParams.EtchingTermsOffsetStart = pgtype.Int4{Int32: int32(*terms.OffsetStart), Valid: true}
}
if terms.OffsetEnd != nil {
runestoneParams.EtchingTermsOffsetEnd = pgtype.Int4{Int32: int32(*terms.OffsetEnd), Valid: true}
}
}
runestoneParams.EtchingTurbo = pgtype.Bool{Bool: etching.Turbo, Valid: true}
}
if src.Mint != nil {
runestoneParams.Mint = pgtype.Text{String: src.Mint.String(), Valid: true}
}
if src.Pointer != nil {
runestoneParams.Pointer = pgtype.Int4{Int32: int32(*src.Pointer), Valid: true}
}
return runestoneParams, nil
}
func mapRunestoneModelToType(src gen.RunesRunestone) (runes.Runestone, error) {
runestone := runes.Runestone{
Cenotaph: src.Cenotaph,
Flaws: runes.Flaws(src.Flaws),
}
if src.Etching {
etching := runes.Etching{}
if src.EtchingDivisibility.Valid {
divisibility := uint8(src.EtchingDivisibility.Int16)
etching.Divisibility = &divisibility
}
if src.EtchingPremine.Valid {
premine, err := uint128FromNumeric(src.EtchingPremine)
if err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to parse etching premine")
}
etching.Premine = premine
}
if src.EtchingRune.Valid {
rune, err := runes.NewRuneFromString(src.EtchingRune.String)
if err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to parse etching rune")
}
etching.Rune = &rune
}
if src.EtchingSpacers.Valid {
spacers := uint32(src.EtchingSpacers.Int32)
etching.Spacers = &spacers
}
if src.EtchingSymbol.Valid {
var symbol rune = src.EtchingSymbol.Int32
etching.Symbol = &symbol
}
if src.EtchingTerms.Valid && src.EtchingTerms.Bool {
terms := runes.Terms{}
if src.EtchingTermsAmount.Valid {
amount, err := uint128FromNumeric(src.EtchingTermsAmount)
if err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to parse etching terms amount")
}
terms.Amount = amount
}
if src.EtchingTermsCap.Valid {
cap, err := uint128FromNumeric(src.EtchingTermsCap)
if err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to parse etching terms cap")
}
terms.Cap = cap
}
if src.EtchingTermsHeightStart.Valid {
heightStart := uint64(src.EtchingTermsHeightStart.Int32)
terms.HeightStart = &heightStart
}
if src.EtchingTermsHeightEnd.Valid {
heightEnd := uint64(src.EtchingTermsHeightEnd.Int32)
terms.HeightEnd = &heightEnd
}
if src.EtchingTermsOffsetStart.Valid {
offsetStart := uint64(src.EtchingTermsOffsetStart.Int32)
terms.OffsetStart = &offsetStart
}
if src.EtchingTermsOffsetEnd.Valid {
offsetEnd := uint64(src.EtchingTermsOffsetEnd.Int32)
terms.OffsetEnd = &offsetEnd
}
etching.Terms = &terms
}
etching.Turbo = src.EtchingTurbo.Valid && src.EtchingTurbo.Bool
runestone.Etching = &etching
}
if src.Mint.Valid {
mint, err := runes.NewRuneIdFromString(src.Mint.String)
if err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to parse mint")
}
runestone.Mint = &mint
}
if src.Pointer.Valid {
pointer := uint64(src.Pointer.Int32)
runestone.Pointer = &pointer
}
// Edicts
{
if err := json.Unmarshal(src.Edicts, &runestone.Edicts); err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to unmarshal edicts")
}
if len(runestone.Edicts) == 0 {
runestone.Edicts = nil
}
}
return runestone, nil
}
func mapBalanceModelToType(src gen.RunesBalance) (*entity.Balance, error) {
runeId, err := runes.NewRuneIdFromString(src.RuneID)
if err != nil {
return nil, errors.Wrap(err, "failed to parse rune id")
}
amount, err := uint128FromNumeric(src.Amount)
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance")
}
pkScript, err := hex.DecodeString(src.Pkscript)
if err != nil {
return nil, errors.Wrap(err, "failed to parse pkscript")
}
return &entity.Balance{
PkScript: pkScript,
RuneId: runeId,
Amount: lo.FromPtr(amount),
BlockHeight: uint64(src.BlockHeight),
}, nil
}
func mapIndexedBlockModelToType(src gen.RunesIndexedBlock) (*entity.IndexedBlock, error) {
hash, err := chainhash.NewHashFromStr(src.Hash)
if err != nil {
return nil, errors.Wrap(err, "failed to parse block hash")
}
prevBlockHash, err := chainhash.NewHashFromStr(src.PrevHash)
if err != nil {
return nil, errors.Wrap(err, "failed to parse prev block hash")
}
eventHash, err := chainhash.NewHashFromStr(src.EventHash)
if err != nil {
return nil, errors.Wrap(err, "failed to parse event hash")
}
cumulativeEventHash, err := chainhash.NewHashFromStr(src.CumulativeEventHash)
if err != nil {
return nil, errors.Wrap(err, "failed to parse cumulative event hash")
}
return &entity.IndexedBlock{
Height: int64(src.Height),
Hash: *hash,
PrevHash: *prevBlockHash,
EventHash: *eventHash,
CumulativeEventHash: *cumulativeEventHash,
}, nil
}
func mapIndexedBlockTypeToParams(src entity.IndexedBlock) (gen.CreateIndexedBlockParams, error) {
return gen.CreateIndexedBlockParams{
Height: int32(src.Height),
Hash: src.Hash.String(),
PrevHash: src.PrevHash.String(),
EventHash: src.EventHash.String(),
CumulativeEventHash: src.CumulativeEventHash.String(),
}, nil
}
func mapOutPointBalanceModelToType(src gen.RunesOutpointBalance) (entity.OutPointBalance, error) {
runeId, err := runes.NewRuneIdFromString(src.RuneID)
if err != nil {
return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse rune id")
}
amount, err := uint128FromNumeric(src.Amount)
if err != nil {
return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse balance")
}
pkScript, err := hex.DecodeString(src.Pkscript)
if err != nil {
return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse pkscript")
}
txHash, err := chainhash.NewHashFromStr(src.TxHash)
if err != nil {
return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse tx hash")
}
var spentHeight *uint64
if src.SpentHeight.Valid {
spentHeight = lo.ToPtr(uint64(src.SpentHeight.Int32))
}
return entity.OutPointBalance{
PkScript: pkScript,
RuneId: runeId,
Amount: lo.FromPtr(amount),
OutPoint: wire.OutPoint{
Hash: *txHash,
Index: uint32(src.TxIdx),
},
BlockHeight: uint64(src.BlockHeight),
SpentHeight: spentHeight,
}, nil
}
func mapOutPointBalanceTypeToParams(src entity.OutPointBalance) (gen.CreateOutPointBalancesParams, error) {
amount, err := numericFromUint128(&src.Amount)
if err != nil {
return gen.CreateOutPointBalancesParams{}, errors.Wrap(err, "failed to parse amount")
}
var spentHeight pgtype.Int4
if src.SpentHeight != nil {
spentHeight = pgtype.Int4{Int32: int32(*src.SpentHeight), Valid: true}
}
return gen.CreateOutPointBalancesParams{
TxHash: src.OutPoint.Hash.String(),
TxIdx: int32(src.OutPoint.Index),
Pkscript: hex.EncodeToString(src.PkScript),
RuneID: src.RuneId.String(),
Amount: amount,
BlockHeight: int32(src.BlockHeight),
SpentHeight: spentHeight,
}, nil
}


@@ -0,0 +1,61 @@
package postgres
import (
"testing"
"github.com/gaze-network/uint128"
"github.com/jackc/pgx/v5/pgtype"
"github.com/stretchr/testify/assert"
)
func TestUint128FromNumeric(t *testing.T) {
t.Run("normal", func(t *testing.T) {
numeric := pgtype.Numeric{}
numeric.ScanInt64(pgtype.Int8{
Int64: 1000,
Valid: true,
})
expected := uint128.From64(1000)
result, err := uint128FromNumeric(numeric)
assert.NoError(t, err)
assert.Equal(t, &expected, result)
})
t.Run("nil", func(t *testing.T) {
numeric := pgtype.Numeric{}
numeric.ScanInt64(pgtype.Int8{
Valid: false,
})
result, err := uint128FromNumeric(numeric)
assert.NoError(t, err)
assert.Nil(t, result)
})
}
func TestNumericFromUint128(t *testing.T) {
t.Run("normal", func(t *testing.T) {
u128 := uint128.From64(1)
expected := pgtype.Numeric{}
expected.ScanInt64(pgtype.Int8{
Int64: 1,
Valid: true,
})
result, err := numericFromUint128(&u128)
assert.NoError(t, err)
assert.Equal(t, expected, result)
})
t.Run("nil", func(t *testing.T) {
expected := pgtype.Numeric{}
expected.ScanInt64(pgtype.Int8{
Valid: false,
})
result, err := numericFromUint128(nil)
assert.NoError(t, err)
assert.Equal(t, expected, result)
})
}


@@ -0,0 +1,20 @@
package postgres
import (
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen"
"github.com/jackc/pgx/v5"
)
type Repository struct {
db postgres.DB
queries *gen.Queries
tx pgx.Tx
}
func NewRepository(db postgres.DB) *Repository {
return &Repository{
db: db,
queries: gen.New(db),
}
}


@@ -0,0 +1,483 @@
package postgres
import (
"context"
"encoding/hex"
"fmt"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"
"github.com/samber/lo"
)
var _ datagateway.RunesDataGateway = (*Repository)(nil)
// warning: GetLatestBlock currently returns a types.BlockHeader with only Height, Hash, and PrevBlock fields populated.
// This is because all current callers only require these fields. In the future, we may want to populate all fields for type safety.
func (r *Repository) GetLatestBlock(ctx context.Context) (types.BlockHeader, error) {
block, err := r.queries.GetLatestIndexedBlock(ctx)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return types.BlockHeader{}, errors.WithStack(errs.NotFound)
}
return types.BlockHeader{}, errors.Wrap(err, "error during query")
}
hash, err := chainhash.NewHashFromStr(block.Hash)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to parse block hash")
}
prevHash, err := chainhash.NewHashFromStr(block.PrevHash)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to parse prev block hash")
}
return types.BlockHeader{
Height: int64(block.Height),
Hash: *hash,
PrevBlock: *prevHash,
}, nil
}
func (r *Repository) GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error) {
indexedBlockModel, err := r.queries.GetIndexedBlockByHeight(ctx, int32(height))
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return nil, errors.WithStack(errs.NotFound)
}
return nil, errors.Wrap(err, "error during query")
}
indexedBlock, err := mapIndexedBlockModelToType(indexedBlockModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse indexed block model")
}
return indexedBlock, nil
}
func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, height uint64) ([]*entity.RuneTransaction, error) {
pkScriptParam := []byte(fmt.Sprintf(`[{"pkScript":"%s"}]`, hex.EncodeToString(pkScript)))
runeIdParam := []byte(fmt.Sprintf(`[{"runeId":"%s"}]`, runeId.String()))
rows, err := r.queries.GetRuneTransactions(ctx, gen.GetRuneTransactionsParams{
FilterPkScript: pkScript != nil,
PkScriptParam: pkScriptParam,
FilterRuneID: runeId != runes.RuneId{},
RuneIDParam: runeIdParam,
RuneID: []byte(runeId.String()),
RuneIDBlockHeight: int32(runeId.BlockHeight),
RuneIDTxIndex: int32(runeId.TxIndex),
BlockHeight: int32(height),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
runeTxs := make([]*entity.RuneTransaction, 0, len(rows))
for _, row := range rows {
runeTxModel, runestoneModel, err := extractModelRuneTxAndRunestone(row)
if err != nil {
return nil, errors.Wrap(err, "failed to extract rune transaction and runestone from row")
}
runeTx, err := mapRuneTransactionModelToType(runeTxModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse rune transaction model")
}
if runestoneModel != nil {
runestone, err := mapRunestoneModelToType(*runestoneModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse runestone model")
}
runeTx.Runestone = &runestone
}
runeTxs = append(runeTxs, &runeTx)
}
return runeTxs, nil
}
func (r *Repository) GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error) {
balances, err := r.queries.GetOutPointBalancesAtOutPoint(ctx, gen.GetOutPointBalancesAtOutPointParams{
TxHash: outPoint.Hash.String(),
TxIdx: int32(outPoint.Index),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
result := make(map[runes.RuneId]*entity.OutPointBalance, len(balances))
for _, balanceModel := range balances {
balance, err := mapOutPointBalanceModelToType(balanceModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
}
result[balance.RuneId] = &balance
}
return result, nil
}
func (r *Repository) GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error) {
balances, err := r.queries.GetUnspentOutPointBalancesByPkScript(ctx, gen.GetUnspentOutPointBalancesByPkScriptParams{
Pkscript: hex.EncodeToString(pkScript),
BlockHeight: int32(blockHeight),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
result := make([]*entity.OutPointBalance, 0, len(balances))
for _, balanceModel := range balances {
balance, err := mapOutPointBalanceModelToType(balanceModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
}
result = append(result, &balance)
}
return result, nil
}
func (r *Repository) GetRuneIdFromRune(ctx context.Context, rune runes.Rune) (runes.RuneId, error) {
runeIdStr, err := r.queries.GetRuneIdFromRune(ctx, rune.String())
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return runes.RuneId{}, errors.WithStack(errs.NotFound)
}
return runes.RuneId{}, errors.Wrap(err, "error during query")
}
runeId, err := runes.NewRuneIdFromString(runeIdStr)
if err != nil {
return runes.RuneId{}, errors.Wrap(err, "failed to parse RuneId")
}
return runeId, nil
}
func (r *Repository) GetRuneEntryByRuneId(ctx context.Context, runeId runes.RuneId) (*runes.RuneEntry, error) {
runeEntries, err := r.GetRuneEntryByRuneIdBatch(ctx, []runes.RuneId{runeId})
if err != nil {
return nil, errors.Wrap(err, "failed to get rune entries by rune id")
}
runeEntry, ok := runeEntries[runeId]
if !ok {
return nil, errors.WithStack(errs.NotFound)
}
return runeEntry, nil
}
func (r *Repository) GetRuneEntryByRuneIdBatch(ctx context.Context, runeIds []runes.RuneId) (map[runes.RuneId]*runes.RuneEntry, error) {
rows, err := r.queries.GetRuneEntriesByRuneIds(ctx, lo.Map(runeIds, func(runeId runes.RuneId, _ int) string {
return runeId.String()
}))
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
runeEntries := make(map[runes.RuneId]*runes.RuneEntry, len(rows))
var errs []error
for i, runeEntryModel := range rows {
runeEntry, err := mapRuneEntryModelToType(runeEntryModel)
if err != nil {
errs = append(errs, errors.Wrapf(err, "failed to parse rune entry model index %d", i))
continue
}
runeEntries[runeEntry.RuneId] = &runeEntry
}
if len(errs) > 0 {
return nil, errors.Join(errs...)
}
return runeEntries, nil
}
func (r *Repository) GetRuneEntryByRuneIdAndHeight(ctx context.Context, runeId runes.RuneId, blockHeight uint64) (*runes.RuneEntry, error) {
runeEntries, err := r.GetRuneEntryByRuneIdAndHeightBatch(ctx, []runes.RuneId{runeId}, blockHeight)
if err != nil {
return nil, errors.Wrap(err, "failed to get rune entries by rune id and height")
}
runeEntry, ok := runeEntries[runeId]
if !ok {
return nil, errors.WithStack(errs.NotFound)
}
return runeEntry, nil
}
func (r *Repository) GetRuneEntryByRuneIdAndHeightBatch(ctx context.Context, runeIds []runes.RuneId, blockHeight uint64) (map[runes.RuneId]*runes.RuneEntry, error) {
rows, err := r.queries.GetRuneEntriesByRuneIdsAndHeight(ctx, gen.GetRuneEntriesByRuneIdsAndHeightParams{
RuneIds: lo.Map(runeIds, func(runeId runes.RuneId, _ int) string {
return runeId.String()
}),
Height: int32(blockHeight),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
runeEntries := make(map[runes.RuneId]*runes.RuneEntry, len(rows))
var errs []error
for i, runeEntryModel := range rows {
runeEntry, err := mapRuneEntryModelToType(gen.GetRuneEntriesByRuneIdsRow(runeEntryModel))
if err != nil {
errs = append(errs, errors.Wrapf(err, "failed to parse rune entry model index %d", i))
continue
}
runeEntries[runeEntry.RuneId] = &runeEntry
}
if len(errs) > 0 {
return nil, errors.Join(errs...)
}
return runeEntries, nil
}
func (r *Repository) CountRuneEntries(ctx context.Context) (uint64, error) {
count, err := r.queries.CountRuneEntries(ctx)
if err != nil {
return 0, errors.Wrap(err, "error during query")
}
return uint64(count), nil
}
func (r *Repository) GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error) {
balances, err := r.queries.GetBalancesByPkScript(ctx, gen.GetBalancesByPkScriptParams{
Pkscript: hex.EncodeToString(pkScript),
BlockHeight: int32(blockHeight),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
result := make(map[runes.RuneId]*entity.Balance, len(balances))
for _, balanceModel := range balances {
balance, err := mapBalanceModelToType(gen.RunesBalance(balanceModel))
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
}
result[balance.RuneId] = balance
}
return result, nil
}
func (r *Repository) GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error) {
balances, err := r.queries.GetBalancesByRuneId(ctx, gen.GetBalancesByRuneIdParams{
RuneID: runeId.String(),
BlockHeight: int32(blockHeight),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
result := make([]*entity.Balance, 0, len(balances))
for _, balanceModel := range balances {
balance, err := mapBalanceModelToType(gen.RunesBalance(balanceModel))
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
}
result = append(result, balance)
}
return result, nil
}
func (r *Repository) GetBalanceByPkScriptAndRuneId(ctx context.Context, pkScript []byte, runeId runes.RuneId, blockHeight uint64) (*entity.Balance, error) {
balance, err := r.queries.GetBalanceByPkScriptAndRuneId(ctx, gen.GetBalanceByPkScriptAndRuneIdParams{
Pkscript: hex.EncodeToString(pkScript),
RuneID: runeId.String(),
BlockHeight: int32(blockHeight),
})
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return nil, errors.WithStack(errs.NotFound)
}
return nil, errors.Wrap(err, "error during query")
}
result, err := mapBalanceModelToType(balance)
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
}
return result, nil
}
func (r *Repository) CreateRuneTransaction(ctx context.Context, tx *entity.RuneTransaction) error {
if tx == nil {
return nil
}
txParams, runestoneParams, err := mapRuneTransactionTypeToParams(*tx)
if err != nil {
return errors.Wrap(err, "failed to map rune transaction to params")
}
if err = r.queries.CreateRuneTransaction(ctx, txParams); err != nil {
return errors.Wrap(err, "error during exec CreateRuneTransaction")
}
if runestoneParams != nil {
if err = r.queries.CreateRunestone(ctx, *runestoneParams); err != nil {
return errors.Wrap(err, "error during exec CreateRunestone")
}
}
return nil
}
func (r *Repository) CreateRuneEntry(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error {
if entry == nil {
return nil
}
createParams, _, err := mapRuneEntryTypeToParams(*entry, blockHeight)
if err != nil {
return errors.Wrap(err, "failed to map rune entry to params")
}
if err = r.queries.CreateRuneEntry(ctx, createParams); err != nil {
return errors.Wrap(err, "error during exec CreateRuneEntry")
}
return nil
}
func (r *Repository) CreateRuneEntryState(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error {
if entry == nil {
return nil
}
_, createStateParams, err := mapRuneEntryTypeToParams(*entry, blockHeight)
if err != nil {
return errors.Wrap(err, "failed to map rune entry to params")
}
if err = r.queries.CreateRuneEntryState(ctx, createStateParams); err != nil {
return errors.Wrap(err, "error during exec CreateRuneEntryState")
}
return nil
}
func (r *Repository) CreateOutPointBalances(ctx context.Context, outPointBalances []*entity.OutPointBalance) error {
params := make([]gen.CreateOutPointBalancesParams, 0, len(outPointBalances))
for _, balance := range outPointBalances {
param, err := mapOutPointBalanceTypeToParams(*balance)
if err != nil {
return errors.Wrap(err, "failed to map outpoint balance to params")
}
params = append(params, param)
}
result := r.queries.CreateOutPointBalances(ctx, params)
var execErrors []error
result.Exec(func(i int, err error) {
if err != nil {
execErrors = append(execErrors, err)
}
})
if len(execErrors) > 0 {
return errors.Wrap(errors.Join(execErrors...), "error during exec")
}
return nil
}
func (r *Repository) SpendOutPointBalances(ctx context.Context, outPoint wire.OutPoint, blockHeight uint64) error {
if err := r.queries.SpendOutPointBalances(ctx, gen.SpendOutPointBalancesParams{
TxHash: outPoint.Hash.String(),
TxIdx: int32(outPoint.Index),
SpentHeight: pgtype.Int4{Int32: int32(blockHeight), Valid: true},
}); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) CreateRuneBalances(ctx context.Context, params []datagateway.CreateRuneBalancesParams) error {
insertParams := make([]gen.CreateRuneBalanceAtBlockParams, 0, len(params))
for _, param := range params {
param := param
amount, err := numericFromUint128(&param.Balance)
if err != nil {
return errors.Wrap(err, "failed to convert balance to numeric")
}
insertParams = append(insertParams, gen.CreateRuneBalanceAtBlockParams{
Pkscript: hex.EncodeToString(param.PkScript),
BlockHeight: int32(param.BlockHeight),
RuneID: param.RuneId.String(),
Amount: amount,
})
}
result := r.queries.CreateRuneBalanceAtBlock(ctx, insertParams)
var execErrors []error
result.Exec(func(i int, err error) {
if err != nil {
execErrors = append(execErrors, err)
}
})
if len(execErrors) > 0 {
return errors.Wrap(errors.Join(execErrors...), "error during exec")
}
return nil
}
func (r *Repository) CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error {
if block == nil {
return nil
}
params, err := mapIndexedBlockTypeToParams(*block)
if err != nil {
return errors.Wrap(err, "failed to map indexed block to params")
}
if err = r.queries.CreateIndexedBlock(ctx, params); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteIndexedBlockSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteIndexedBlockSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteRuneEntriesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteRuneEntriesSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteRuneEntryStatesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteRuneEntryStatesSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteRuneTransactionsSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteRuneTransactionsSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteRunestonesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteRunestonesSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteOutPointBalancesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteOutPointBalancesSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) UnspendOutPointBalancesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.UnspendOutPointBalancesSinceHeight(ctx, pgtype.Int4{Int32: int32(height), Valid: true}); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteRuneBalancesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteRuneBalancesSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
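
The Delete*SinceHeight and UnspendOutPointBalancesSinceHeight methods above are the building blocks for reorg handling. As a purely hypothetical illustration (the revertSinceHeight name and the call order are assumptions, not code from this PR), they could be composed like this:

// Hypothetical sketch (not in this PR): revert all runes state at or above a given height.
func revertSinceHeight(ctx context.Context, repo *Repository, height uint64) error {
	steps := []func(context.Context, uint64) error{
		repo.DeleteIndexedBlockSinceHeight,
		repo.DeleteRuneEntriesSinceHeight,
		repo.DeleteRuneEntryStatesSinceHeight,
		repo.DeleteRuneTransactionsSinceHeight,
		repo.DeleteRunestonesSinceHeight,
		repo.DeleteOutPointBalancesSinceHeight,
		repo.UnspendOutPointBalancesSinceHeight,
		repo.DeleteRuneBalancesSinceHeight,
	}
	for _, step := range steps {
		if err := step(ctx, height); err != nil {
			return errors.Wrap(err, "failed to revert runes state since height")
		}
	}
	return nil
}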

Some files were not shown because too many files have changed in this diff.