Mirror of https://github.com/alexgo-io/gaze-indexer.git (synced 2026-01-12 22:43:22 +08:00)

Compare commits: v0.1.0 ... fix/invali (47 commits)
| SHA1 |
| --- |
| aace33b382 |
| 8760baf42b |
| 5aca9f7f19 |
| 07aa84019f |
| a5fc803371 |
| 72ca151fd3 |
| 53a4d1a4c3 |
| 3322f4a034 |
| dcb220bddb |
| b6ff7e41bd |
| 7cb717af11 |
| 0d1ae0ef5e |
| 81ba7792ea |
| b5851a39ab |
| b44fb870a3 |
| 373ea50319 |
| a1d7524615 |
| 415a476478 |
| f63505e173 |
| 65a69ddb68 |
| 4f5d1f077b |
| c133006c82 |
| 51fd1f6636 |
| a7bc6257c4 |
| 3bb7500c87 |
| 8c92893d4a |
| d84e30ed11 |
| d9fa217977 |
| d4b694aa57 |
| 9febf40e81 |
| 709b00ec0e |
| 50ae103502 |
| c0242bd555 |
| 6d4f1d0e87 |
| b9fac74026 |
| 62ecd7ea49 |
| 66ea2766a0 |
| 575c144428 |
| f8fbd67bd8 |
| c75b62bdf9 |
| cc2649dd64 |
| d96370454b |
| c9a5c6d217 |
| 86716c1915 |
| 371d1fe008 |
| c6057d9511 |
| d37be5997b |
.dockerignore (new file, 18 lines)

@@ -0,0 +1,18 @@
.git
.gitignore
.github
.vscode
**/*.md
**/*.log
.DS_Store

# Docker
Dockerfile
.dockerignore
docker-compose.yml

# Go
.golangci.yaml
cmd.local
config.*.y*ml
config.y*ml
Dockerfile (13 changes)

@@ -3,15 +3,15 @@ FROM golang:1.22 as builder

WORKDIR /app

COPY go.mod go.sum ./
RUN go mod download
RUN --mount=type=cache,target=/go/pkg/mod/ go mod download

COPY ./ ./

ENV GOOS=linux
ENV CGO_ENABLED=0

RUN go build \
-o main ./main.go
RUN --mount=type=cache,target=/go/pkg/mod/ \
go build -o main ./main.go

FROM alpine:latest

@@ -19,9 +19,10 @@ WORKDIR /app

RUN apk --no-cache add ca-certificates tzdata

COPY --from=builder /app/main .
COPY --from=builder /app/modules ./modules

# You can set `TZ` environment variable to change the timezone
# You can set TZ identifier to change the timezone, See https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# ENV TZ=US/Central

CMD ["/app/main", "run"]
ENTRYPOINT ["/app/main"]
README.md (53 changes)

@@ -2,18 +2,15 @@

# Gaze Indexer

Gaze Indexer is an open-source and modular indexing client for Bitcoin meta-protocols. It has support for Bitcoin and Runes out of the box, with **Unified Consistent APIs** across fungible token protocols.
Gaze Indexer is an open-source and modular indexing client for Bitcoin meta-protocols with **Unified Consistent APIs** across fungible token protocols.

Gaze Indexer is built with **modularity** in mind, allowing users to run all modules in one monolithic instance with a single command, or as a distributed cluster of micro-services.

Gaze Indexer serves as a foundation for building ANY meta-protocol indexers, with efficient data fetching, reorg detection, and database migration tool.
This allows developers to focus on what **truly** matters: Meta-protocol indexing logic. New meta-protocols can be easily added by implementing new modules.

Gaze Indexer also comes with a block reporting system for verifying data integrity of indexers. Visit the [Gaze Network dashboard](https://dash.gaze.network) to see the status of other indexers.

- [Modules](#modules)
  - [1. Bitcoin](#1-bitcoin)
  - [2. Runes](#2-runes)
  - [1. Runes](#1-runes)
- [Installation](#installation)
  - [Prerequisites](#prerequisites)
    - [1. Hardware Requirements](#1-hardware-requirements)

@@ -25,15 +22,10 @@ Gaze Indexer also comes with a block reporting system for verifying data integri

## Modules

### 1. Bitcoin

The Bitcoin Indexer, the heart of every meta-protocol, is responsible for indexing **Bitcoin transactions, blocks, and UTXOs**. It requires a Bitcoin Core RPC as source of Bitcoin transactions,
and stores the indexed data in database to be used by other modules.

### 2. Runes
### 1. Runes

The Runes Indexer is our first meta-protocol indexer. It indexes Runes states, transactions, runestones, and balances using Bitcoin transactions.
It comes with a set of APIs for querying historical Runes data. See our [API Reference](https://documenter.getpostman.com/view/28396285/2sA3Bn7Cxr) for full details.
It comes with a set of APIs for querying historical Runes data. See our [API Reference](https://api-docs.gaze.network) for full details.

## Installation

@@ -42,10 +34,9 @@ It comes with a set of APIs for querying historical Runes data. See our [API Ref

#### 1. Hardware Requirements

Each module requires different hardware requirements.
| Module  | CPU        | RAM    |
| ------- | ---------- | ------ |
| Bitcoin | 0.25 cores | 256 MB |
| Runes   | 0.5 cores  | 1 GB   |
| Module | CPU       | RAM  |
| ------ | --------- | ---- |
| Runes  | 0.5 cores | 1 GB |

#### 2. Prepare Bitcoin Core RPC server.

@@ -56,10 +47,9 @@ To self host a Bitcoin Core, see https://bitcoin.org/en/full-node.

Gaze Indexer has first-class support for PostgreSQL. If you wish to use other databases, you can implement your own database repository that satisfies each module's Data Gateway interface.
Here is our minimum database disk space requirement for each module.
| Module  | Database Storage |
| ------- | ---------------- |
| Bitcoin | 240 GB           |
| Runes   | 150 GB           |
| Module | Database Storage (current) | Database Storage (in 1 year) |
| ------ | -------------------------- | ---------------------------- |
| Runes  | 10 GB                      | 150 GB                       |

#### 4. Prepare `config.yaml` file.

@@ -93,21 +83,10 @@ http_server:

# Meta-protocol modules configuration options.
modules:
  # Configuration options for Bitcoin module. Can be removed if not used.
  bitcoin:
    database: "postgres" # Database to store bitcoin data. current supported databases: "postgres"
    postgres:
      host: "localhost"
      port: 5432
      user: "postgres"
      password: "password"
      db_name: "postgres"
      # url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.

  # Configuration options for Runes module. Can be removed if not used.
  runes:
    database: "postgres" # Database to store Runes data. current supported databases: "postgres"
    datasource: "database" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node" | "database". If "database" is used, it will use the database config in bitcoin module as datasource.
    datasource: "bitcoin-node" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node".
    api_handlers: # API handlers to enable. current supported handlers: "http"
      - http
    postgres:

@@ -127,14 +106,14 @@ We will be using `docker-compose` for our installation guide. Make sure the `doc

# docker-compose.yaml
services:
  gaze-indexer:
    image: ghcr.io/gaze-network/gaze-indexer:v1.0.0
    image: ghcr.io/gaze-network/gaze-indexer:v0.2.1
    container_name: gaze-indexer
    restart: unless-stopped
    ports:
      - 8080:8080 # Expose HTTP server port to host
    volumes:
      - "./config.yaml:/app/config.yaml" # mount config.yaml file to the container as "/app/config.yaml"
    command: ["/app/main", "run", "--bitcoin", "--runes"] # Put module flags after "run" commands to select which modules to run.
    command: ["/app/main", "run", "--modules", "runes"] # Put module flags after "run" commands to select which modules to run.
```

### Install from source

@@ -160,17 +139,17 @@ go build -o gaze main.go

4. Run database migrations with the `migrate` command and module flags.

```bash
./gaze migrate up --bitcoin --runes --database postgres://postgres:password@localhost:5432/postgres
./gaze migrate up --runes --database postgres://postgres:password@localhost:5432/postgres
```

5. Start the indexer with the `run` command and module flags.

```bash
./gaze run --bitcoin --runes
./gaze run --modules runes
```

If `config.yaml` is not located at `./app/config.yaml`, use the `--config` flag to specify the path to the `config.yaml` file.

```bash
./gaze run --bitcoin --runes --config /path/to/config.yaml
./gaze run --modules runes --config /path/to/config.yaml
```
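The README above says other databases can be supported by implementing a module's Data Gateway interface. Those interfaces are not shown in this diff, so the following is only a rough, hypothetical Go sketch of what "a repository that satisfies a data gateway" means; every name here (RuneBalanceDataGateway, memoryGateway, the method set) is illustrative and not taken from the codebase.

```go
package main

import "context"

// RuneBalanceDataGateway is a hypothetical data-gateway interface: the shape a
// module might require from its storage backend. The real Gaze Indexer
// interfaces differ.
type RuneBalanceDataGateway interface {
	GetBalance(ctx context.Context, address, runeID string) (uint64, error)
	SetBalance(ctx context.Context, address, runeID string, amount uint64) error
}

// memoryGateway is a toy in-memory implementation standing in for a custom
// (non-PostgreSQL) repository.
type memoryGateway struct {
	balances map[string]uint64
}

func newMemoryGateway() *memoryGateway {
	return &memoryGateway{balances: make(map[string]uint64)}
}

func (m *memoryGateway) GetBalance(_ context.Context, address, runeID string) (uint64, error) {
	return m.balances[address+"/"+runeID], nil
}

func (m *memoryGateway) SetBalance(_ context.Context, address, runeID string, amount uint64) error {
	m.balances[address+"/"+runeID] = amount
	return nil
}

// Compile-time check that the custom repository satisfies the interface,
// mirroring the `var _ X = (*Y)(nil)` pattern used elsewhere in this diff.
var _ RuneBalanceDataGateway = (*memoryGateway)(nil)

func main() {
	gw := newMemoryGateway()
	_ = gw.SetBalance(context.Background(), "bc1qexample", "UNCOMMON.GOODS", 42)
}
```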
cmd/cmd_run.go (381 changes)

@@ -15,79 +15,65 @@ import (

"github.com/btcsuite/btcd/rpcclient"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/core/indexers"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/bitcoin"
"github.com/gaze-network/indexer-network/modules/bitcoin/btcclient"
btcdatagateway "github.com/gaze-network/indexer-network/modules/bitcoin/datagateway"
btcpostgres "github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres"
"github.com/gaze-network/indexer-network/modules/runes"
runesapi "github.com/gaze-network/indexer-network/modules/runes/api"
runesdatagateway "github.com/gaze-network/indexer-network/modules/runes/datagateway"
runespostgres "github.com/gaze-network/indexer-network/modules/runes/repository/postgres"
runesusecase "github.com/gaze-network/indexer-network/modules/runes/usecase"
"github.com/gaze-network/indexer-network/pkg/errorhandler"
"github.com/gaze-network/indexer-network/pkg/automaxprocs"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/middleware/errorhandler"
"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
"github.com/gaze-network/indexer-network/pkg/middleware/requestlogger"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/compress"
"github.com/gofiber/fiber/v2/middleware/cors"
"github.com/gofiber/fiber/v2/middleware/favicon"
fiberrecover "github.com/gofiber/fiber/v2/middleware/recover"
"github.com/gofiber/fiber/v2/middleware/requestid"
"github.com/samber/do/v2"
"github.com/samber/lo"
"github.com/spf13/cobra"
)

const (
shutdownTimeout = 60 * time.Second
// Register Modules
var Modules = do.Package(
do.LazyNamed("runes", runes.New),
)

type runCmdOptions struct {
APIOnly bool
Bitcoin bool
Runes bool
}

func NewRunCommand() *cobra.Command {
opts := &runCmdOptions{}

// Create command
runCmd := &cobra.Command{
Use: "run",
Short: "Start indexer-network service",
RunE: func(cmd *cobra.Command, args []string) error {
return runHandler(opts, cmd, args)
if err := automaxprocs.Init(); err != nil {
logger.Error("Failed to set GOMAXPROCS", slogx.Error(err))
}
return runHandler(cmd, args)
},
}

// TODO: separate flags and bind flags to each module cmd package.

// Add local flags
flags := runCmd.Flags()
flags.BoolVar(&opts.APIOnly, "api-only", false, "Run only API server")
flags.BoolVar(&opts.Bitcoin, "bitcoin", false, "Enable Bitcoin indexer module")
flags.String("bitcoin-db", "postgres", `Database to store bitcoin data. current supported databases: "postgres"`)
flags.BoolVar(&opts.Runes, "runes", false, "Enable Runes indexer module")
flags.String("runes-db", "postgres", `Database to store runes data. current supported databases: "postgres"`)
flags.String("runes-datasource", "bitcoin-node", `Datasource to fetch bitcoin data for processing Meta-Protocol data. current supported datasources: "bitcoin-node" | "database"`)
flags.Bool("api-only", false, "Run only API server")
flags.String("modules", "", "Enable specific modules to run. E.g. `runes,brc20`")

// Bind flags to configuration
config.BindPFlag("modules.bitcoin.database", flags.Lookup("bitcoin-db"))
config.BindPFlag("modules.runes.database", flags.Lookup("runes-db"))
config.BindPFlag("modules.runes.datasource", flags.Lookup("runes-datasource"))
config.BindPFlag("api_only", flags.Lookup("api-only"))
config.BindPFlag("enable_modules", flags.Lookup("modules"))

return runCmd
}

type HttpHandler interface {
Mount(router fiber.Router) error
}
const (
shutdownTimeout = 60 * time.Second
)

func runHandler(opts *runCmdOptions, cmd *cobra.Command, _ []string) error {
func runHandler(cmd *cobra.Command, _ []string) error {
conf := config.Load()

// Validate inputs
// Validate inputs and configurations
{
if !conf.Network.IsSupported() {
return errors.Wrapf(errs.Unsupported, "%q network is not supported", conf.Network.String())

@@ -98,202 +84,78 @@ func runHandler(opts *runCmdOptions, cmd *cobra.Command, _ []string) error {

ctx, stop := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
defer stop()

// Initialize worker context to separate worker's lifecycle from main process
ctxWorker, stopWorker := context.WithCancel(context.Background())
defer stopWorker()
injector := do.New(Modules)
do.ProvideValue(injector, conf)
do.ProvideValue(injector, ctx)

// Add logger context
ctxWorker = logger.WithContext(ctxWorker, slogx.Stringer("network", conf.Network))
// Initialize Bitcoin RPC client
do.Provide(injector, func(i do.Injector) (*rpcclient.Client, error) {
conf := do.MustInvoke[config.Config](i)

// Initialize Bitcoin Core RPC Client
client, err := rpcclient.New(&rpcclient.ConnConfig{
Host: conf.BitcoinNode.Host,
User: conf.BitcoinNode.User,
Pass: conf.BitcoinNode.Pass,
DisableTLS: conf.BitcoinNode.DisableTLS,
HTTPPostMode: true,
}, nil)
if err != nil {
logger.PanicContext(ctx, "Invalid Bitcoin node configuration", slogx.Error(err))
}
defer client.Shutdown()

// Check Bitcoin RPC connection
{
start := time.Now()
logger.InfoContext(ctx, "Connecting to Bitcoin Core RPC Server...", slogx.String("host", conf.BitcoinNode.Host))
if err := client.Ping(); err != nil {
logger.PanicContext(ctx, "Can't connect to Bitcoin Core RPC Server", slogx.String("host", conf.BitcoinNode.Host), slogx.Error(err))
client, err := rpcclient.New(&rpcclient.ConnConfig{
Host: conf.BitcoinNode.Host,
User: conf.BitcoinNode.User,
Pass: conf.BitcoinNode.Pass,
DisableTLS: conf.BitcoinNode.DisableTLS,
HTTPPostMode: true,
}, nil)
if err != nil {
return nil, errors.Wrap(err, "invalid Bitcoin node configuration")
}
logger.InfoContext(ctx, "Connected to Bitcoin Core RPC Server", slog.Duration("latency", time.Since(start)))
}

// TODO: create module command package.
// each module should have its own command package and main package will routing the command to the module command package.
// Check Bitcoin RPC connection
{
start := time.Now()
logger.InfoContext(ctx, "Connecting to Bitcoin Core RPC Server...", slogx.String("host", conf.BitcoinNode.Host))
if err := client.Ping(); err != nil {
return nil, errors.Wrapf(err, "can't connect to Bitcoin Core RPC Server %q", conf.BitcoinNode.Host)
}
logger.InfoContext(ctx, "Connected to Bitcoin Core RPC Server", slog.Duration("latency", time.Since(start)))
}

// TODO: refactor module name to specific type instead of string?
httpHandlers := make(map[string]HttpHandler, 0)
return client, nil
})

var reportingClient *reportingclient.ReportingClient
if !conf.Reporting.Disabled {
reportingClient, err = reportingclient.New(conf.Reporting)
// Initialize reporting client
do.Provide(injector, func(i do.Injector) (*reportingclient.ReportingClient, error) {
conf := do.MustInvoke[config.Config](i)
if conf.Reporting.Disabled {
return nil, nil
}

reportingClient, err := reportingclient.New(conf.Reporting)
if err != nil {
if errors.Is(err, errs.InvalidArgument) {
logger.PanicContext(ctx, "Invalid reporting configuration", slogx.Error(err))
return nil, errors.Wrap(err, "invalid reporting configuration")
}
logger.PanicContext(ctx, "Something went wrong, can't create reporting client", slogx.Error(err))
return nil, errors.Wrap(err, "can't create reporting client")
}
}
return reportingClient, nil
})

// Initialize Bitcoin Indexer
if opts.Bitcoin {
ctx := logger.WithContext(ctx, slogx.String("module", "bitcoin"))
var (
btcDB btcdatagateway.BitcoinDataGateway
indexerInfoDB btcdatagateway.IndexerInformationDataGateway
)
switch strings.ToLower(conf.Modules.Bitcoin.Database) {
case "postgresql", "postgres", "pg":
pg, err := postgres.NewPool(ctx, conf.Modules.Bitcoin.Postgres)
if err != nil {
if errors.Is(err, errs.InvalidArgument) {
logger.PanicContext(ctx, "Invalid Postgres configuration for indexer", slogx.Error(err))
}
logger.PanicContext(ctx, "Something went wrong, can't create Postgres connection pool", slogx.Error(err))
}
defer pg.Close()
repo := btcpostgres.NewRepository(pg)
btcDB = repo
indexerInfoDB = repo
default:
return errors.Wrapf(errs.Unsupported, "%q database for indexer is not supported", conf.Modules.Bitcoin.Database)
}
if !opts.APIOnly {
processor := bitcoin.NewProcessor(conf, btcDB, indexerInfoDB)
datasource := datasources.NewBitcoinNode(client)
indexer := indexers.NewBitcoinIndexer(processor, datasource)
defer func() {
if err := indexer.ShutdownWithTimeout(shutdownTimeout); err != nil {
logger.ErrorContext(ctx, "Error during shutdown indexer", slogx.Error(err))
return
}
logger.InfoContext(ctx, "Indexer stopped gracefully")
}()

// Verify states before running Indexer
if err := processor.VerifyStates(ctx); err != nil {
return errors.WithStack(err)
}

// Run Indexer
go func() {
// stop main process if indexer stopped
defer stop()

logger.InfoContext(ctx, "Starting Gaze Indexer")
if err := indexer.Run(ctxWorker); err != nil {
logger.PanicContext(ctx, "Something went wrong, error during running indexer", slogx.Error(err))
}
}()
}
}

// Initialize Runes Indexer
if opts.Runes {
ctx := logger.WithContext(ctx, slogx.String("module", "runes"))
var (
runesDg runesdatagateway.RunesDataGateway
indexerInfoDg runesdatagateway.IndexerInfoDataGateway
)
switch strings.ToLower(conf.Modules.Runes.Database) {
case "postgresql", "postgres", "pg":
pg, err := postgres.NewPool(ctx, conf.Modules.Runes.Postgres)
if err != nil {
if errors.Is(err, errs.InvalidArgument) {
logger.PanicContext(ctx, "Invalid Postgres configuration for indexer", slogx.Error(err))
}
logger.PanicContext(ctx, "Something went wrong, can't create Postgres connection pool", slogx.Error(err))
}
defer pg.Close()
runesRepo := runespostgres.NewRepository(pg)
runesDg = runesRepo
indexerInfoDg = runesRepo
default:
return errors.Wrapf(errs.Unsupported, "%q database for indexer is not supported", conf.Modules.Runes.Database)
}
var bitcoinDatasource indexers.BitcoinDatasource
var bitcoinClient btcclient.Contract
switch strings.ToLower(conf.Modules.Runes.Datasource) {
case "bitcoin-node":
bitcoinNodeDatasource := datasources.NewBitcoinNode(client)
bitcoinDatasource = bitcoinNodeDatasource
bitcoinClient = bitcoinNodeDatasource
case "database":
pg, err := postgres.NewPool(ctx, conf.Modules.Bitcoin.Postgres)
if err != nil {
if errors.Is(err, errs.InvalidArgument) {
logger.PanicContext(ctx, "Invalid Postgres configuration for datasource", slogx.Error(err))
}
logger.PanicContext(ctx, "Something went wrong, can't create Postgres connection pool", slogx.Error(err))
}
defer pg.Close()
btcRepo := btcpostgres.NewRepository(pg)
btcClientDB := btcclient.NewClientDatabase(btcRepo)
bitcoinDatasource = btcClientDB
bitcoinClient = btcClientDB
default:
return errors.Wrapf(errs.Unsupported, "%q datasource is not supported", conf.Modules.Runes.Datasource)
}

if !opts.APIOnly {
processor := runes.NewProcessor(runesDg, indexerInfoDg, bitcoinClient, bitcoinDatasource, conf.Network, reportingClient)
indexer := indexers.NewBitcoinIndexer(processor, bitcoinDatasource)
defer func() {
if err := indexer.ShutdownWithTimeout(shutdownTimeout); err != nil {
logger.ErrorContext(ctx, "Error during shutdown indexer", slogx.Error(err))
return
}
logger.InfoContext(ctx, "Indexer stopped gracefully")
}()

if err := processor.VerifyStates(ctx); err != nil {
return errors.WithStack(err)
}

// Run Indexer
go func() {
// stop main process if indexer stopped
defer stop()

logger.InfoContext(ctx, "Starting Gaze Indexer")
if err := indexer.Run(ctxWorker); err != nil {
logger.PanicContext(ctx, "Something went wrong, error during running indexer", slogx.Error(err))
}
}()
}

// Mount API
apiHandlers := lo.Uniq(conf.Modules.Runes.APIHandlers)
for _, handler := range apiHandlers {
switch handler { // TODO: support more handlers (e.g. gRPC)
case "http":
runesUsecase := runesusecase.New(runesDg, bitcoinClient)
runesHTTPHandler := runesapi.NewHTTPHandler(conf.Network, runesUsecase)
httpHandlers["runes"] = runesHTTPHandler
default:
logger.PanicContext(ctx, "Something went wrong, unsupported API handler", slogx.String("handler", handler))
}
}
}

// Wait for interrupt signal to gracefully stop the server with
// Setup HTTP server if there are any HTTP handlers
if len(httpHandlers) > 0 {
// Initialize HTTP server
do.Provide(injector, func(i do.Injector) (*fiber.App, error) {
app := fiber.New(fiber.Config{
AppName: "Gaze Indexer",
ErrorHandler: errorhandler.NewHTTPErrorHandler(),
AppName: "Gaze Indexer",
ErrorHandler: func(c *fiber.Ctx, err error) error {
logger.ErrorContext(c.UserContext(), "Something went wrong, unhandled api error",
slogx.String("event", "api_unhandled_error"),
slogx.Error(err),
)
return errors.WithStack(c.Status(http.StatusInternalServerError).JSON(fiber.Map{
"error": "Internal Server Error",
}))
},
})
app.
Use(favicon.New()).
Use(cors.New()).
Use(requestid.New()).
Use(requestcontext.New(
requestcontext.WithRequestId(),
requestcontext.WithClientIP(conf.HTTPServer.RequestIP),
)).
Use(requestlogger.New(conf.HTTPServer.Logger)).
Use(fiberrecover.New(fiberrecover.Config{
EnableStackTrace: true,
StackTraceHandler: func(c *fiber.Ctx, e interface{}) {

@@ -302,42 +164,69 @@ func runHandler(opts *runCmdOptions, cmd *cobra.Command, _ []string) error {

logger.ErrorContext(c.UserContext(), "Something went wrong, panic in http handler", slogx.Any("panic", e), slog.String("stacktrace", string(buf)))
},
})).
Use(errorhandler.New()).
Use(compress.New(compress.Config{
Level: compress.LevelDefault,
}))

defer func() {
if err := app.ShutdownWithTimeout(shutdownTimeout); err != nil {
logger.ErrorContext(ctx, "Error during shutdown HTTP server", slogx.Error(err))
return
}
logger.InfoContext(ctx, "HTTP server stopped gracefully")
}()

// Health check
app.Get("/", func(c *fiber.Ctx) error {
return errors.WithStack(c.SendStatus(http.StatusOK))
})

// mount http handlers from each http-enabled module
for module, handler := range httpHandlers {
if err := handler.Mount(app); err != nil {
logger.PanicContext(ctx, "Something went wrong, can't mount HTTP handler", slogx.Error(err), slogx.String("module", module))
return app, nil
})

// Initialize worker context to separate worker's lifecycle from main process
ctxWorker, stopWorker := context.WithCancel(context.Background())
defer stopWorker()

// Add logger context
ctxWorker = logger.WithContext(ctxWorker, slogx.Stringer("network", conf.Network))

// Run modules
{
modules := lo.Uniq(conf.EnableModules)
modules = lo.Map(modules, func(item string, _ int) string { return strings.TrimSpace(item) })
modules = lo.Filter(modules, func(item string, _ int) bool { return item != "" })
for _, module := range modules {
ctx := logger.WithContext(ctxWorker, slogx.String("module", module))

indexer, err := do.InvokeNamed[indexer.IndexerWorker](injector, module)
if err != nil {
if errors.Is(err, do.ErrServiceNotFound) {
return errors.Errorf("Module %q is not supported", module)
}
return errors.Wrapf(err, "can't init module %q", module)
}

// Run Indexer
if !conf.APIOnly {
go func() {
// stop main process if indexer stopped
defer stop()

logger.InfoContext(ctx, "Starting Gaze Indexer")
if err := indexer.Run(ctx); err != nil {
logger.PanicContext(ctx, "Something went wrong, error during running indexer", slogx.Error(err))
}
}()
}
logger.InfoContext(ctx, "Mounted HTTP handler", slogx.String("module", module))
}

go func() {
// stop main process if API stopped
defer stop()

logger.InfoContext(ctx, "Started HTTP server", slog.Int("port", conf.HTTPServer.Port))
if err := app.Listen(fmt.Sprintf(":%d", conf.HTTPServer.Port)); err != nil {
logger.PanicContext(ctx, "Something went wrong, error during running HTTP server", slogx.Error(err))
}
}()
}

// Run API server
httpServer := do.MustInvoke[*fiber.App](injector)
go func() {
// stop main process if API stopped
defer stop()

logger.InfoContext(ctx, "Started HTTP server", slog.Int("port", conf.HTTPServer.Port))
if err := httpServer.Listen(fmt.Sprintf(":%d", conf.HTTPServer.Port)); err != nil {
logger.PanicContext(ctx, "Something went wrong, error during running HTTP server", slogx.Error(err))
}
}()

// Stop application if worker context is done
go func() {
<-ctxWorker.Done()

@@ -366,5 +255,9 @@ func runHandler(opts *runCmdOptions, cmd *cobra.Command, _ []string) error {

}
}()

if err := injector.Shutdown(); err != nil {
logger.PanicContext(ctx, "Failed while gracefully shutting down", slogx.Error(err))
}

return nil
}
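The rewritten cmd_run.go replaces the per-module booleans (`--bitcoin`, `--runes`) with a name-keyed registry: module constructors are registered via do.Package/do.LazyNamed and resolved with do.InvokeNamed using the `--modules` flag. Below is a minimal, self-contained sketch of that pattern, using samber/do/v2 only in the ways the diff itself does; the Worker interface and newRunesWorker are illustrative stand-ins, not the repository's actual types.

```go
package main

import (
	"context"
	"fmt"

	"github.com/samber/do/v2"
)

// Worker plays the role of indexer.IndexerWorker in the diff: something the
// run command can resolve by module name and start.
type Worker interface {
	Run(ctx context.Context) error
}

type runesWorker struct{}

func (runesWorker) Run(ctx context.Context) error {
	fmt.Println("runes indexer running")
	return nil
}

// newRunesWorker stands in for runes.New (hypothetical signature).
func newRunesWorker(do.Injector) (Worker, error) { return runesWorker{}, nil }

// Modules mirrors the registry in cmd_run.go; adding a module is one more
// do.LazyNamed entry.
var Modules = do.Package(
	do.LazyNamed("runes", newRunesWorker),
)

func main() {
	injector := do.New(Modules)
	// Equivalent of `run --modules runes`: only the named modules are instantiated.
	for _, name := range []string{"runes"} {
		w, err := do.InvokeNamed[Worker](injector, name)
		if err != nil {
			panic(err) // e.g. do.ErrServiceNotFound for an unknown module name
		}
		_ = w.Run(context.Background())
	}
}
```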
@@ -6,15 +6,13 @@ import (

"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/constants"
"github.com/gaze-network/indexer-network/modules/bitcoin"
"github.com/gaze-network/indexer-network/modules/runes"
"github.com/spf13/cobra"
)

var versions = map[string]string{
"": constants.Version,
"bitcoin": bitcoin.Version,
"runes": runes.Version,
"": constants.Version,
"runes": runes.Version,
}

type versionCmdOptions struct {

@@ -33,7 +31,7 @@ func NewVersionCommand() *cobra.Command {

}

flags := cmd.Flags()
flags.StringVar(&opts.Modules, "module", "", `Show version of a specific module. E.g. "bitcoin" | "runes"`)
flags.StringVar(&opts.Modules, "module", "", `Show version of a specific module. E.g. "runes"`)

return cmd
}
@@ -17,7 +17,6 @@ import (

type migrateDownCmdOptions struct {
DatabaseURL string
Bitcoin bool
Runes bool
All bool
}

@@ -60,7 +59,6 @@ func NewMigrateDownCommand() *cobra.Command {

}

flags := cmd.Flags()
flags.BoolVar(&opts.Bitcoin, "bitcoin", false, "Apply Bitcoin down migrations")
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes down migrations")
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")
flags.BoolVar(&opts.All, "all", false, "Confirm apply ALL down migrations without prompt")

@@ -118,11 +116,6 @@ func migrateDownHandler(opts *migrateDownCmdOptions, _ *cobra.Command, args migr

return nil
}

if opts.Bitcoin {
if err := applyDownMigrations("Bitcoin", bitcoinMigrationSource, "bitcoin_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
if opts.Runes {
if err := applyDownMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
return errors.WithStack(err)
@@ -16,7 +16,6 @@ import (

type migrateUpCmdOptions struct {
DatabaseURL string
Bitcoin bool
Runes bool
}

@@ -55,7 +54,6 @@ func NewMigrateUpCommand() *cobra.Command {

}

flags := cmd.Flags()
flags.BoolVar(&opts.Bitcoin, "bitcoin", false, "Apply Bitcoin up migrations")
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes up migrations")
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")

@@ -103,11 +101,6 @@ func migrateUpHandler(opts *migrateUpCmdOptions, _ *cobra.Command, args migrateU

return nil
}

if opts.Bitcoin {
if err := applyUpMigrations("Bitcoin", bitcoinMigrationSource, "bitcoin_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
if opts.Runes {
if err := applyUpMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
return errors.WithStack(err)
@@ -3,8 +3,7 @@ package migrate

import "net/url"

const (
bitcoinMigrationSource = "modules/bitcoin/database/postgresql/migrations"
runesMigrationSource = "modules/runes/database/postgresql/migrations"
runesMigrationSource = "modules/runes/database/postgresql/migrations"
)

func cloneURLWithQuery(u *url.URL, newQuery url.Values) *url.URL {
@@ -23,24 +23,21 @@ reporting:

# HTTP server configuration options.
http_server:
  port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers.
  logger:
    disable: false # disable logger if logger level is `INFO`
    request_header: false
    request_query: false
  requestip: # Client IP extraction configuration options. This is unnecessary if you don't care about the real client IP or if you're not using a reverse proxy.
    trusted_proxies_ip: # Cloudflare, GCP Public LB. See: server/internal/middleware/requestcontext/PROXY-IP.md
    trusted_proxies_header: # X-Real-IP, CF-Connecting-IP
    enable_reject_malformed_request: false # return 403 if request is malformed (invalid IP)

# Meta-protocol modules configuration options.
modules:
  # Configuration options for Bitcoin module. Can be removed if not used.
  bitcoin:
    database: "postgres" # Database to store bitcoin data. current supported databases: "postgres"
    postgres:
      host: "localhost"
      port: 5432
      user: "postgres"
      password: "password"
      db_name: "postgres"
      # url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.

  # Configuration options for Runes module. Can be removed if not used.
  runes:
    database: "postgres" # Database to store Runes data. current supported databases: "postgres"
    datasource: "database" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node" | "database". If "database" is used, it will use the database config in bitcoin module as datasource.
    datasource: "database" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node".
    api_handlers: # API handlers to enable. current supported handlers: "http"
      - http
    postgres:
@@ -1,5 +1,5 @@
package constants

const (
Version = "v0.0.1"
Version = "v0.2.1"
)
@@ -24,7 +24,7 @@ const (

)

// Make sure to implement the BitcoinDatasource interface
var _ Datasource[[]*types.Block] = (*BitcoinNodeDatasource)(nil)
var _ Datasource[*types.Block] = (*BitcoinNodeDatasource)(nil)

// BitcoinNodeDatasource fetch data from Bitcoin node for Bitcoin Indexer
type BitcoinNodeDatasource struct {
@@ -10,7 +10,7 @@ import (

// Datasource is an interface for indexer data sources.
type Datasource[T any] interface {
Name() string
Fetch(ctx context.Context, from, to int64) (T, error)
FetchAsync(ctx context.Context, from, to int64, ch chan<- T) (*subscription.ClientSubscription[T], error)
Fetch(ctx context.Context, from, to int64) ([]T, error)
FetchAsync(ctx context.Context, from, to int64, ch chan<- []T) (*subscription.ClientSubscription[[]T], error)
GetBlockHeader(ctx context.Context, height int64) (types.BlockHeader, error)
}
@@ -1,4 +1,4 @@
package indexers
package indexer

import (
"context"

@@ -16,20 +16,15 @@ import (

const (
maxReorgLookBack = 1000

// pollingInterval is the default polling interval for the indexer polling worker
pollingInterval = 15 * time.Second
)

type (
BitcoinProcessor Processor[[]*types.Block]
BitcoinDatasource datasources.Datasource[[]*types.Block]
)

// Make sure to implement the IndexerWorker interface
var _ IndexerWorker = (*BitcoinIndexer)(nil)

// BitcoinIndexer is the polling indexer for sync Bitcoin data to the database.
type BitcoinIndexer struct {
Processor BitcoinProcessor
Datasource BitcoinDatasource
// Indexer generic indexer for fetching and processing data
type Indexer[T Input] struct {
Processor Processor[T]
Datasource datasources.Datasource[T]
currentBlock types.BlockHeader

quitOnce sync.Once

@@ -37,9 +32,9 @@ type BitcoinIndexer struct {

done chan struct{}
}

// NewBitcoinIndexer create new BitcoinIndexer
func NewBitcoinIndexer(processor BitcoinProcessor, datasource BitcoinDatasource) *BitcoinIndexer {
return &BitcoinIndexer{
// New create new generic indexer
func New[T Input](processor Processor[T], datasource datasources.Datasource[T]) *Indexer[T] {
return &Indexer[T]{
Processor: processor,
Datasource: datasource,

@@ -48,21 +43,17 @@ func NewBitcoinIndexer(processor BitcoinProcessor, datasource BitcoinDatasource)

}
}

func (*BitcoinIndexer) Type() string {
return "bitcoin"
}

func (i *BitcoinIndexer) Shutdown() error {
func (i *Indexer[T]) Shutdown() error {
return i.ShutdownWithContext(context.Background())
}

func (i *BitcoinIndexer) ShutdownWithTimeout(timeout time.Duration) error {
func (i *Indexer[T]) ShutdownWithTimeout(timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
return i.ShutdownWithContext(ctx)
}

func (i *BitcoinIndexer) ShutdownWithContext(ctx context.Context) (err error) {
func (i *Indexer[T]) ShutdownWithContext(ctx context.Context) (err error) {
i.quitOnce.Do(func() {
close(i.quit)
select {

@@ -76,12 +67,11 @@ func (i *BitcoinIndexer) ShutdownWithContext(ctx context.Context) (err error) {

return
}

func (i *BitcoinIndexer) Run(ctx context.Context) (err error) {
func (i *Indexer[T]) Run(ctx context.Context) (err error) {
defer close(i.done)

ctx = logger.WithContext(ctx,
slog.String("package", "indexers"),
slog.String("indexer", i.Type()),
slog.String("processor", i.Processor.Name()),
slog.String("datasource", i.Datasource.Name()),
)

@@ -101,6 +91,10 @@ func (i *BitcoinIndexer) Run(ctx context.Context) (err error) {

select {
case <-i.quit:
logger.InfoContext(ctx, "Got quit signal, stopping indexer")
if err := i.Processor.Shutdown(ctx); err != nil {
logger.ErrorContext(ctx, "Failed to shutdown processor", slogx.Error(err))
return errors.Wrap(err, "processor shutdown failed")
}
return nil
case <-ctx.Done():
return nil

@@ -114,15 +108,15 @@ func (i *BitcoinIndexer) Run(ctx context.Context) (err error) {

}
}

func (i *BitcoinIndexer) process(ctx context.Context) (err error) {
func (i *Indexer[T]) process(ctx context.Context) (err error) {
// height range to fetch data
from, to := i.currentBlock.Height+1, int64(-1)

logger.InfoContext(ctx, "Start fetching bitcoin blocks", slog.Int64("from", from))
ch := make(chan []*types.Block)
logger.InfoContext(ctx, "Start fetching input data", slog.Int64("from", from))
ch := make(chan []T)
subscription, err := i.Datasource.FetchAsync(ctx, from, to, ch)
if err != nil {
return errors.Wrap(err, "failed to fetch data")
return errors.Wrap(err, "failed to fetch input data")
}
defer subscription.Unsubscribe()

@@ -130,21 +124,24 @@ func (i *BitcoinIndexer) process(ctx context.Context) (err error) {

select {
case <-i.quit:
return nil
case blocks := <-ch:
// empty blocks
if len(blocks) == 0 {
case inputs := <-ch:
// empty inputs
if len(inputs) == 0 {
continue
}

firstInput := inputs[0]
firstInputHeader := firstInput.BlockHeader()

startAt := time.Now()
ctx := logger.WithContext(ctx,
slogx.Int64("from", blocks[0].Header.Height),
slogx.Int64("to", blocks[len(blocks)-1].Header.Height),
slogx.Int64("from", firstInputHeader.Height),
slogx.Int64("to", inputs[len(inputs)-1].BlockHeader().Height),
)

// validate reorg from first block
// validate reorg from first input
{
remoteBlockHeader := blocks[0].Header
remoteBlockHeader := firstInputHeader
if !remoteBlockHeader.PrevBlock.IsEqual(&i.currentBlock.Hash) {
logger.WarnContext(ctx, "Detected chain reorganization. Searching for fork point...",
slogx.String("event", "reorg_detected"),

@@ -210,33 +207,36 @@ func (i *BitcoinIndexer) process(ctx context.Context) (err error) {

}
}

// validate is block is continuous and no reorg
for i := 1; i < len(blocks); i++ {
if blocks[i].Header.Height != blocks[i-1].Header.Height+1 {
return errors.Wrapf(errs.InternalError, "block is not continuous, block[%d] height: %d, block[%d] height: %d", i-1, blocks[i-1].Header.Height, i, blocks[i].Header.Height)
// validate is input is continuous and no reorg
prevHeader := i.currentBlock
for i, input := range inputs {
header := input.BlockHeader()
if header.Height != prevHeader.Height+1 {
return errors.Wrapf(errs.InternalError, "input is not continuous, input[%d] height: %d, input[%d] height: %d", i-1, prevHeader.Height, i, header.Height)
}

if !blocks[i].Header.PrevBlock.IsEqual(&blocks[i-1].Header.Hash) {
logger.WarnContext(ctx, "Chain Reorganization occurred in the middle of batch fetching blocks, need to try to fetch again")
if !header.PrevBlock.IsEqual(&prevHeader.Hash) {
logger.WarnContext(ctx, "Chain Reorganization occurred in the middle of batch fetching inputs, need to try to fetch again")

// end current round
return nil
}
prevHeader = header
}

ctx = logger.WithContext(ctx, slog.Int("total_blocks", len(blocks)))
ctx = logger.WithContext(ctx, slog.Int("total_inputs", len(inputs)))

// Start processing blocks
logger.InfoContext(ctx, "Processing blocks")
if err := i.Processor.Process(ctx, blocks); err != nil {
// Start processing input
logger.InfoContext(ctx, "Processing inputs")
if err := i.Processor.Process(ctx, inputs); err != nil {
return errors.WithStack(err)
}

// Update current state
i.currentBlock = blocks[len(blocks)-1].Header
i.currentBlock = inputs[len(inputs)-1].BlockHeader()

logger.InfoContext(ctx, "Processed blocks successfully",
slogx.String("event", "processed_blocks"),
logger.InfoContext(ctx, "Processed inputs successfully",
slogx.String("event", "processed_inputs"),
slogx.Int64("current_block", i.currentBlock.Height),
slogx.Duration("duration", time.Since(startAt)),
)
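The reworked process loop keeps one rolling previous header and checks every incoming input against it: heights must increase by exactly one, and each input's PrevBlock hash must match the previous header's hash; otherwise a mid-batch reorg is assumed and the round is abandoned. A stripped-down, self-contained sketch of that validation follows (simplified local types, string hashes instead of chainhash; not the repository's actual code).

```go
package main

import (
	"errors"
	"fmt"
)

// BlockHeader is a simplified stand-in for types.BlockHeader.
type BlockHeader struct {
	Height    int64
	Hash      string
	PrevBlock string
}

// validateContinuity mirrors the loop added in Indexer[T].process: headers must
// chain onto prev both by height and by previous-block hash.
func validateContinuity(prev BlockHeader, headers []BlockHeader) error {
	for i, h := range headers {
		if h.Height != prev.Height+1 {
			return fmt.Errorf("input is not continuous: input[%d] height %d, input[%d] height %d", i-1, prev.Height, i, h.Height)
		}
		if h.PrevBlock != prev.Hash {
			// In the real indexer this is treated as a reorg: end the round and refetch.
			return errors.New("chain reorganization detected in the middle of the batch")
		}
		prev = h
	}
	return nil
}

func main() {
	prev := BlockHeader{Height: 100, Hash: "a"}
	batch := []BlockHeader{
		{Height: 101, Hash: "b", PrevBlock: "a"},
		{Height: 102, Hash: "c", PrevBlock: "b"},
	}
	fmt.Println(validateContinuity(prev, batch)) // <nil>
}
```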
@@ -1,4 +1,4 @@
package indexers
package indexer

import (
"context"

@@ -7,24 +7,15 @@ import (

"github.com/gaze-network/indexer-network/core/types"
)

const (
// pollingInterval is the default polling interval for the indexer polling worker
pollingInterval = 15 * time.Second
)

type IndexerWorker interface {
Type() string
Run(ctx context.Context) error
Shutdown() error
ShutdownWithTimeout(timeout time.Duration) error
ShutdownWithContext(ctx context.Context) error
type Input interface {
BlockHeader() types.BlockHeader
}

type Processor[T any] interface {
type Processor[T Input] interface {
Name() string

// Process processes the input data and indexes it.
Process(ctx context.Context, inputs T) error
Process(ctx context.Context, inputs []T) error

// CurrentBlock returns the latest indexed block header.
CurrentBlock(ctx context.Context) (types.BlockHeader, error)

@@ -38,4 +29,14 @@ type Processor[T any] interface {

// VerifyStates verifies the states of the indexed data and the indexer
// to ensure the last shutdown was graceful and no missing data.
VerifyStates(ctx context.Context) error

// Shutdown gracefully stops the processor. Database connections, network calls, leftover states, etc. should be closed and cleaned up here.
Shutdown(ctx context.Context) error
}

type IndexerWorker interface {
Shutdown() error
ShutdownWithTimeout(timeout time.Duration) error
ShutdownWithContext(ctx context.Context) (err error)
Run(ctx context.Context) (err error)
}

@@ -38,6 +38,10 @@ type Block struct {

Transactions []*Transaction
}

func (b *Block) BlockHeader() BlockHeader {
return b.Header
}

func ParseMsgBlock(src *wire.MsgBlock, height int64) *Block {
hash := src.Header.BlockHash()
return &Block{
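With this refactor, anything a module indexes only has to expose its block header: the generic Indexer[T] is constrained by the new Input interface, and Processor[T Input] receives a slice of those inputs. The toy illustration below shows how a custom input/processor pair fits those shapes; it uses simplified local types rather than the repository's core/types and core/indexer packages.

```go
package main

import (
	"context"
	"fmt"
)

// Simplified stand-ins for types.BlockHeader and the core/indexer interfaces.
type BlockHeader struct{ Height int64 }

type Input interface {
	BlockHeader() BlockHeader
}

type Processor[T Input] interface {
	Process(ctx context.Context, inputs []T) error
}

// Block mimics types.Block after this diff: it satisfies Input via BlockHeader().
type Block struct{ Header BlockHeader }

func (b Block) BlockHeader() BlockHeader { return b.Header }

// printProcessor is a trivial Processor[Block] implementation.
type printProcessor struct{}

func (printProcessor) Process(_ context.Context, inputs []Block) error {
	for _, in := range inputs {
		fmt.Println("processed block at height", in.BlockHeader().Height)
	}
	return nil
}

func main() {
	var p Processor[Block] = printProcessor{}
	_ = p.Process(context.Background(), []Block{{Header: BlockHeader{Height: 840000}}})
}
```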
go.mod (24 changes)

@@ -6,37 +6,39 @@ require (

github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11
github.com/btcsuite/btcd v0.24.0
github.com/btcsuite/btcd/btcutil v1.1.5
github.com/btcsuite/btcd/btcutil/psbt v1.1.9
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0
github.com/cockroachdb/errors v1.11.1
github.com/gaze-network/uint128 v1.3.0
github.com/gofiber/fiber/v2 v2.52.4
github.com/golang-migrate/migrate/v4 v4.17.1
github.com/jackc/pgx v3.6.2+incompatible
github.com/jackc/pgx/v5 v5.5.5
github.com/mcosta74/pgx-slog v0.3.0
github.com/planxnx/concurrent-stream v0.1.5
github.com/samber/do/v2 v2.0.0-beta.7
github.com/samber/lo v1.39.0
github.com/shopspring/decimal v1.3.1
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.8.4
github.com/stretchr/testify v1.9.0
github.com/valyala/fasthttp v1.51.0
go.uber.org/automaxprocs v1.5.3
golang.org/x/sync v0.5.0
golang.org/x/sync v0.7.0
)

require (
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.1.3 // indirect
github.com/bitonicnl/verify-signed-message v0.7.1
github.com/btcsuite/btcd/btcec/v2 v2.3.3 // indirect
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect

@@ -47,6 +49,7 @@ require (

github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx v3.6.2+incompatible // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/klauspost/compress v1.17.0 // indirect
github.com/kr/pretty v0.3.1 // indirect

@@ -64,6 +67,7 @@ require (

github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/samber/go-type-to-string v1.4.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect

@@ -72,10 +76,10 @@ require (

github.com/valyala/tcplisten v1.0.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.20.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
go.sum (54 changes)

@@ -7,18 +7,23 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5

github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/bitonicnl/verify-signed-message v0.7.1 h1:1Qku9k9WgzobjqBY7tT3CLjWxtTJZxkYNhOV6QeCTjY=
github.com/bitonicnl/verify-signed-message v0.7.1/go.mod h1:PR60twfJIaHEo9Wb6eJBh8nBHEZIQQx8CvRwh0YmEPk=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A=
github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo=
github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcec/v2 v2.3.3 h1:6+iXlDKE8RMtKsvK0gshlXIuPbyWM/h84Ensb7o3sC0=
github.com/btcsuite/btcd/btcec/v2 v2.3.3/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8=
github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00=
github.com/btcsuite/btcd/btcutil/psbt v1.1.9 h1:UmfOIiWMZcVMOLaN+lxbbLSuoINGS1WmK1TZNI0b4yk=
github.com/btcsuite/btcd/btcutil/psbt v1.1.9/go.mod h1:ehBEvU91lxSlXtA+zZz3iFYx7Yq9eqnKx4/kSrnsvMY=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=

@@ -50,10 +55,12 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs

github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg=
github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA=

@@ -96,8 +103,8 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW

github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=

@@ -191,6 +198,10 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke

github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/samber/do/v2 v2.0.0-beta.7 h1:tmdLOVSCbTA6uGWLU5poi/nZvMRh5QxXFJ9vHytU+Jk=
github.com/samber/do/v2 v2.0.0-beta.7/go.mod h1:+LpV3vu4L81Q1JMZNSkMvSkW9lt4e5eJoXoZHkeBS4c=
github.com/samber/go-type-to-string v1.4.0 h1:KXphToZgiFdnJQxryU25brhlh/CqY/cwJVeX2rfmow0=
github.com/samber/go-type-to-string v1.4.0/go.mod h1:jpU77vIDoIxkahknKDoEx9C8bQ1ADnh2sotZ8I4QqBU=
github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=

@@ -214,8 +225,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV

github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=

@@ -231,20 +243,22 @@ go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=

go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d h1:N0hmiNbwsSNwHBAvR3QB5w25pUwH4tK0Y/RltD1j1h4=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=

@@ -259,8 +273,8 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ

golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -273,19 +287,19 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
|
||||
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -8,10 +8,11 @@ import (
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
btcconfig "github.com/gaze-network/indexer-network/modules/bitcoin/config"
|
||||
runesconfig "github.com/gaze-network/indexer-network/modules/runes/config"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
|
||||
"github.com/gaze-network/indexer-network/pkg/middleware/requestlogger"
|
||||
"github.com/gaze-network/indexer-network/pkg/reportingclient"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
@@ -25,20 +26,31 @@ var (
|
||||
Output: "TEXT",
|
||||
},
|
||||
Network: common.NetworkMainnet,
|
||||
HTTPServer: HTTPServerConfig{
|
||||
Port: 8080,
|
||||
},
|
||||
BitcoinNode: BitcoinNodeClient{
|
||||
User: "user",
|
||||
Pass: "pass",
|
||||
},
|
||||
Modules: Modules{
|
||||
Runes: runesconfig.Config{
|
||||
Datasource: "bitcoin-node",
|
||||
Database: "postgres",
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Logger logger.Config `mapstructure:"logger"`
|
||||
BitcoinNode BitcoinNodeClient `mapstructure:"bitcoin_node"`
|
||||
Network common.Network `mapstructure:"network"`
|
||||
HTTPServer HTTPServerConfig `mapstructure:"http_server"`
|
||||
Modules Modules `mapstructure:"modules"`
|
||||
Reporting reportingclient.Config `mapstructure:"reporting"`
|
||||
EnableModules []string `mapstructure:"enable_modules"`
|
||||
APIOnly bool `mapstructure:"api_only"`
|
||||
Logger logger.Config `mapstructure:"logger"`
|
||||
BitcoinNode BitcoinNodeClient `mapstructure:"bitcoin_node"`
|
||||
Network common.Network `mapstructure:"network"`
|
||||
HTTPServer HTTPServerConfig `mapstructure:"http_server"`
|
||||
Modules Modules `mapstructure:"modules"`
|
||||
Reporting reportingclient.Config `mapstructure:"reporting"`
|
||||
}
|
||||
|
||||
type BitcoinNodeClient struct {
|
||||
@@ -49,12 +61,13 @@ type BitcoinNodeClient struct {
|
||||
}
|
||||
|
||||
type Modules struct {
|
||||
Bitcoin btcconfig.Config `mapstructure:"bitcoin"`
|
||||
Runes runesconfig.Config `mapstructure:"runes"`
|
||||
Runes runesconfig.Config `mapstructure:"runes"`
|
||||
}
|
||||
|
||||
type HTTPServerConfig struct {
|
||||
Port int `mapstructure:"port"`
|
||||
Port int `mapstructure:"port"`
|
||||
Logger requestlogger.Config `mapstructure:"logger"`
|
||||
RequestIP requestcontext.WithClientIPConfig `mapstructure:"requestip"`
|
||||
}
|
||||
|
||||
// Parse parses the configuration from environment variables
|
||||
|
||||
main.go
@@ -7,7 +7,6 @@ import (
|
||||
"syscall"
|
||||
|
||||
"github.com/gaze-network/indexer-network/cmd"
|
||||
_ "go.uber.org/automaxprocs"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
@@ -1,244 +0,0 @@
|
||||
package btcclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/datasources"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/internal/subscription"
|
||||
"github.com/gaze-network/indexer-network/modules/bitcoin/datagateway"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
cstream "github.com/planxnx/concurrent-stream"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// TODO: Refactor this; datasources.BitcoinNode and this package are the same.
|
||||
|
||||
const (
|
||||
blockStreamChunkSize = 100
|
||||
)
|
||||
|
||||
// Make sure to implement the BitcoinDatasource interface
|
||||
var _ datasources.Datasource[[]*types.Block] = (*ClientDatabase)(nil)
|
||||
|
||||
// ClientDatabase is a client to connect to the bitcoin database.
|
||||
type ClientDatabase struct {
|
||||
bitcoinDg datagateway.BitcoinDataGateway
|
||||
}
|
||||
|
||||
func NewClientDatabase(bitcoinDg datagateway.BitcoinDataGateway) *ClientDatabase {
|
||||
return &ClientDatabase{
|
||||
bitcoinDg: bitcoinDg,
|
||||
}
|
||||
}
|
||||
|
||||
func (d ClientDatabase) Name() string {
|
||||
return "bitcoin_database"
|
||||
}
|
||||
|
||||
func (d *ClientDatabase) Fetch(ctx context.Context, from, to int64) ([]*types.Block, error) {
|
||||
ch := make(chan []*types.Block)
|
||||
subscription, err := d.FetchAsync(ctx, from, to, ch)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
defer subscription.Unsubscribe()
|
||||
|
||||
blocks := make([]*types.Block, 0)
|
||||
for {
|
||||
select {
|
||||
case b, ok := <-ch:
|
||||
if !ok {
|
||||
return blocks, nil
|
||||
}
|
||||
blocks = append(blocks, b...)
|
||||
case <-subscription.Done():
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, errors.Wrap(err, "context done")
|
||||
}
|
||||
return blocks, nil
|
||||
case err := <-subscription.Err():
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "got error while fetch async")
|
||||
}
|
||||
return blocks, nil
|
||||
case <-ctx.Done():
|
||||
return nil, errors.Wrap(ctx.Err(), "context done")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *ClientDatabase) FetchAsync(ctx context.Context, from, to int64, ch chan<- []*types.Block) (*subscription.ClientSubscription[[]*types.Block], error) {
|
||||
ctx = logger.WithContext(ctx,
|
||||
slogx.String("package", "datasources"),
|
||||
slogx.String("datasource", d.Name()),
|
||||
)
|
||||
|
||||
from, to, skip, err := d.prepareRange(ctx, from, to)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to prepare fetch range")
|
||||
}
|
||||
|
||||
subscription := subscription.NewSubscription(ch)
|
||||
if skip {
|
||||
if err := subscription.UnsubscribeWithContext(ctx); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unsubscribe")
|
||||
}
|
||||
return subscription.Client(), nil
|
||||
}
|
||||
|
||||
// Create parallel stream
|
||||
out := make(chan []*types.Block)
|
||||
stream := cstream.NewStream(ctx, 8, out)
|
||||
|
||||
// create slice of block heights to fetch
|
||||
blockHeights := make([]int64, 0, to-from+1)
|
||||
for i := from; i <= to; i++ {
|
||||
blockHeights = append(blockHeights, i)
|
||||
}
|
||||
|
||||
// Wait for stream to finish and close out channel
|
||||
go func() {
|
||||
defer close(out)
|
||||
_ = stream.Wait()
|
||||
}()
|
||||
|
||||
// Fan-out blocks to subscription channel
|
||||
go func() {
|
||||
defer func() {
|
||||
// add a small delay to prevent shutdown before the client receives all blocks
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
subscription.Unsubscribe()
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case data, ok := <-out:
|
||||
// stream closed
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// empty blocks
|
||||
if len(data) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// send blocks to subscription channel
|
||||
if err := subscription.Send(ctx, data); err != nil {
|
||||
if errors.Is(err, errs.Closed) {
|
||||
return
|
||||
}
|
||||
logger.WarnContext(ctx, "Failed to send bitcoin blocks to subscription client",
|
||||
slogx.Int64("start", data[0].Header.Height),
|
||||
slogx.Int64("end", data[len(data)-1].Header.Height),
|
||||
slogx.Error(err),
|
||||
)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Fetch blocks from the Bitcoin database in parallel until all block heights are processed
|
||||
// or subscription is done.
|
||||
go func() {
|
||||
defer stream.Close()
|
||||
done := subscription.Done()
|
||||
chunks := lo.Chunk(blockHeights, blockStreamChunkSize)
|
||||
for _, chunk := range chunks {
|
||||
chunk := chunk
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
if len(chunk) == 0 {
|
||||
continue
|
||||
}
|
||||
stream.Go(func() []*types.Block {
|
||||
startAt := time.Now()
|
||||
defer func() {
|
||||
logger.DebugContext(ctx, "Fetched chunk of blocks from Bitcoin node",
|
||||
slogx.Int("total_blocks", len(chunk)),
|
||||
slogx.Int64("from", chunk[0]),
|
||||
slogx.Int64("to", chunk[len(chunk)-1]),
|
||||
slogx.Duration("duration", time.Since(startAt)),
|
||||
)
|
||||
}()
|
||||
|
||||
fromHeight, toHeight := chunk[0], chunk[len(chunk)-1]
|
||||
blocks, err := d.bitcoinDg.GetBlocksByHeightRange(ctx, fromHeight, toHeight)
|
||||
if err != nil {
|
||||
logger.ErrorContext(ctx, "Can't get block data from Bitcoin database",
|
||||
slogx.Error(err),
|
||||
slogx.Int64("from", fromHeight),
|
||||
slogx.Int64("to", toHeight),
|
||||
)
|
||||
if err := subscription.SendError(ctx, errors.Wrapf(err, "failed to get blocks: from_height: %d, to_height: %d", fromHeight, toHeight)); err != nil {
|
||||
logger.WarnContext(ctx, "Failed to send datasource error to subscription client", slogx.Error(err))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return blocks
|
||||
})
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return subscription.Client(), nil
|
||||
}
|
||||
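// ExampleFetchAsync is a hedged usage sketch (not part of this change): it shows how a
// consumer could stream blocks from this datasource. The bitcoinDg argument and the
// handle callback are illustrative placeholders; only NewClientDatabase, FetchAsync and
// the subscription methods used above are taken from this file.
func ExampleFetchAsync(ctx context.Context, bitcoinDg datagateway.BitcoinDataGateway, handle func(*types.Block)) error {
	client := NewClientDatabase(bitcoinDg)

	ch := make(chan []*types.Block)
	sub, err := client.FetchAsync(ctx, 0, -1, ch) // -1 = up to the latest indexed block
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()

	for {
		select {
		case blocks, ok := <-ch:
			if !ok {
				return nil // stream closed, all chunks received
			}
			for _, b := range blocks {
				handle(b)
			}
		case <-sub.Done():
			return nil
		case err := <-sub.Err():
			return err // datasource reported an error
		}
	}
}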
|
||||
func (c *ClientDatabase) GetBlockHeader(ctx context.Context, height int64) (types.BlockHeader, error) {
|
||||
header, err := c.bitcoinDg.GetBlockHeaderByHeight(ctx, height)
|
||||
if err != nil {
|
||||
return types.BlockHeader{}, errors.WithStack(err)
|
||||
}
|
||||
return header, nil
|
||||
}
|
||||
|
||||
func (c *ClientDatabase) prepareRange(ctx context.Context, fromHeight, toHeight int64) (start, end int64, skip bool, err error) {
|
||||
start = fromHeight
|
||||
end = toHeight
|
||||
|
||||
// get current bitcoin block height
|
||||
latestBlock, err := c.bitcoinDg.GetLatestBlockHeader(ctx)
|
||||
if err != nil {
|
||||
return -1, -1, false, errors.Wrap(err, "failed to get block count")
|
||||
}
|
||||
|
||||
// set start to genesis block height
|
||||
if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
|
||||
// set end to current bitcoin block height if
|
||||
// - end is -1
|
||||
// - end is greater than current bitcoin block height
|
||||
if end < 0 || end > latestBlock.Height {
|
||||
end = latestBlock.Height
|
||||
}
|
||||
|
||||
// if start is greater than end, skip this round
|
||||
if start > end {
|
||||
return -1, -1, true, nil
|
||||
}
|
||||
|
||||
return start, end, false, nil
|
||||
}
|
||||
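// Illustrative behaviour of prepareRange (hypothetical numbers, assuming the latest
// indexed block height is 100):
//
//	prepareRange(ctx, -1, -1)  // -> start=0,   end=100, skip=false (full range)
//	prepareRange(ctx, 50, 999) // -> start=50,  end=100, skip=false (end clamped to tip)
//	prepareRange(ctx, 150, -1) // -> start=150, end=100, skip=true  (nothing to fetch yet)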
|
||||
// GetTransactionByHash returns a transaction with the given hash. Returns errs.NotFound if transaction does not exist.
|
||||
func (c *ClientDatabase) GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) {
|
||||
tx, err := c.bitcoinDg.GetTransactionByHash(ctx, txHash)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get transaction by hash")
|
||||
}
|
||||
return tx, nil
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
package config
|
||||
|
||||
import "github.com/gaze-network/indexer-network/internal/postgres"
|
||||
|
||||
type Config struct {
|
||||
Database string `mapstructure:"database"` // Database to store bitcoin data.
|
||||
Postgres postgres.Config `mapstructure:"postgres"`
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
package bitcoin
|
||||
|
||||
import (
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
)
|
||||
|
||||
const (
|
||||
Version = "v0.0.1"
|
||||
DBVersion = 1
|
||||
)
|
||||
|
||||
var (
|
||||
// defaultCurrentBlock is the default block header used as the current block for first-time indexing
|
||||
defaultCurrentBlock = types.BlockHeader{
|
||||
Hash: common.ZeroHash,
|
||||
Height: -1,
|
||||
}
|
||||
|
||||
lastV1Block = types.BlockHeader{
|
||||
Hash: *utils.Must(chainhash.NewHashFromStr("00000000000001aa077d7aa84c532a4d69bdbff519609d1da0835261b7a74eb6")),
|
||||
Height: 227835,
|
||||
}
|
||||
)
|
||||
@@ -1,18 +0,0 @@
|
||||
BEGIN;
|
||||
|
||||
-- DROP INDEX
|
||||
DROP INDEX IF EXISTS bitcoin_blocks_block_hash_idx;
|
||||
DROP INDEX IF EXISTS bitcoin_transactions_tx_hash_idx;
|
||||
DROP INDEX IF EXISTS bitcoin_transactions_block_hash_idx;
|
||||
DROP INDEX IF EXISTS bitcoin_transaction_txouts_pkscript_idx;
|
||||
DROP INDEX IF EXISTS bitcoin_transaction_txins_prevout_idx;
|
||||
|
||||
-- DROP TABLE
|
||||
DROP TABLE IF EXISTS "bitcoin_indexer_stats";
|
||||
DROP TABLE IF EXISTS "bitcoin_indexer_db_version";
|
||||
DROP TABLE IF EXISTS "bitcoin_transaction_txins";
|
||||
DROP TABLE IF EXISTS "bitcoin_transaction_txouts";
|
||||
DROP TABLE IF EXISTS "bitcoin_transactions";
|
||||
DROP TABLE IF EXISTS "bitcoin_blocks";
|
||||
|
||||
COMMIT;
|
||||
@@ -1,72 +0,0 @@
|
||||
BEGIN;
|
||||
|
||||
-- Indexer Client Information
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "bitcoin_indexer_stats" (
|
||||
"id" BIGSERIAL PRIMARY KEY,
|
||||
"client_version" TEXT NOT NULL,
|
||||
"network" TEXT NOT NULL,
|
||||
"created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "bitcoin_indexer_db_version" (
|
||||
"id" BIGSERIAL PRIMARY KEY,
|
||||
"version" INT NOT NULL,
|
||||
"created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
INSERT INTO "bitcoin_indexer_db_version" ("version") VALUES (1);
|
||||
|
||||
-- Bitcoin Data
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "bitcoin_blocks" (
|
||||
"block_height" INT NOT NULL PRIMARY KEY,
|
||||
"block_hash" TEXT NOT NULL,
|
||||
"version" INT NOT NULL,
|
||||
"merkle_root" TEXT NOT NULL,
|
||||
"prev_block_hash" TEXT NOT NULL,
|
||||
"timestamp" TIMESTAMP WITH TIME ZONE NOT NULL,
|
||||
"bits" BIGINT NOT NULL,
|
||||
"nonce" BIGINT NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS bitcoin_blocks_block_hash_idx ON "bitcoin_blocks" USING HASH ("block_hash");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "bitcoin_transactions" (
|
||||
"tx_hash" TEXT NOT NULL, -- can't use as primary key because block v1 has duplicate tx hashes (coinbase tx). See: https://github.com/bitcoin/bitcoin/commit/a206b0ea12eb4606b93323268fc81a4f1f952531
|
||||
"version" INT NOT NULL,
|
||||
"locktime" BIGINT NOT NULL,
|
||||
"block_height" INT NOT NULL,
|
||||
"block_hash" TEXT NOT NULL,
|
||||
"idx" INT NOT NULL,
|
||||
PRIMARY KEY ("block_height", "idx")
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS bitcoin_transactions_tx_hash_idx ON "bitcoin_transactions" USING HASH ("tx_hash");
|
||||
CREATE INDEX IF NOT EXISTS bitcoin_transactions_block_hash_idx ON "bitcoin_transactions" USING HASH ("block_hash");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "bitcoin_transaction_txouts" (
|
||||
"tx_hash" TEXT NOT NULL,
|
||||
"tx_idx" INT NOT NULL,
|
||||
"pkscript" TEXT NOT NULL, -- Hex String
|
||||
"value" BIGINT NOT NULL,
|
||||
"is_spent" BOOLEAN NOT NULL DEFAULT false,
|
||||
PRIMARY KEY ("tx_hash", "tx_idx")
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS bitcoin_transaction_txouts_pkscript_idx ON "bitcoin_transaction_txouts" USING HASH ("pkscript");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "bitcoin_transaction_txins" (
|
||||
"tx_hash" TEXT NOT NULL,
|
||||
"tx_idx" INT NOT NULL,
|
||||
"prevout_tx_hash" TEXT NOT NULL,
|
||||
"prevout_tx_idx" INT NOT NULL,
|
||||
"prevout_pkscript" TEXT NULL, -- Hex String, Can be NULL if the prevout is a coinbase transaction
|
||||
"scriptsig" TEXT NOT NULL, -- Hex String
|
||||
"witness" TEXT NOT NULL DEFAULT '', -- Hex String
|
||||
"sequence" BIGINT NOT NULL,
|
||||
PRIMARY KEY ("tx_hash", "tx_idx")
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS bitcoin_transaction_txins_prevout_idx ON "bitcoin_transaction_txins" USING BTREE ("prevout_tx_hash", "prevout_tx_idx");
|
||||
|
||||
COMMIT;
|
||||
@@ -1,99 +0,0 @@
|
||||
-- name: GetLatestBlockHeader :one
|
||||
SELECT * FROM bitcoin_blocks ORDER BY block_height DESC LIMIT 1;
|
||||
|
||||
-- name: InsertBlock :exec
|
||||
INSERT INTO bitcoin_blocks ("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce") VALUES ($1, $2, $3, $4, $5, $6, $7, $8);
|
||||
|
||||
-- name: BatchInsertBlocks :exec
|
||||
INSERT INTO bitcoin_blocks ("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce")
|
||||
VALUES (
|
||||
unnest(@block_height_arr::INT[]),
|
||||
unnest(@block_hash_arr::TEXT[]),
|
||||
unnest(@version_arr::INT[]),
|
||||
unnest(@merkle_root_arr::TEXT[]),
|
||||
unnest(@prev_block_hash_arr::TEXT[]),
|
||||
unnest(@timestamp_arr::TIMESTAMP WITH TIME ZONE[]), -- or use TIMESTAMPTZ
|
||||
unnest(@bits_arr::BIGINT[]),
|
||||
unnest(@nonce_arr::BIGINT[])
|
||||
);
|
||||
|
||||
-- name: BatchInsertTransactions :exec
|
||||
INSERT INTO bitcoin_transactions ("tx_hash","version","locktime","block_height","block_hash","idx")
|
||||
VALUES (
|
||||
unnest(@tx_hash_arr::TEXT[]),
|
||||
unnest(@version_arr::INT[]),
|
||||
unnest(@locktime_arr::BIGINT[]),
|
||||
unnest(@block_height_arr::INT[]),
|
||||
unnest(@block_hash_arr::TEXT[]),
|
||||
unnest(@idx_arr::INT[])
|
||||
);
|
||||
|
||||
-- name: BatchInsertTransactionTxIns :exec
|
||||
WITH update_txout AS (
|
||||
UPDATE "bitcoin_transaction_txouts"
|
||||
SET "is_spent" = true
|
||||
FROM (SELECT unnest(@prevout_tx_hash_arr::TEXT[]) as tx_hash, unnest(@prevout_tx_idx_arr::INT[]) as tx_idx) as txin
|
||||
WHERE "bitcoin_transaction_txouts"."tx_hash" = txin.tx_hash AND "bitcoin_transaction_txouts"."tx_idx" = txin.tx_idx AND "is_spent" = false
|
||||
RETURNING "bitcoin_transaction_txouts"."tx_hash", "bitcoin_transaction_txouts"."tx_idx", "pkscript"
|
||||
), prepare_insert AS (
|
||||
SELECT input.tx_hash, input.tx_idx, prevout_tx_hash, prevout_tx_idx, update_txout.pkscript as prevout_pkscript, scriptsig, witness, sequence
|
||||
FROM (
|
||||
SELECT
|
||||
unnest(@tx_hash_arr::TEXT[]) as tx_hash,
|
||||
unnest(@tx_idx_arr::INT[]) as tx_idx,
|
||||
unnest(@prevout_tx_hash_arr::TEXT[]) as prevout_tx_hash,
|
||||
unnest(@prevout_tx_idx_arr::INT[]) as prevout_tx_idx,
|
||||
unnest(@scriptsig_arr::TEXT[]) as scriptsig,
|
||||
unnest(@witness_arr::TEXT[]) as witness,
|
||||
unnest(@sequence_arr::INT[]) as sequence
|
||||
) input LEFT JOIN update_txout ON "update_txout"."tx_hash" = "input"."prevout_tx_hash" AND "update_txout"."tx_idx" = "input"."prevout_tx_idx"
|
||||
)
|
||||
INSERT INTO bitcoin_transaction_txins ("tx_hash","tx_idx","prevout_tx_hash","prevout_tx_idx", "prevout_pkscript","scriptsig","witness","sequence")
|
||||
SELECT "tx_hash", "tx_idx", "prevout_tx_hash", "prevout_tx_idx", "prevout_pkscript", "scriptsig", "witness", "sequence" FROM prepare_insert;
|
||||
|
||||
-- name: BatchInsertTransactionTxOuts :exec
|
||||
INSERT INTO bitcoin_transaction_txouts ("tx_hash","tx_idx","pkscript","value")
|
||||
VALUES (
|
||||
unnest(@tx_hash_arr::TEXT[]),
|
||||
unnest(@tx_idx_arr::INT[]),
|
||||
unnest(@pkscript_arr::TEXT[]),
|
||||
unnest(@value_arr::BIGINT[])
|
||||
);
|
||||
|
||||
-- name: RevertData :exec
|
||||
WITH delete_tx AS (
|
||||
DELETE FROM "bitcoin_transactions" WHERE "block_height" >= @from_height
|
||||
RETURNING "tx_hash"
|
||||
), delete_txin AS (
|
||||
DELETE FROM "bitcoin_transaction_txins" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx)
|
||||
RETURNING "prevout_tx_hash", "prevout_tx_idx"
|
||||
), delete_txout AS (
|
||||
DELETE FROM "bitcoin_transaction_txouts" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx)
|
||||
RETURNING "tx_hash", "tx_idx"
|
||||
), revert_txout_spent AS (
|
||||
UPDATE "bitcoin_transaction_txouts"
|
||||
SET "is_spent" = false
|
||||
WHERE
|
||||
("tx_hash", "tx_idx") IN (SELECT "prevout_tx_hash", "prevout_tx_idx" FROM delete_txin) AND
|
||||
("tx_hash", "tx_idx") NOT IN (SELECT "tx_hash", "tx_idx" FROM delete_txout) -- avoid to modified same row twice (modified the same row twice in a single statement is not supported)
|
||||
RETURNING NULL
|
||||
)
|
||||
DELETE FROM "bitcoin_blocks" WHERE "bitcoin_blocks"."block_height" >= @from_height;
|
||||
|
||||
-- name: GetBlockByHeight :one
|
||||
SELECT * FROM bitcoin_blocks WHERE block_height = $1;
|
||||
|
||||
-- name: GetBlocksByHeightRange :many
|
||||
SELECT * FROM bitcoin_blocks WHERE block_height >= @from_height AND block_height <= @to_height ORDER BY block_height ASC;
|
||||
|
||||
-- name: GetTransactionsByHeightRange :many
|
||||
SELECT * FROM bitcoin_transactions WHERE block_height >= @from_height AND block_height <= @to_height;
|
||||
|
||||
-- name: GetTransactionByHash :one
|
||||
SELECT * FROM bitcoin_transactions WHERE tx_hash = $1;
|
||||
|
||||
-- name: GetTransactionTxOutsByTxHashes :many
|
||||
SELECT * FROM bitcoin_transaction_txouts WHERE tx_hash = ANY(@tx_hashes::TEXT[]);
|
||||
|
||||
-- name: GetTransactionTxInsByTxHashes :many
|
||||
SELECT * FROM bitcoin_transaction_txins WHERE tx_hash = ANY(@tx_hashes::TEXT[]);
|
||||
@@ -1,8 +0,0 @@
|
||||
-- name: GetCurrentDBVersion :one
|
||||
SELECT "version" FROM bitcoin_indexer_db_version ORDER BY id DESC LIMIT 1;
|
||||
|
||||
-- name: GetCurrentIndexerStats :one
|
||||
SELECT "client_version", "network" FROM bitcoin_indexer_stats ORDER BY id DESC LIMIT 1;
|
||||
|
||||
-- name: UpdateIndexerStats :exec
|
||||
INSERT INTO bitcoin_indexer_stats (client_version, network) VALUES ($1, $2);
|
||||
@@ -1,25 +0,0 @@
|
||||
package datagateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
)
|
||||
|
||||
type BitcoinDataGateway interface {
|
||||
BitcoinWriterDataDataGateway
|
||||
BitcoinReaderDataDataGateway
|
||||
}
|
||||
|
||||
type BitcoinWriterDataDataGateway interface {
|
||||
InsertBlocks(ctx context.Context, blocks []*types.Block) error
|
||||
RevertBlocks(context.Context, int64) error
|
||||
}
|
||||
|
||||
type BitcoinReaderDataDataGateway interface {
|
||||
GetLatestBlockHeader(context.Context) (types.BlockHeader, error)
|
||||
GetBlockHeaderByHeight(ctx context.Context, blockHeight int64) (types.BlockHeader, error)
|
||||
GetBlocksByHeightRange(ctx context.Context, from int64, to int64) ([]*types.Block, error)
|
||||
GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error)
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
package datagateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
)
|
||||
|
||||
type IndexerInformationDataGateway interface {
|
||||
GetCurrentDBVersion(ctx context.Context) (int32, error)
|
||||
GetLatestIndexerStats(ctx context.Context) (version string, network common.Network, err error)
|
||||
UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error
|
||||
}
|
||||
@@ -1,122 +0,0 @@
|
||||
package bitcoin
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/indexers"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/internal/config"
|
||||
"github.com/gaze-network/indexer-network/modules/bitcoin/datagateway"
|
||||
)
|
||||
|
||||
// Make sure to implement the BitcoinProcessor interface
|
||||
var _ indexers.BitcoinProcessor = (*Processor)(nil)
|
||||
|
||||
type Processor struct {
|
||||
config config.Config
|
||||
bitcoinDg datagateway.BitcoinDataGateway
|
||||
indexerInfoDg datagateway.IndexerInformationDataGateway
|
||||
}
|
||||
|
||||
func NewProcessor(config config.Config, bitcoinDg datagateway.BitcoinDataGateway, indexerInfoDg datagateway.IndexerInformationDataGateway) *Processor {
|
||||
return &Processor{
|
||||
config: config,
|
||||
bitcoinDg: bitcoinDg,
|
||||
indexerInfoDg: indexerInfoDg,
|
||||
}
|
||||
}
|
||||
|
||||
func (p Processor) Name() string {
|
||||
return "bitcoin"
|
||||
}
|
||||
|
||||
func (p *Processor) Process(ctx context.Context, inputs []*types.Block) error {
|
||||
if len(inputs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Process the given blocks before inserting to the database
|
||||
inputs, err := p.process(ctx, inputs)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Insert blocks
|
||||
if err := p.bitcoinDg.InsertBlocks(ctx, inputs); err != nil {
|
||||
return errors.Wrapf(err, "error during insert blocks, from: %d, to: %d", inputs[0].Header.Height, inputs[len(inputs)-1].Header.Height)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
|
||||
b, err := p.bitcoinDg.GetLatestBlockHeader(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return defaultCurrentBlock, nil
|
||||
}
|
||||
return types.BlockHeader{}, errors.WithStack(err)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
|
||||
header, err := p.bitcoinDg.GetBlockHeaderByHeight(ctx, height)
|
||||
if err != nil {
|
||||
return types.BlockHeader{}, errors.WithStack(err)
|
||||
}
|
||||
return header, nil
|
||||
}
|
||||
|
||||
func (p *Processor) RevertData(ctx context.Context, from int64) error {
|
||||
// to prevent removing the txins/txouts of the duplicated coinbase transactions in blocks 91842 and 91880
|
||||
// if you really want to revert the data before the block `227835`, you should reset the database and reindex the data instead.
|
||||
if from <= lastV1Block.Height {
|
||||
return errors.Wrapf(errs.InvalidArgument, "can't revert data before block version 2, height: %d", lastV1Block.Height)
|
||||
}
|
||||
|
||||
if err := p.bitcoinDg.RevertBlocks(ctx, from); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Processor) VerifyStates(ctx context.Context) error {
|
||||
// Check current db version with the required db version
|
||||
{
|
||||
dbVersion, err := p.indexerInfoDg.GetCurrentDBVersion(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "can't get current db version")
|
||||
}
|
||||
|
||||
if dbVersion != DBVersion {
|
||||
return errors.Wrapf(errs.ConflictSetting, "db version mismatch, please upgrade to version %d", DBVersion)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the latest indexed network is mismatched with configured network
|
||||
{
|
||||
_, network, err := p.indexerInfoDg.GetLatestIndexerStats(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
goto end
|
||||
}
|
||||
return errors.Wrap(err, "can't get latest indexer stats")
|
||||
}
|
||||
|
||||
if network != p.config.Network {
|
||||
return errors.Wrapf(errs.ConflictSetting, "network mismatch, latest indexed network: %q, configured network: %q. If you want to change the network, please reset the database", network, p.config.Network)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Verify the states of the indexed data to ensure the last shutdown was graceful and no missing data.
|
||||
|
||||
end:
|
||||
if err := p.indexerInfoDg.UpdateIndexerStats(ctx, Version, p.config.Network); err != nil {
|
||||
return errors.Wrap(err, "can't update indexer stats")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,91 +0,0 @@
|
||||
package bitcoin
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"slices"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
)
|
||||
|
||||
// process applies processing rules to the given blocks before inserting them into the database
|
||||
//
|
||||
// this function will modify the given data directly.
|
||||
func (p *Processor) process(ctx context.Context, blocks []*types.Block) ([]*types.Block, error) {
|
||||
if len(blocks) == 0 {
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
// Sort ASC by block height
|
||||
slices.SortFunc(blocks, func(t1, t2 *types.Block) int {
|
||||
return cmp.Compare(t1.Header.Height, t2.Header.Height)
|
||||
})
|
||||
|
||||
if !p.isContinueFromLatestIndexedBlock(ctx, blocks[0]) {
|
||||
return nil, errors.New("given blocks are not continue from the latest indexed block")
|
||||
}
|
||||
|
||||
if !p.isBlocksSequential(blocks) {
|
||||
return nil, errors.New("given blocks are not in sequence")
|
||||
}
|
||||
|
||||
p.removeDuplicateCoinbaseTxInputsOutputs(blocks)
|
||||
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
// check if the given blocks are in sequential order (each block height is the previous height + 1)
|
||||
// to prevent inserting out-of-order blocks or duplicate blocks
|
||||
func (p *Processor) isBlocksSequential(blocks []*types.Block) bool {
|
||||
if len(blocks) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for i, block := range blocks {
|
||||
if i == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if block.Header.Height != blocks[i-1].Header.Height+1 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// check if the given block continues from the latest indexed block
|
||||
// to prevent inserting out-of-order blocks or duplicate blocks
|
||||
func (p *Processor) isContinueFromLatestIndexedBlock(ctx context.Context, block *types.Block) bool {
|
||||
latestBlock, err := p.CurrentBlock(ctx)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return block.Header.Height == latestBlock.Height+1
|
||||
}
|
||||
|
||||
// there are 2 coinbase transactions that are duplicated in blocks 91842 and 91880.
|
||||
// if the given block version is v1 and height is `91842` or `91880`,
|
||||
// then remove its transaction inputs/outputs to prevent duplicate txin/txout errors when inserting into the database.
|
||||
//
|
||||
// These duplicated coinbase transactions have identical transaction inputs/outputs, and
|
||||
// the UTXOs from these 2 duplicated coinbase txs can be redeemed only once, so it's safe to remove them and
|
||||
// use the inputs/outputs already indexed from the earlier block.
|
||||
//
|
||||
// Duplicate Coinbase Transactions:
|
||||
// - `d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599` in blocks 91812, 91842
|
||||
// - `e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468` in blocks 91722, 91880
|
||||
//
|
||||
// This function will modify the given data directly.
|
||||
func (p *Processor) removeDuplicateCoinbaseTxInputsOutputs(blocks []*types.Block) {
|
||||
for _, block := range blocks {
|
||||
header := block.Header
|
||||
if header.Version == 1 && (header.Height == 91842 || header.Height == 91880) {
|
||||
// remove transaction inputs/outputs from coinbase transaction (first transaction)
|
||||
block.Transactions[0].TxIn = nil
|
||||
block.Transactions[0].TxOut = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,144 +0,0 @@
|
||||
package bitcoin
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDuplicateCoinbaseTxHashHandling(t *testing.T) {
|
||||
processor := Processor{}
|
||||
generator := func() []*types.Block {
|
||||
return []*types.Block{
|
||||
{
|
||||
Header: types.BlockHeader{Height: 91842, Version: 1},
|
||||
Transactions: []*types.Transaction{
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Header: types.BlockHeader{Height: 91880, Version: 1},
|
||||
Transactions: []*types.Transaction{
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("all_duplicated_txs", func(t *testing.T) {
|
||||
blocks := generator()
|
||||
processor.removeDuplicateCoinbaseTxInputsOutputs(blocks)
|
||||
|
||||
assert.Len(t, blocks, 2, "should not remove any blocks")
|
||||
for _, block := range blocks {
|
||||
assert.Len(t, block.Transactions, 2, "should not remove any transactions")
|
||||
assert.Len(t, block.Transactions[0].TxIn, 0, "should remove tx inputs from coinbase transaction")
|
||||
assert.Len(t, block.Transactions[0].TxOut, 0, "should remove tx outputs from coinbase transaction")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("not_duplicated_txs", func(t *testing.T) {
|
||||
blocks := []*types.Block{
|
||||
{
|
||||
Header: types.BlockHeader{Height: 91812, Version: 1},
|
||||
Transactions: []*types.Transaction{
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Header: types.BlockHeader{Height: 91722, Version: 1},
|
||||
Transactions: []*types.Transaction{
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
processor.removeDuplicateCoinbaseTxInputsOutputs(blocks)
|
||||
|
||||
assert.Len(t, blocks, 2, "should not remove any blocks")
|
||||
for _, block := range blocks {
|
||||
assert.Len(t, block.Transactions, 2, "should not remove any transactions")
|
||||
assert.Len(t, block.Transactions[0].TxIn, 4, "should not remove tx inputs from coinbase transaction")
|
||||
assert.Len(t, block.Transactions[0].TxOut, 4, "should not remove tx outputs from coinbase transaction")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("mixed", func(t *testing.T) {
|
||||
blocks := []*types.Block{
|
||||
{
|
||||
Header: types.BlockHeader{Height: 91812, Version: 1},
|
||||
Transactions: []*types.Transaction{
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
blocks = append(blocks, generator()...)
|
||||
blocks = append(blocks, &types.Block{
|
||||
Header: types.BlockHeader{Height: 91722, Version: 1},
|
||||
Transactions: []*types.Transaction{
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
{
|
||||
TxIn: []*types.TxIn{{}, {}, {}, {}},
|
||||
TxOut: []*types.TxOut{{}, {}, {}, {}},
|
||||
},
|
||||
},
|
||||
})
|
||||
processor.removeDuplicateCoinbaseTxInputsOutputs(blocks)
|
||||
|
||||
assert.Len(t, blocks, 4, "should not remove any blocks")
|
||||
|
||||
// only 2nd and 3rd blocks should be modified
|
||||
for i, block := range blocks {
|
||||
t.Run(fmt.Sprint(i), func(t *testing.T) {
|
||||
if i == 1 || i == 2 {
|
||||
assert.Len(t, block.Transactions, 2, "should not remove any transactions")
|
||||
assert.Len(t, block.Transactions[0].TxIn, 0, "should remove tx inputs from coinbase transaction")
|
||||
assert.Len(t, block.Transactions[0].TxOut, 0, "should remove tx outputs from coinbase transaction")
|
||||
} else {
|
||||
assert.Len(t, block.Transactions, 2, "should not remove any transactions")
|
||||
assert.Len(t, block.Transactions[0].TxIn, 4, "should not remove tx inputs from coinbase transaction")
|
||||
assert.Len(t, block.Transactions[0].TxOut, 4, "should not remove tx outputs from coinbase transaction")
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,169 +0,0 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen"
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
func (r *Repository) GetLatestBlockHeader(ctx context.Context) (types.BlockHeader, error) {
|
||||
model, err := r.queries.GetLatestBlockHeader(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, pgx.ErrNoRows) {
|
||||
return types.BlockHeader{}, errors.Join(errs.NotFound, err)
|
||||
}
|
||||
return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block header")
|
||||
}
|
||||
|
||||
data, err := mapBlockHeaderModelToType(model)
|
||||
if err != nil {
|
||||
return types.BlockHeader{}, errors.Wrap(err, "failed to map block header model to type")
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (r *Repository) InsertBlocks(ctx context.Context, blocks []*types.Block) error {
|
||||
if len(blocks) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
blockParams, txParams, txoutParams, txinParams := mapBlocksTypeToParams(blocks)
|
||||
|
||||
tx, err := r.db.Begin(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to begin transaction")
|
||||
}
|
||||
defer tx.Rollback(ctx)
|
||||
|
||||
queries := r.queries.WithTx(tx)
|
||||
|
||||
if err := queries.BatchInsertBlocks(ctx, blockParams); err != nil {
|
||||
return errors.Wrap(err, "failed to batch insert block headers")
|
||||
}
|
||||
|
||||
if err := queries.BatchInsertTransactions(ctx, txParams); err != nil {
|
||||
return errors.Wrap(err, "failed to batch insert transactions")
|
||||
}
|
||||
|
||||
// Should insert txout first, then txin
|
||||
// Because txin references txout
|
||||
if err := queries.BatchInsertTransactionTxOuts(ctx, txoutParams); err != nil {
|
||||
return errors.Wrap(err, "failed to batch insert transaction txins")
|
||||
}
|
||||
|
||||
if err := queries.BatchInsertTransactionTxIns(ctx, txinParams); err != nil {
|
||||
return errors.Wrap(err, "failed to batch insert transaction txins")
|
||||
}
|
||||
|
||||
if err := tx.Commit(ctx); err != nil {
|
||||
return errors.Wrap(err, "failed to commit transaction")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repository) RevertBlocks(ctx context.Context, from int64) error {
|
||||
tx, err := r.db.Begin(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to begin transaction")
|
||||
}
|
||||
defer tx.Rollback(ctx)
|
||||
|
||||
queries := r.queries.WithTx(tx)
|
||||
if err := queries.RevertData(ctx, int32(from)); err != nil && !errors.Is(err, pgx.ErrNoRows) {
|
||||
return errors.Wrap(err, "failed to revert data")
|
||||
}
|
||||
|
||||
if err := tx.Commit(ctx); err != nil {
|
||||
return errors.Wrap(err, "failed to commit transaction")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetBlockHeaderByHeight(ctx context.Context, blockHeight int64) (types.BlockHeader, error) {
|
||||
blockModel, err := r.queries.GetBlockByHeight(ctx, int32(blockHeight))
|
||||
if err != nil {
|
||||
if errors.Is(err, pgx.ErrNoRows) {
|
||||
return types.BlockHeader{}, errors.Join(errs.NotFound, err)
|
||||
}
|
||||
return types.BlockHeader{}, errors.Wrap(err, "failed to get block by height")
|
||||
}
|
||||
|
||||
data, err := mapBlockHeaderModelToType(blockModel)
|
||||
if err != nil {
|
||||
return types.BlockHeader{}, errors.Wrap(err, "failed to map block header model to type")
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetBlocksByHeightRange(ctx context.Context, from int64, to int64) ([]*types.Block, error) {
|
||||
blocks, err := r.queries.GetBlocksByHeightRange(ctx, gen.GetBlocksByHeightRangeParams{
|
||||
FromHeight: int32(from),
|
||||
ToHeight: int32(to),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get blocks by height range")
|
||||
}
|
||||
|
||||
if len(blocks) == 0 {
|
||||
return []*types.Block{}, nil
|
||||
}
|
||||
|
||||
txs, err := r.queries.GetTransactionsByHeightRange(ctx, gen.GetTransactionsByHeightRangeParams{
|
||||
FromHeight: int32(from),
|
||||
ToHeight: int32(to),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get transactions by height range")
|
||||
}
|
||||
|
||||
txHashes := lo.Map(txs, func(tx gen.BitcoinTransaction, _ int) string { return tx.TxHash })
|
||||
|
||||
txOuts, err := r.queries.GetTransactionTxOutsByTxHashes(ctx, txHashes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get transaction txouts by tx hashes")
|
||||
}
|
||||
|
||||
txIns, err := r.queries.GetTransactionTxInsByTxHashes(ctx, txHashes)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get transaction txins by tx hashes")
|
||||
}
|
||||
|
||||
// Grouping result by block height and tx hash
|
||||
groupedTxs := lo.GroupBy(txs, func(tx gen.BitcoinTransaction) int32 { return tx.BlockHeight })
|
||||
groupedTxOuts := lo.GroupBy(txOuts, func(txOut gen.BitcoinTransactionTxout) string { return txOut.TxHash })
|
||||
groupedTxIns := lo.GroupBy(txIns, func(txIn gen.BitcoinTransactionTxin) string { return txIn.TxHash })
|
||||
|
||||
var errs []error
|
||||
result := lo.Map(blocks, func(blockModel gen.BitcoinBlock, _ int) *types.Block {
|
||||
header, err := mapBlockHeaderModelToType(blockModel)
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrap(err, "failed to map block header model to type"))
|
||||
return nil
|
||||
}
|
||||
|
||||
txsModel := groupedTxs[blockModel.BlockHeight]
|
||||
return &types.Block{
|
||||
Header: header,
|
||||
Transactions: lo.Map(txsModel, func(txModel gen.BitcoinTransaction, _ int) *types.Transaction {
|
||||
tx, err := mapTransactionModelToType(txModel, groupedTxIns[txModel.TxHash], groupedTxOuts[txModel.TxHash])
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrap(err, "failed to map transaction model to type"))
|
||||
return nil
|
||||
}
|
||||
return &tx
|
||||
}),
|
||||
}
|
||||
})
|
||||
if len(errs) > 0 {
|
||||
return nil, errors.Wrap(errors.Join(errs...), "failed while mapping result")
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
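// Hedged usage sketch (not part of this change): loading fully hydrated blocks
// (headers, transactions, txins/txouts) for a height range from the Postgres
// repository. The repo variable and the height values are illustrative placeholders.
//
//	blocks, err := repo.GetBlocksByHeightRange(ctx, 91722, 91880)
//	if err != nil {
//		return err
//	}
//	for _, b := range blocks {
//		fmt.Println(b.Header.Height, len(b.Transactions))
//	}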
@@ -1,408 +0,0 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
// source: data.sql
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
const batchInsertBlocks = `-- name: BatchInsertBlocks :exec
|
||||
INSERT INTO bitcoin_blocks ("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce")
|
||||
VALUES (
|
||||
unnest($1::INT[]),
|
||||
unnest($2::TEXT[]),
|
||||
unnest($3::INT[]),
|
||||
unnest($4::TEXT[]),
|
||||
unnest($5::TEXT[]),
|
||||
unnest($6::TIMESTAMP WITH TIME ZONE[]), -- or use TIMESTAMPTZ
|
||||
unnest($7::BIGINT[]),
|
||||
unnest($8::BIGINT[])
|
||||
)
|
||||
`
|
||||
|
||||
type BatchInsertBlocksParams struct {
|
||||
BlockHeightArr []int32
|
||||
BlockHashArr []string
|
||||
VersionArr []int32
|
||||
MerkleRootArr []string
|
||||
PrevBlockHashArr []string
|
||||
TimestampArr []pgtype.Timestamptz
|
||||
BitsArr []int64
|
||||
NonceArr []int64
|
||||
}
|
||||
|
||||
func (q *Queries) BatchInsertBlocks(ctx context.Context, arg BatchInsertBlocksParams) error {
|
||||
_, err := q.db.Exec(ctx, batchInsertBlocks,
|
||||
arg.BlockHeightArr,
|
||||
arg.BlockHashArr,
|
||||
arg.VersionArr,
|
||||
arg.MerkleRootArr,
|
||||
arg.PrevBlockHashArr,
|
||||
arg.TimestampArr,
|
||||
arg.BitsArr,
|
||||
arg.NonceArr,
|
||||
)
|
||||
return err
|
||||
}
|
||||
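// Hedged illustration (not generated code): every *Arr slice must have the same
// length; row i of the insert is assembled from the i-th element of each slice.
// The heights, hashes and timestamps below are placeholders, not real data, and
// queries is assumed to be a *Queries value obtained elsewhere.
//
//	err := queries.BatchInsertBlocks(ctx, BatchInsertBlocksParams{
//		BlockHeightArr:   []int32{840000, 840001},
//		BlockHashArr:     []string{"<hash-840000>", "<hash-840001>"},
//		VersionArr:       []int32{4, 4},
//		MerkleRootArr:    []string{"<merkle-840000>", "<merkle-840001>"},
//		PrevBlockHashArr: []string{"<hash-839999>", "<hash-840000>"},
//		TimestampArr: []pgtype.Timestamptz{
//			{Time: time.Unix(1713571767, 0), Valid: true},
//			{Time: time.Unix(1713572300, 0), Valid: true},
//		},
//		BitsArr:  []int64{386089497, 386089497},
//		NonceArr: []int64{123456789, 987654321},
//	})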
|
||||
const batchInsertTransactionTxIns = `-- name: BatchInsertTransactionTxIns :exec
|
||||
WITH update_txout AS (
|
||||
UPDATE "bitcoin_transaction_txouts"
|
||||
SET "is_spent" = true
|
||||
FROM (SELECT unnest($1::TEXT[]) as tx_hash, unnest($2::INT[]) as tx_idx) as txin
|
||||
WHERE "bitcoin_transaction_txouts"."tx_hash" = txin.tx_hash AND "bitcoin_transaction_txouts"."tx_idx" = txin.tx_idx AND "is_spent" = false
|
||||
RETURNING "bitcoin_transaction_txouts"."tx_hash", "bitcoin_transaction_txouts"."tx_idx", "pkscript"
|
||||
), prepare_insert AS (
|
||||
SELECT input.tx_hash, input.tx_idx, prevout_tx_hash, prevout_tx_idx, update_txout.pkscript as prevout_pkscript, scriptsig, witness, sequence
|
||||
FROM (
|
||||
SELECT
|
||||
unnest($3::TEXT[]) as tx_hash,
|
||||
unnest($4::INT[]) as tx_idx,
|
||||
unnest($1::TEXT[]) as prevout_tx_hash,
|
||||
unnest($2::INT[]) as prevout_tx_idx,
|
||||
unnest($5::TEXT[]) as scriptsig,
|
||||
unnest($6::TEXT[]) as witness,
|
||||
unnest($7::INT[]) as sequence
|
||||
) input LEFT JOIN update_txout ON "update_txout"."tx_hash" = "input"."prevout_tx_hash" AND "update_txout"."tx_idx" = "input"."prevout_tx_idx"
|
||||
)
|
||||
INSERT INTO bitcoin_transaction_txins ("tx_hash","tx_idx","prevout_tx_hash","prevout_tx_idx", "prevout_pkscript","scriptsig","witness","sequence")
|
||||
SELECT "tx_hash", "tx_idx", "prevout_tx_hash", "prevout_tx_idx", "prevout_pkscript", "scriptsig", "witness", "sequence" FROM prepare_insert
|
||||
`
|
||||
|
||||
type BatchInsertTransactionTxInsParams struct {
|
||||
PrevoutTxHashArr []string
|
||||
PrevoutTxIdxArr []int32
|
||||
TxHashArr []string
|
||||
TxIdxArr []int32
|
||||
ScriptsigArr []string
|
||||
WitnessArr []string
|
||||
SequenceArr []int32
|
||||
}
|
||||
|
||||
func (q *Queries) BatchInsertTransactionTxIns(ctx context.Context, arg BatchInsertTransactionTxInsParams) error {
|
||||
_, err := q.db.Exec(ctx, batchInsertTransactionTxIns,
|
||||
arg.PrevoutTxHashArr,
|
||||
arg.PrevoutTxIdxArr,
|
||||
arg.TxHashArr,
|
||||
arg.TxIdxArr,
|
||||
arg.ScriptsigArr,
|
||||
arg.WitnessArr,
|
||||
arg.SequenceArr,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const batchInsertTransactionTxOuts = `-- name: BatchInsertTransactionTxOuts :exec
|
||||
INSERT INTO bitcoin_transaction_txouts ("tx_hash","tx_idx","pkscript","value")
|
||||
VALUES (
|
||||
unnest($1::TEXT[]),
|
||||
unnest($2::INT[]),
|
||||
unnest($3::TEXT[]),
|
||||
unnest($4::BIGINT[])
|
||||
)
|
||||
`
|
||||
|
||||
type BatchInsertTransactionTxOutsParams struct {
|
||||
TxHashArr []string
|
||||
TxIdxArr []int32
|
||||
PkscriptArr []string
|
||||
ValueArr []int64
|
||||
}
|
||||
|
||||
func (q *Queries) BatchInsertTransactionTxOuts(ctx context.Context, arg BatchInsertTransactionTxOutsParams) error {
|
||||
_, err := q.db.Exec(ctx, batchInsertTransactionTxOuts,
|
||||
arg.TxHashArr,
|
||||
arg.TxIdxArr,
|
||||
arg.PkscriptArr,
|
||||
arg.ValueArr,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const batchInsertTransactions = `-- name: BatchInsertTransactions :exec
|
||||
INSERT INTO bitcoin_transactions ("tx_hash","version","locktime","block_height","block_hash","idx")
|
||||
VALUES (
|
||||
unnest($1::TEXT[]),
|
||||
unnest($2::INT[]),
|
||||
unnest($3::BIGINT[]),
|
||||
unnest($4::INT[]),
|
||||
unnest($5::TEXT[]),
|
||||
unnest($6::INT[])
|
||||
)
|
||||
`
|
||||
|
||||
type BatchInsertTransactionsParams struct {
|
||||
TxHashArr []string
|
||||
VersionArr []int32
|
||||
LocktimeArr []int64
|
||||
BlockHeightArr []int32
|
||||
BlockHashArr []string
|
||||
IdxArr []int32
|
||||
}
|
||||
|
||||
func (q *Queries) BatchInsertTransactions(ctx context.Context, arg BatchInsertTransactionsParams) error {
|
||||
_, err := q.db.Exec(ctx, batchInsertTransactions,
|
||||
arg.TxHashArr,
|
||||
arg.VersionArr,
|
||||
arg.LocktimeArr,
|
||||
arg.BlockHeightArr,
|
||||
arg.BlockHashArr,
|
||||
arg.IdxArr,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const getBlockByHeight = `-- name: GetBlockByHeight :one
|
||||
SELECT block_height, block_hash, version, merkle_root, prev_block_hash, timestamp, bits, nonce FROM bitcoin_blocks WHERE block_height = $1
|
||||
`
|
||||
|
||||
func (q *Queries) GetBlockByHeight(ctx context.Context, blockHeight int32) (BitcoinBlock, error) {
|
||||
row := q.db.QueryRow(ctx, getBlockByHeight, blockHeight)
|
||||
var i BitcoinBlock
|
||||
err := row.Scan(
|
||||
&i.BlockHeight,
|
||||
&i.BlockHash,
|
||||
&i.Version,
|
||||
&i.MerkleRoot,
|
||||
&i.PrevBlockHash,
|
||||
&i.Timestamp,
|
||||
&i.Bits,
|
||||
&i.Nonce,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getBlocksByHeightRange = `-- name: GetBlocksByHeightRange :many
|
||||
SELECT block_height, block_hash, version, merkle_root, prev_block_hash, timestamp, bits, nonce FROM bitcoin_blocks WHERE block_height >= $1 AND block_height <= $2 ORDER BY block_height ASC
|
||||
`
|
||||
|
||||
type GetBlocksByHeightRangeParams struct {
|
||||
FromHeight int32
|
||||
ToHeight int32
|
||||
}
|
||||
|
||||
func (q *Queries) GetBlocksByHeightRange(ctx context.Context, arg GetBlocksByHeightRangeParams) ([]BitcoinBlock, error) {
|
||||
rows, err := q.db.Query(ctx, getBlocksByHeightRange, arg.FromHeight, arg.ToHeight)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []BitcoinBlock
|
||||
for rows.Next() {
|
||||
var i BitcoinBlock
|
||||
if err := rows.Scan(
|
||||
&i.BlockHeight,
|
||||
&i.BlockHash,
|
||||
&i.Version,
|
||||
&i.MerkleRoot,
|
||||
&i.PrevBlockHash,
|
||||
&i.Timestamp,
|
||||
&i.Bits,
|
||||
&i.Nonce,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getLatestBlockHeader = `-- name: GetLatestBlockHeader :one
|
||||
SELECT block_height, block_hash, version, merkle_root, prev_block_hash, timestamp, bits, nonce FROM bitcoin_blocks ORDER BY block_height DESC LIMIT 1
|
||||
`
|
||||
|
||||
func (q *Queries) GetLatestBlockHeader(ctx context.Context) (BitcoinBlock, error) {
|
||||
row := q.db.QueryRow(ctx, getLatestBlockHeader)
|
||||
var i BitcoinBlock
|
||||
err := row.Scan(
|
||||
&i.BlockHeight,
|
||||
&i.BlockHash,
|
||||
&i.Version,
|
||||
&i.MerkleRoot,
|
||||
&i.PrevBlockHash,
|
||||
&i.Timestamp,
|
||||
&i.Bits,
|
||||
&i.Nonce,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getTransactionByHash = `-- name: GetTransactionByHash :one
|
||||
SELECT tx_hash, version, locktime, block_height, block_hash, idx FROM bitcoin_transactions WHERE tx_hash = $1
|
||||
`
|
||||
|
||||
func (q *Queries) GetTransactionByHash(ctx context.Context, txHash string) (BitcoinTransaction, error) {
|
||||
row := q.db.QueryRow(ctx, getTransactionByHash, txHash)
|
||||
var i BitcoinTransaction
|
||||
err := row.Scan(
|
||||
&i.TxHash,
|
||||
&i.Version,
|
||||
&i.Locktime,
|
||||
&i.BlockHeight,
|
||||
&i.BlockHash,
|
||||
&i.Idx,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getTransactionTxInsByTxHashes = `-- name: GetTransactionTxInsByTxHashes :many
|
||||
SELECT tx_hash, tx_idx, prevout_tx_hash, prevout_tx_idx, prevout_pkscript, scriptsig, witness, sequence FROM bitcoin_transaction_txins WHERE tx_hash = ANY($1::TEXT[])
|
||||
`
|
||||
|
||||
func (q *Queries) GetTransactionTxInsByTxHashes(ctx context.Context, txHashes []string) ([]BitcoinTransactionTxin, error) {
|
||||
rows, err := q.db.Query(ctx, getTransactionTxInsByTxHashes, txHashes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []BitcoinTransactionTxin
|
||||
for rows.Next() {
|
||||
var i BitcoinTransactionTxin
|
||||
if err := rows.Scan(
|
||||
&i.TxHash,
|
||||
&i.TxIdx,
|
||||
&i.PrevoutTxHash,
|
||||
&i.PrevoutTxIdx,
|
||||
&i.PrevoutPkscript,
|
||||
&i.Scriptsig,
|
||||
&i.Witness,
|
||||
&i.Sequence,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getTransactionTxOutsByTxHashes = `-- name: GetTransactionTxOutsByTxHashes :many
|
||||
SELECT tx_hash, tx_idx, pkscript, value, is_spent FROM bitcoin_transaction_txouts WHERE tx_hash = ANY($1::TEXT[])
|
||||
`
|
||||
|
||||
func (q *Queries) GetTransactionTxOutsByTxHashes(ctx context.Context, txHashes []string) ([]BitcoinTransactionTxout, error) {
|
||||
rows, err := q.db.Query(ctx, getTransactionTxOutsByTxHashes, txHashes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []BitcoinTransactionTxout
|
||||
for rows.Next() {
|
||||
var i BitcoinTransactionTxout
|
||||
if err := rows.Scan(
|
||||
&i.TxHash,
|
||||
&i.TxIdx,
|
||||
&i.Pkscript,
|
||||
&i.Value,
|
||||
&i.IsSpent,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getTransactionsByHeightRange = `-- name: GetTransactionsByHeightRange :many
|
||||
SELECT tx_hash, version, locktime, block_height, block_hash, idx FROM bitcoin_transactions WHERE block_height >= $1 AND block_height <= $2
|
||||
`
|
||||
|
||||
type GetTransactionsByHeightRangeParams struct {
|
||||
FromHeight int32
|
||||
ToHeight int32
|
||||
}
|
||||
|
||||
func (q *Queries) GetTransactionsByHeightRange(ctx context.Context, arg GetTransactionsByHeightRangeParams) ([]BitcoinTransaction, error) {
|
||||
rows, err := q.db.Query(ctx, getTransactionsByHeightRange, arg.FromHeight, arg.ToHeight)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []BitcoinTransaction
|
||||
for rows.Next() {
|
||||
var i BitcoinTransaction
|
||||
if err := rows.Scan(
|
||||
&i.TxHash,
|
||||
&i.Version,
|
||||
&i.Locktime,
|
||||
&i.BlockHeight,
|
||||
&i.BlockHash,
|
||||
&i.Idx,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const insertBlock = `-- name: InsertBlock :exec
|
||||
INSERT INTO bitcoin_blocks ("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce") VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
`
|
||||
|
||||
type InsertBlockParams struct {
|
||||
BlockHeight int32
|
||||
BlockHash string
|
||||
Version int32
|
||||
MerkleRoot string
|
||||
PrevBlockHash string
|
||||
Timestamp pgtype.Timestamptz
|
||||
Bits int64
|
||||
Nonce int64
|
||||
}
|
||||
|
||||
func (q *Queries) InsertBlock(ctx context.Context, arg InsertBlockParams) error {
|
||||
_, err := q.db.Exec(ctx, insertBlock,
|
||||
arg.BlockHeight,
|
||||
arg.BlockHash,
|
||||
arg.Version,
|
||||
arg.MerkleRoot,
|
||||
arg.PrevBlockHash,
|
||||
arg.Timestamp,
|
||||
arg.Bits,
|
||||
arg.Nonce,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const revertData = `-- name: RevertData :exec
|
||||
WITH delete_tx AS (
|
||||
DELETE FROM "bitcoin_transactions" WHERE "block_height" >= $1
|
||||
RETURNING "tx_hash"
|
||||
), delete_txin AS (
|
||||
DELETE FROM "bitcoin_transaction_txins" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx)
|
||||
RETURNING "prevout_tx_hash", "prevout_tx_idx"
|
||||
), delete_txout AS (
|
||||
DELETE FROM "bitcoin_transaction_txouts" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx)
|
||||
RETURNING "tx_hash", "tx_idx"
|
||||
), revert_txout_spent AS (
|
||||
UPDATE "bitcoin_transaction_txouts"
|
||||
SET "is_spent" = false
|
||||
WHERE
|
||||
("tx_hash", "tx_idx") IN (SELECT "prevout_tx_hash", "prevout_tx_idx" FROM delete_txin) AND
|
||||
("tx_hash", "tx_idx") NOT IN (SELECT "tx_hash", "tx_idx" FROM delete_txout) -- avoid to modified same row twice (modified the same row twice in a single statement is not supported)
|
||||
RETURNING NULL
|
||||
)
|
||||
DELETE FROM "bitcoin_blocks" WHERE "bitcoin_blocks"."block_height" >= $1
|
||||
`
|
||||
|
||||
func (q *Queries) RevertData(ctx context.Context, fromHeight int32) error {
|
||||
_, err := q.db.Exec(ctx, revertData, fromHeight)
|
||||
return err
|
||||
}
|
||||
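RevertData undoes a chain reorg in one statement: the CTEs delete transactions at or above the given height together with their inputs and outputs, un-mark outpoints that the deleted inputs had spent (skipping rows deleted in the same statement), and finally remove the block headers. A rough calling sketch, assuming queries is a *gen.Queries bound to an open database transaction and reorgHeight is the first height to discard:

// Revert all indexed data from reorgHeight upwards, then resume indexing from there.
if err := queries.RevertData(ctx, int32(reorgHeight)); err != nil {
	return errors.Wrap(err, "failed to revert data from reorged height")
}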
@@ -1,32 +0,0 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
type DBTX interface {
|
||||
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
|
||||
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
|
||||
QueryRow(context.Context, string, ...interface{}) pgx.Row
|
||||
}
|
||||
|
||||
func New(db DBTX) *Queries {
|
||||
return &Queries{db: db}
|
||||
}
|
||||
|
||||
type Queries struct {
|
||||
db DBTX
|
||||
}
|
||||
|
||||
func (q *Queries) WithTx(tx pgx.Tx) *Queries {
|
||||
return &Queries{
|
||||
db: tx,
|
||||
}
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
// source: info.sql
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
const getCurrentDBVersion = `-- name: GetCurrentDBVersion :one
|
||||
SELECT "version" FROM bitcoin_indexer_db_version ORDER BY id DESC LIMIT 1
|
||||
`
|
||||
|
||||
func (q *Queries) GetCurrentDBVersion(ctx context.Context) (int32, error) {
|
||||
row := q.db.QueryRow(ctx, getCurrentDBVersion)
|
||||
var version int32
|
||||
err := row.Scan(&version)
|
||||
return version, err
|
||||
}
|
||||
|
||||
const getCurrentIndexerStats = `-- name: GetCurrentIndexerStats :one
|
||||
SELECT "client_version", "network" FROM bitcoin_indexer_stats ORDER BY id DESC LIMIT 1
|
||||
`
|
||||
|
||||
type GetCurrentIndexerStatsRow struct {
|
||||
ClientVersion string
|
||||
Network string
|
||||
}
|
||||
|
||||
func (q *Queries) GetCurrentIndexerStats(ctx context.Context) (GetCurrentIndexerStatsRow, error) {
|
||||
row := q.db.QueryRow(ctx, getCurrentIndexerStats)
|
||||
var i GetCurrentIndexerStatsRow
|
||||
err := row.Scan(&i.ClientVersion, &i.Network)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const updateIndexerStats = `-- name: UpdateIndexerStats :exec
|
||||
INSERT INTO bitcoin_indexer_stats (client_version, network) VALUES ($1, $2)
|
||||
`
|
||||
|
||||
type UpdateIndexerStatsParams struct {
|
||||
ClientVersion string
|
||||
Network string
|
||||
}
|
||||
|
||||
func (q *Queries) UpdateIndexerStats(ctx context.Context, arg UpdateIndexerStatsParams) error {
|
||||
_, err := q.db.Exec(ctx, updateIndexerStats, arg.ClientVersion, arg.Network)
|
||||
return err
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
type BitcoinBlock struct {
|
||||
BlockHeight int32
|
||||
BlockHash string
|
||||
Version int32
|
||||
MerkleRoot string
|
||||
PrevBlockHash string
|
||||
Timestamp pgtype.Timestamptz
|
||||
Bits int64
|
||||
Nonce int64
|
||||
}
|
||||
|
||||
type BitcoinIndexerDbVersion struct {
|
||||
Id int64
|
||||
Version int32
|
||||
CreatedAt pgtype.Timestamptz
|
||||
}
|
||||
|
||||
type BitcoinIndexerStat struct {
|
||||
Id int64
|
||||
ClientVersion string
|
||||
Network string
|
||||
CreatedAt pgtype.Timestamptz
|
||||
}
|
||||
|
||||
type BitcoinTransaction struct {
|
||||
TxHash string
|
||||
Version int32
|
||||
Locktime int64
|
||||
BlockHeight int32
|
||||
BlockHash string
|
||||
Idx int32
|
||||
}
|
||||
|
||||
type BitcoinTransactionTxin struct {
|
||||
TxHash string
|
||||
TxIdx int32
|
||||
PrevoutTxHash string
|
||||
PrevoutTxIdx int32
|
||||
PrevoutPkscript pgtype.Text
|
||||
Scriptsig string
|
||||
Witness string
|
||||
Sequence int64
|
||||
}
|
||||
|
||||
type BitcoinTransactionTxout struct {
|
||||
TxHash string
|
||||
TxIdx int32
|
||||
Pkscript string
|
||||
Value int64
|
||||
IsSpent bool
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/bitcoin/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen"
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
// Make sure Repository implements the IndexerInformationDataGateway interface
|
||||
var _ datagateway.IndexerInformationDataGateway = (*Repository)(nil)
|
||||
|
||||
func (r *Repository) GetCurrentDBVersion(ctx context.Context) (int32, error) {
|
||||
version, err := r.queries.GetCurrentDBVersion(ctx)
|
||||
if err != nil {
|
||||
return 0, errors.WithStack(err)
|
||||
}
|
||||
return version, nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetLatestIndexerStats(ctx context.Context) (string, common.Network, error) {
|
||||
stats, err := r.queries.GetCurrentIndexerStats(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, pgx.ErrNoRows) {
|
||||
return "", "", errors.Join(errs.NotFound, err)
|
||||
}
|
||||
return "", "", errors.WithStack(err)
|
||||
}
|
||||
return stats.ClientVersion, common.Network(stats.Network), nil
|
||||
}
|
||||
|
||||
func (r *Repository) UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error {
|
||||
if err := r.queries.UpdateIndexerStats(ctx, gen.UpdateIndexerStatsParams{
|
||||
ClientVersion: clientVersion,
|
||||
Network: network.String(),
|
||||
}); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,197 +0,0 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"encoding/hex"
|
||||
"slices"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen"
|
||||
"github.com/gaze-network/indexer-network/pkg/btcutils"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
func mapBlockHeaderModelToType(src gen.BitcoinBlock) (types.BlockHeader, error) {
|
||||
hash, err := chainhash.NewHashFromStr(src.BlockHash)
|
||||
if err != nil {
|
||||
return types.BlockHeader{}, errors.Join(errors.Wrap(err, "failed to parse block hash"), errs.InternalError)
|
||||
}
|
||||
prevHash, err := chainhash.NewHashFromStr(src.PrevBlockHash)
|
||||
if err != nil {
|
||||
return types.BlockHeader{}, errors.Join(errors.Wrap(err, "failed to parse prev block hash"), errs.InternalError)
|
||||
}
|
||||
merkleRoot, err := chainhash.NewHashFromStr(src.MerkleRoot)
|
||||
if err != nil {
|
||||
return types.BlockHeader{}, errors.Join(errors.Wrap(err, "failed to parse merkle root"), errs.InternalError)
|
||||
}
|
||||
return types.BlockHeader{
|
||||
Hash: *hash,
|
||||
Height: int64(src.BlockHeight),
|
||||
Version: src.Version,
|
||||
PrevBlock: *prevHash,
|
||||
MerkleRoot: *merkleRoot,
|
||||
Timestamp: src.Timestamp.Time,
|
||||
Bits: uint32(src.Bits),
|
||||
Nonce: uint32(src.Nonce),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapBlocksTypeToParams(src []*types.Block) (gen.BatchInsertBlocksParams, gen.BatchInsertTransactionsParams, gen.BatchInsertTransactionTxOutsParams, gen.BatchInsertTransactionTxInsParams) {
|
||||
blocks := gen.BatchInsertBlocksParams{
|
||||
BlockHeightArr: make([]int32, 0, len(src)),
|
||||
BlockHashArr: make([]string, 0, len(src)),
|
||||
VersionArr: make([]int32, 0, len(src)),
|
||||
MerkleRootArr: make([]string, 0, len(src)),
|
||||
PrevBlockHashArr: make([]string, 0, len(src)),
|
||||
TimestampArr: make([]pgtype.Timestamptz, 0, len(src)),
|
||||
BitsArr: make([]int64, 0, len(src)),
|
||||
NonceArr: make([]int64, 0, len(src)),
|
||||
}
|
||||
txs := gen.BatchInsertTransactionsParams{
|
||||
TxHashArr: []string{},
|
||||
VersionArr: []int32{},
|
||||
LocktimeArr: []int64{},
|
||||
BlockHeightArr: []int32{},
|
||||
BlockHashArr: []string{},
|
||||
IdxArr: []int32{},
|
||||
}
|
||||
txouts := gen.BatchInsertTransactionTxOutsParams{
|
||||
TxHashArr: []string{},
|
||||
TxIdxArr: []int32{},
|
||||
PkscriptArr: []string{},
|
||||
ValueArr: []int64{},
|
||||
}
|
||||
txins := gen.BatchInsertTransactionTxInsParams{
|
||||
PrevoutTxHashArr: []string{},
|
||||
PrevoutTxIdxArr: []int32{},
|
||||
TxHashArr: []string{},
|
||||
TxIdxArr: []int32{},
|
||||
ScriptsigArr: []string{},
|
||||
WitnessArr: []string{},
|
||||
SequenceArr: []int32{},
|
||||
}
|
||||
|
||||
for _, block := range src {
|
||||
blockHash := block.Header.Hash.String()
|
||||
|
||||
// Batch insert blocks
|
||||
blocks.BlockHeightArr = append(blocks.BlockHeightArr, int32(block.Header.Height))
|
||||
blocks.BlockHashArr = append(blocks.BlockHashArr, blockHash)
|
||||
blocks.VersionArr = append(blocks.VersionArr, block.Header.Version)
|
||||
blocks.MerkleRootArr = append(blocks.MerkleRootArr, block.Header.MerkleRoot.String())
|
||||
blocks.PrevBlockHashArr = append(blocks.PrevBlockHashArr, block.Header.PrevBlock.String())
|
||||
blocks.TimestampArr = append(blocks.TimestampArr, pgtype.Timestamptz{
|
||||
Time: block.Header.Timestamp,
|
||||
Valid: true,
|
||||
})
|
||||
blocks.BitsArr = append(blocks.BitsArr, int64(block.Header.Bits))
|
||||
blocks.NonceArr = append(blocks.NonceArr, int64(block.Header.Nonce))
|
||||
|
||||
for txIdx, srcTx := range block.Transactions {
|
||||
txHash := srcTx.TxHash.String()
|
||||
|
||||
// Batch insert transactions
|
||||
txs.TxHashArr = append(txs.TxHashArr, txHash)
|
||||
txs.VersionArr = append(txs.VersionArr, srcTx.Version)
|
||||
txs.LocktimeArr = append(txs.LocktimeArr, int64(srcTx.LockTime))
|
||||
txs.BlockHeightArr = append(txs.BlockHeightArr, int32(block.Header.Height))
|
||||
txs.BlockHashArr = append(txs.BlockHashArr, blockHash)
|
||||
txs.IdxArr = append(txs.IdxArr, int32(txIdx))
|
||||
|
||||
// Batch insert txins
|
||||
for idx, txin := range srcTx.TxIn {
|
||||
var witness string
|
||||
if len(txin.Witness) > 0 {
|
||||
witness = btcutils.WitnessToString(txin.Witness)
|
||||
}
|
||||
txins.TxHashArr = append(txins.TxHashArr, txHash)
|
||||
txins.TxIdxArr = append(txins.TxIdxArr, int32(idx))
|
||||
txins.PrevoutTxHashArr = append(txins.PrevoutTxHashArr, txin.PreviousOutTxHash.String())
|
||||
txins.PrevoutTxIdxArr = append(txins.PrevoutTxIdxArr, int32(txin.PreviousOutIndex))
|
||||
txins.ScriptsigArr = append(txins.ScriptsigArr, hex.EncodeToString(txin.SignatureScript))
|
||||
txins.WitnessArr = append(txins.WitnessArr, witness)
|
||||
txins.SequenceArr = append(txins.SequenceArr, int32(txin.Sequence))
|
||||
}
|
||||
|
||||
// Batch insert txouts
|
||||
for idx, txout := range srcTx.TxOut {
|
||||
txouts.TxHashArr = append(txouts.TxHashArr, txHash)
|
||||
txouts.TxIdxArr = append(txouts.TxIdxArr, int32(idx))
|
||||
txouts.PkscriptArr = append(txouts.PkscriptArr, hex.EncodeToString(txout.PkScript))
|
||||
txouts.ValueArr = append(txouts.ValueArr, txout.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
return blocks, txs, txouts, txins
|
||||
}
|
||||
|
||||
func mapTransactionModelToType(src gen.BitcoinTransaction, txInModel []gen.BitcoinTransactionTxin, txOutModels []gen.BitcoinTransactionTxout) (types.Transaction, error) {
|
||||
blockHash, err := chainhash.NewHashFromStr(src.BlockHash)
|
||||
if err != nil {
|
||||
return types.Transaction{}, errors.Wrap(err, "failed to parse block hash")
|
||||
}
|
||||
|
||||
txHash, err := chainhash.NewHashFromStr(src.TxHash)
|
||||
if err != nil {
|
||||
return types.Transaction{}, errors.Wrap(err, "failed to parse tx hash")
|
||||
}
|
||||
|
||||
// Sort txins and txouts by index (Asc)
|
||||
slices.SortFunc(txOutModels, func(i, j gen.BitcoinTransactionTxout) int {
|
||||
return cmp.Compare(i.TxIdx, j.TxIdx)
|
||||
})
|
||||
slices.SortFunc(txInModel, func(i, j gen.BitcoinTransactionTxin) int {
|
||||
return cmp.Compare(i.TxIdx, j.TxIdx)
|
||||
})
|
||||
|
||||
txIns := make([]*types.TxIn, 0, len(txInModel))
|
||||
txOuts := make([]*types.TxOut, 0, len(txOutModels))
|
||||
for _, txInModel := range txInModel {
|
||||
scriptsig, err := hex.DecodeString(txInModel.Scriptsig)
|
||||
if err != nil {
|
||||
return types.Transaction{}, errors.Wrap(err, "failed to decode scriptsig")
|
||||
}
|
||||
|
||||
prevoutTxHash, err := chainhash.NewHashFromStr(txInModel.PrevoutTxHash)
|
||||
if err != nil {
|
||||
return types.Transaction{}, errors.Wrap(err, "failed to parse prevout tx hash")
|
||||
}
|
||||
|
||||
witness, err := btcutils.WitnessFromString(txInModel.Witness)
|
||||
if err != nil {
|
||||
return types.Transaction{}, errors.Wrap(err, "failed to parse witness from hex string")
|
||||
}
|
||||
|
||||
txIns = append(txIns, &types.TxIn{
|
||||
SignatureScript: scriptsig,
|
||||
Witness: witness,
|
||||
Sequence: uint32(txInModel.Sequence),
|
||||
PreviousOutIndex: uint32(txInModel.PrevoutTxIdx),
|
||||
PreviousOutTxHash: *prevoutTxHash,
|
||||
})
|
||||
}
|
||||
for _, txOutModel := range txOutModels {
|
||||
pkscript, err := hex.DecodeString(txOutModel.Pkscript)
|
||||
if err != nil {
|
||||
return types.Transaction{}, errors.Wrap(err, "failed to decode pkscript")
|
||||
}
|
||||
txOuts = append(txOuts, &types.TxOut{
|
||||
PkScript: pkscript,
|
||||
Value: txOutModel.Value,
|
||||
})
|
||||
}
|
||||
|
||||
return types.Transaction{
|
||||
BlockHeight: int64(src.BlockHeight),
|
||||
BlockHash: *blockHash,
|
||||
Index: uint32(src.Idx),
|
||||
TxHash: *txHash,
|
||||
Version: src.Version,
|
||||
LockTime: uint32(src.Locktime),
|
||||
TxIn: txIns,
|
||||
TxOut: txOuts,
|
||||
}, nil
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"github.com/gaze-network/indexer-network/internal/postgres"
|
||||
"github.com/gaze-network/indexer-network/modules/bitcoin/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen"
|
||||
)
|
||||
|
||||
// Make sure Repository implements the BitcoinDataGateway interface
|
||||
var _ datagateway.BitcoinDataGateway = (*Repository)(nil)
|
||||
|
||||
type Repository struct {
|
||||
db postgres.DB
|
||||
queries *gen.Queries
|
||||
}
|
||||
|
||||
func NewRepository(db postgres.DB) *Repository {
|
||||
return &Repository{
|
||||
db: db,
|
||||
queries: gen.New(db),
|
||||
}
|
||||
}
|
||||
@@ -1,35 +0,0 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
func (r *Repository) GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) {
|
||||
model, err := r.queries.GetTransactionByHash(ctx, txHash.String())
|
||||
if err != nil {
|
||||
if errors.Is(err, pgx.ErrNoRows) {
|
||||
return nil, errors.Join(errs.NotFound, err)
|
||||
}
|
||||
return nil, errors.Wrap(err, "failed to get transaction by hash")
|
||||
}
|
||||
txIns, err := r.queries.GetTransactionTxInsByTxHashes(ctx, []string{txHash.String()})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get transaction txins by tx hashes")
|
||||
}
|
||||
txOuts, err := r.queries.GetTransactionTxOutsByTxHashes(ctx, []string{txHash.String()})
|
||||
if err != nil && !errors.Is(err, pgx.ErrNoRows) {
|
||||
return nil, errors.Wrap(err, "failed to get transaction txouts by tx hashes")
|
||||
}
|
||||
|
||||
tx, err := mapTransactionModelToType(model, txIns, txOuts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to map transaction model to type")
|
||||
}
|
||||
return &tx, nil
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
package bitcoin
|
||||
@@ -2,6 +2,7 @@ package httphandler
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
@@ -14,9 +15,11 @@ import (
|
||||
)
|
||||
|
||||
type getTransactionsRequest struct {
|
||||
Wallet string `query:"wallet"`
|
||||
Id string `query:"id"`
|
||||
BlockHeight uint64 `query:"blockHeight"`
|
||||
Wallet string `query:"wallet"`
|
||||
Id string `query:"id"`
|
||||
|
||||
FromBlock int64 `query:"fromBlock"`
|
||||
ToBlock int64 `query:"toBlock"`
|
||||
}
|
||||
|
||||
func (r getTransactionsRequest) Validate() error {
|
||||
@@ -24,6 +27,12 @@ func (r getTransactionsRequest) Validate() error {
|
||||
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
|
||||
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
|
||||
}
|
||||
if r.FromBlock < -1 {
|
||||
errList = append(errList, errors.Errorf("invalid fromBlock range"))
|
||||
}
|
||||
if r.ToBlock < -1 {
|
||||
errList = append(errList, errors.Errorf("invalid toBlock range"))
|
||||
}
|
||||
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
|
||||
}
|
||||
|
||||
@@ -125,17 +134,31 @@ func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
blockHeight := req.BlockHeight
|
||||
// set blockHeight to the latest block height if blockHeight, pkScript, and runeId are not provided
|
||||
if blockHeight == 0 && pkScript == nil && runeId == (runes.RuneId{}) {
|
||||
// default to latest block
|
||||
if req.ToBlock == 0 {
|
||||
req.ToBlock = -1
|
||||
}
|
||||
|
||||
// get latest block height if block height is -1
|
||||
if req.FromBlock == -1 || req.ToBlock == -1 {
|
||||
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetLatestBlock")
|
||||
}
|
||||
blockHeight = uint64(blockHeader.Height)
|
||||
if req.FromBlock == -1 {
|
||||
req.FromBlock = blockHeader.Height
|
||||
}
|
||||
if req.ToBlock == -1 {
|
||||
req.ToBlock = blockHeader.Height
|
||||
}
|
||||
}
|
||||
|
||||
txs, err := h.usecase.GetRuneTransactions(ctx.UserContext(), pkScript, runeId, blockHeight)
|
||||
// validate block height range
|
||||
if req.FromBlock > req.ToBlock {
|
||||
return errs.NewPublicError(fmt.Sprintf("fromBlock must be less than or equal to toBlock, got fromBlock=%d, toBlock=%d", req.FromBlock, req.ToBlock))
|
||||
}
|
||||
|
||||
txs, err := h.usecase.GetRuneTransactions(ctx.UserContext(), pkScript, runeId, uint64(req.FromBlock), uint64(req.ToBlock))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error during GetRuneTransactions")
|
||||
}
|
||||
|
||||
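The reworked handler resolves the new range parameters before querying: an unset toBlock defaults to -1, and -1 on either bound is replaced with the latest indexed block height, after which fromBlock must not exceed toBlock. Some illustrative requests (the endpoint path and method are assumptions, not taken from this diff):

// Hypothetical examples of the resulting query semantics:
//
//   GET /runes/transactions?wallet=bc1q...&fromBlock=840000&toBlock=840010
//       -> that wallet's rune transactions in blocks 840000..840010
//   GET /runes/transactions?wallet=bc1q...&fromBlock=-1
//       -> toBlock defaults to -1; both bounds resolve to the latest block
//   GET /runes/transactions?fromBlock=840010&toBlock=840000
//       -> rejected: fromBlock must be less than or equal to toBlock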
@@ -3,7 +3,7 @@ package config
|
||||
import "github.com/gaze-network/indexer-network/internal/postgres"
|
||||
|
||||
type Config struct {
|
||||
Datasource string `mapstructure:"datasource"` // Datasource to fetch bitcoin data for Meta-Protocol e.g. `bitcoin-node` | `database`
|
||||
Datasource string `mapstructure:"datasource"` // Datasource to fetch bitcoin data for Meta-Protocol e.g. `bitcoin-node`
|
||||
Database string `mapstructure:"database"` // Database to store runes data.
|
||||
APIHandlers []string `mapstructure:"api_handlers"` // List of API handlers to enable. (e.g. `http`)
|
||||
Postgres postgres.Config `mapstructure:"postgres"`
|
||||
|
||||
@@ -55,8 +55,9 @@ SELECT * FROM runes_transactions
|
||||
OR runes_transactions.burns ? @rune_id
|
||||
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = @rune_id_block_height AND runes_transactions.index = @rune_id_tx_index)
|
||||
) AND (
|
||||
@block_height::INT = 0 OR runes_transactions.block_height = @block_height::INT -- if @block_height > 0, apply block_height filter
|
||||
);
|
||||
@from_block <= runes_transactions.block_height AND runes_transactions.block_height <= @to_block
|
||||
)
|
||||
ORDER BY runes_transactions.block_height DESC LIMIT 10000;
|
||||
|
||||
-- name: CountRuneEntries :one
|
||||
SELECT COUNT(*) FROM runes_entries;
|
||||
|
||||
@@ -27,7 +27,7 @@ type RunesReaderDataGateway interface {
|
||||
GetLatestBlock(ctx context.Context) (types.BlockHeader, error)
|
||||
GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error)
|
||||
// GetRuneTransactions returns the runes transactions, filterable by pkScript, runeId, and the block height range [fromBlock, toBlock]. If pkScript or runeId is a zero value, that filter is ignored.
|
||||
GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, height uint64) ([]*entity.RuneTransaction, error)
|
||||
GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64) ([]*entity.RuneTransaction, error)
|
||||
|
||||
GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error)
|
||||
GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error)
|
||||
|
||||
@@ -9,12 +9,12 @@ import (
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/indexers"
|
||||
"github.com/gaze-network/indexer-network/core/indexer"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/bitcoin/btcclient"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
"github.com/gaze-network/indexer-network/pkg/btcclient"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
"github.com/gaze-network/indexer-network/pkg/reportingclient"
|
||||
@@ -22,15 +22,16 @@ import (
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
var _ indexers.BitcoinProcessor = (*Processor)(nil)
|
||||
// Make sure Processor implements the Bitcoin Processor interface
|
||||
var _ indexer.Processor[*types.Block] = (*Processor)(nil)
|
||||
|
||||
type Processor struct {
|
||||
runesDg datagateway.RunesDataGateway
|
||||
indexerInfoDg datagateway.IndexerInfoDataGateway
|
||||
bitcoinClient btcclient.Contract
|
||||
bitcoinDataSource indexers.BitcoinDatasource
|
||||
network common.Network
|
||||
reportingClient *reportingclient.ReportingClient
|
||||
runesDg datagateway.RunesDataGateway
|
||||
indexerInfoDg datagateway.IndexerInfoDataGateway
|
||||
bitcoinClient btcclient.Contract
|
||||
network common.Network
|
||||
reportingClient *reportingclient.ReportingClient
|
||||
cleanupFuncs []func(context.Context) error
|
||||
|
||||
newRuneEntries map[runes.RuneId]*runes.RuneEntry
|
||||
newRuneEntryStates map[runes.RuneId]*runes.RuneEntry
|
||||
@@ -40,14 +41,14 @@ type Processor struct {
|
||||
newRuneTxs []*entity.RuneTransaction
|
||||
}
|
||||
|
||||
func NewProcessor(runesDg datagateway.RunesDataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, bitcoinClient btcclient.Contract, bitcoinDataSource indexers.BitcoinDatasource, network common.Network, reportingClient *reportingclient.ReportingClient) *Processor {
|
||||
func NewProcessor(runesDg datagateway.RunesDataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, bitcoinClient btcclient.Contract, network common.Network, reportingClient *reportingclient.ReportingClient, cleanupFuncs []func(context.Context) error) *Processor {
|
||||
return &Processor{
|
||||
runesDg: runesDg,
|
||||
indexerInfoDg: indexerInfoDg,
|
||||
bitcoinClient: bitcoinClient,
|
||||
bitcoinDataSource: bitcoinDataSource,
|
||||
network: network,
|
||||
reportingClient: reportingClient,
|
||||
cleanupFuncs: cleanupFuncs,
|
||||
newRuneEntries: make(map[runes.RuneId]*runes.RuneEntry),
|
||||
newRuneEntryStates: make(map[runes.RuneId]*runes.RuneEntry),
|
||||
newOutPointBalances: make(map[wire.OutPoint][]*entity.OutPointBalance),
|
||||
@@ -229,3 +230,13 @@ func (p *Processor) RevertData(ctx context.Context, from int64) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Processor) Shutdown(ctx context.Context) error {
|
||||
var errs []error
|
||||
for _, cleanup := range p.cleanupFuncs {
|
||||
if err := cleanup(ctx); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
return errors.WithStack(errors.Join(errs...))
|
||||
}
|
||||
|
||||
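The new Shutdown method drains cleanup functions registered when the processor was constructed, so resources such as the database pool are released exactly once. An illustrative wiring sketch (names are placeholders; the real wiring appears in the runes module constructor later in this diff):

// Illustrative only: register teardown work at construction time, run it on shutdown.
cleanupFuncs := []func(context.Context) error{
	func(ctx context.Context) error { pg.Close(); return nil },
}
p := NewProcessor(runesDg, indexerInfoDg, bitcoinClient, network, reportingClient, cleanupFuncs)
defer func() {
	if err := p.Shutdown(context.Background()); err != nil {
		// log or otherwise handle the shutdown error
	}
}()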
@@ -646,8 +646,9 @@ SELECT hash, runes_transactions.block_height, index, timestamp, inputs, outputs,
|
||||
OR runes_transactions.burns ? $5
|
||||
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = $6 AND runes_transactions.index = $7)
|
||||
) AND (
|
||||
$8::INT = 0 OR runes_transactions.block_height = $8::INT -- if @block_height > 0, apply block_height filter
|
||||
$8 <= runes_transactions.block_height AND runes_transactions.block_height <= $9
|
||||
)
|
||||
ORDER BY runes_transactions.block_height DESC LIMIT 10000
|
||||
`
|
||||
|
||||
type GetRuneTransactionsParams struct {
|
||||
@@ -658,7 +659,8 @@ type GetRuneTransactionsParams struct {
|
||||
RuneID []byte
|
||||
RuneIDBlockHeight int32
|
||||
RuneIDTxIndex int32
|
||||
BlockHeight int32
|
||||
FromBlock int32
|
||||
ToBlock int32
|
||||
}
|
||||
|
||||
type GetRuneTransactionsRow struct {
|
||||
@@ -703,7 +705,8 @@ func (q *Queries) GetRuneTransactions(ctx context.Context, arg GetRuneTransactio
|
||||
arg.RuneID,
|
||||
arg.RuneIDBlockHeight,
|
||||
arg.RuneIDTxIndex,
|
||||
arg.BlockHeight,
|
||||
arg.FromBlock,
|
||||
arg.ToBlock,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -62,7 +62,7 @@ func (r *Repository) GetIndexedBlockByHeight(ctx context.Context, height int64)
|
||||
return indexedBlock, nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, height uint64) ([]*entity.RuneTransaction, error) {
|
||||
func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64) ([]*entity.RuneTransaction, error) {
|
||||
pkScriptParam := []byte(fmt.Sprintf(`[{"pkScript":"%s"}]`, hex.EncodeToString(pkScript)))
|
||||
runeIdParam := []byte(fmt.Sprintf(`[{"runeId":"%s"}]`, runeId.String()))
|
||||
rows, err := r.queries.GetRuneTransactions(ctx, gen.GetRuneTransactionsParams{
|
||||
@@ -75,7 +75,8 @@ func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, r
|
||||
RuneIDBlockHeight: int32(runeId.BlockHeight),
|
||||
RuneIDTxIndex: int32(runeId.TxIndex),
|
||||
|
||||
BlockHeight: int32(height),
|
||||
FromBlock: int32(fromBlock),
|
||||
ToBlock: int32(toBlock),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error during query")
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/jackc/pgx"
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
var ErrTxAlreadyExists = errors.New("Transaction already exists. Call Commit() or Rollback() first.")
|
||||
|
||||
modules/runes/runes.go (new file, 93 lines)
@@ -0,0 +1,93 @@
|
||||
package runes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/btcsuite/btcd/rpcclient"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/datasources"
|
||||
"github.com/gaze-network/indexer-network/core/indexer"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/internal/config"
|
||||
"github.com/gaze-network/indexer-network/internal/postgres"
|
||||
runesapi "github.com/gaze-network/indexer-network/modules/runes/api"
|
||||
runesdatagateway "github.com/gaze-network/indexer-network/modules/runes/datagateway"
|
||||
runespostgres "github.com/gaze-network/indexer-network/modules/runes/repository/postgres"
|
||||
runesusecase "github.com/gaze-network/indexer-network/modules/runes/usecase"
|
||||
"github.com/gaze-network/indexer-network/pkg/btcclient"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/reportingclient"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/samber/do/v2"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
func New(injector do.Injector) (indexer.IndexerWorker, error) {
|
||||
ctx := do.MustInvoke[context.Context](injector)
|
||||
conf := do.MustInvoke[config.Config](injector)
|
||||
reportingClient := do.MustInvoke[*reportingclient.ReportingClient](injector)
|
||||
|
||||
var (
|
||||
runesDg runesdatagateway.RunesDataGateway
|
||||
indexerInfoDg runesdatagateway.IndexerInfoDataGateway
|
||||
)
|
||||
var cleanupFuncs []func(context.Context) error
|
||||
switch strings.ToLower(conf.Modules.Runes.Database) {
|
||||
case "postgresql", "postgres", "pg":
|
||||
pg, err := postgres.NewPool(ctx, conf.Modules.Runes.Postgres)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.InvalidArgument) {
|
||||
return nil, errors.Wrap(err, "Invalid Postgres configuration for indexer")
|
||||
}
|
||||
return nil, errors.Wrap(err, "can't create Postgres connection pool")
|
||||
}
|
||||
cleanupFuncs = append(cleanupFuncs, func(ctx context.Context) error {
|
||||
pg.Close()
|
||||
return nil
|
||||
})
|
||||
runesRepo := runespostgres.NewRepository(pg)
|
||||
runesDg = runesRepo
|
||||
indexerInfoDg = runesRepo
|
||||
default:
|
||||
return nil, errors.Wrapf(errs.Unsupported, "%q database for indexer is not supported", conf.Modules.Runes.Database)
|
||||
}
|
||||
|
||||
var bitcoinDatasource datasources.Datasource[*types.Block]
|
||||
var bitcoinClient btcclient.Contract
|
||||
switch strings.ToLower(conf.Modules.Runes.Datasource) {
|
||||
case "bitcoin-node":
|
||||
btcClient := do.MustInvoke[*rpcclient.Client](injector)
|
||||
bitcoinNodeDatasource := datasources.NewBitcoinNode(btcClient)
|
||||
bitcoinDatasource = bitcoinNodeDatasource
|
||||
bitcoinClient = bitcoinNodeDatasource
|
||||
default:
|
||||
return nil, errors.Wrapf(errs.Unsupported, "%q datasource is not supported", conf.Modules.Runes.Datasource)
|
||||
}
|
||||
|
||||
processor := NewProcessor(runesDg, indexerInfoDg, bitcoinClient, conf.Network, reportingClient, cleanupFuncs)
|
||||
if err := processor.VerifyStates(ctx); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Mount API
|
||||
apiHandlers := lo.Uniq(conf.Modules.Runes.APIHandlers)
|
||||
for _, handler := range apiHandlers {
|
||||
switch handler { // TODO: support more handlers (e.g. gRPC)
|
||||
case "http":
|
||||
httpServer := do.MustInvoke[*fiber.App](injector)
|
||||
runesUsecase := runesusecase.New(runesDg, bitcoinClient)
|
||||
runesHTTPHandler := runesapi.NewHTTPHandler(conf.Network, runesUsecase)
|
||||
if err := runesHTTPHandler.Mount(httpServer); err != nil {
|
||||
return nil, errors.Wrap(err, "can't mount Runes API")
|
||||
}
|
||||
logger.InfoContext(ctx, "Mounted HTTP handler")
|
||||
default:
|
||||
return nil, errors.Wrapf(errs.Unsupported, "%q API handler is not supported", handler)
|
||||
}
|
||||
}
|
||||
|
||||
indexer := indexer.New(processor, bitcoinDatasource)
|
||||
return indexer, nil
|
||||
}
|
||||
@@ -69,8 +69,26 @@ func ParseTag(input interface{}) (Tag, error) {
|
||||
return input, nil
|
||||
case uint128.Uint128:
|
||||
return Tag(input), nil
|
||||
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
|
||||
return Tag(uint128.From64(input.(uint64))), nil
|
||||
case int:
|
||||
return Tag(uint128.From64(uint64(input))), nil
|
||||
case int8:
|
||||
return Tag(uint128.From64(uint64(input))), nil
|
||||
case int16:
|
||||
return Tag(uint128.From64(uint64(input))), nil
|
||||
case int32:
|
||||
return Tag(uint128.From64(uint64(input))), nil
|
||||
case int64:
|
||||
return Tag(uint128.From64(uint64(input))), nil
|
||||
case uint:
|
||||
return Tag(uint128.From64(uint64(input))), nil
|
||||
case uint8:
|
||||
return Tag(uint128.From64(uint64(input))), nil
|
||||
case uint16:
|
||||
return Tag(uint128.From64(uint64(input))), nil
|
||||
case uint32:
|
||||
return Tag(uint128.From64(uint64(input))), nil
|
||||
case uint64:
|
||||
return Tag(uint128.From64(input)), nil
|
||||
case big.Int:
|
||||
u128, err := uint128.FromBig(&input)
|
||||
if err != nil {
|
||||
|
||||
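The expanded switch replaces a single combined case. The distinction matters in Go: inside a multi-type case the switch variable keeps its interface type, so the old input.(uint64) assertion panicked for every integer type except uint64, whereas the per-type cases convert safely. A standalone illustration of the pitfall (not code from this repository):

// Illustrative only: why a combined case with a hard type assertion is unsafe.
func toUint64(input interface{}) uint64 {
	switch input.(type) {
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
		// input is still interface{} here, so this assertion only succeeds
		// when the dynamic type is exactly uint64.
		return input.(uint64)
	}
	return 0
}

// toUint64(uint64(7)) -> 7
// toUint64(int32(7))  -> panic: interface conversion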
@@ -8,8 +8,8 @@ import (
|
||||
"github.com/gaze-network/indexer-network/modules/runes/runes"
|
||||
)
|
||||
|
||||
func (u *Usecase) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, height uint64) ([]*entity.RuneTransaction, error) {
|
||||
txs, err := u.runesDg.GetRuneTransactions(ctx, pkScript, runeId, height)
|
||||
func (u *Usecase) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64) ([]*entity.RuneTransaction, error) {
|
||||
txs, err := u.runesDg.GetRuneTransactions(ctx, pkScript, runeId, fromBlock, toBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error during GetTransactionsByHeight")
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
package usecase
|
||||
|
||||
import (
|
||||
"github.com/gaze-network/indexer-network/modules/bitcoin/btcclient"
|
||||
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
|
||||
"github.com/gaze-network/indexer-network/pkg/btcclient"
|
||||
)
|
||||
|
||||
type Usecase struct {
|
||||
|
||||
pkg/automaxprocs/automaxprocs.go (new file, 99 lines)
@@ -0,0 +1,99 @@
|
||||
package automaxprocs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
"go.uber.org/automaxprocs/maxprocs"
|
||||
)
|
||||
|
||||
var (
|
||||
// undo is the undo function returned by maxprocs.Set
|
||||
undo func()
|
||||
|
||||
// autoMaxProcs is the value of GOMAXPROCS set by automaxprocs.
|
||||
// will be -1 if `automaxprocs` is not initialized.
|
||||
autoMaxProcs = -1
|
||||
|
||||
// initialMaxProcs is the initial value of GOMAXPROCS.
|
||||
initialMaxProcs = Current()
|
||||
)
|
||||
|
||||
func Init() error {
|
||||
logger := logger.With(
|
||||
slogx.String("package", "automaxprocs"),
|
||||
slogx.String("event", "set_gomaxprocs"),
|
||||
slogx.Int("prev_maxprocs", initialMaxProcs),
|
||||
)
|
||||
|
||||
// Create a logger function for `maxprocs.Set`.
|
||||
setMaxProcLogger := func(format string, v ...any) {
|
||||
fields := make([]slog.Attr, 0, 1)
|
||||
|
||||
// `maxprocs.Set` will always pass current GOMAXPROCS value to logger.
|
||||
// except when calling `undo` function, it will not pass any value.
|
||||
if val, ok := utils.Optional(v); ok {
|
||||
// if `GOMAXPROCS` environment variable is set, then `automaxprocs` will honor it.
|
||||
if _, exists := os.LookupEnv("GOMAXPROCS"); exists {
|
||||
val = Current()
|
||||
}
|
||||
|
||||
// add a logging field for the `set_maxprocs` value if it is present as an integer.
|
||||
if setmaxprocs, ok := val.(int); ok {
|
||||
fields = append(fields, slogx.Int("set_maxprocs", setmaxprocs))
|
||||
}
|
||||
}
|
||||
|
||||
logger.LogAttrs(context.Background(), slog.LevelInfo, fmt.Sprintf(format, v...), fields...)
|
||||
}
|
||||
|
||||
// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning
|
||||
// any error encountered and an undo function.
|
||||
//
|
||||
// Set is a no-op on non-Linux systems and in Linux environments without a
|
||||
// configured CPU quota.
|
||||
revert, err := maxprocs.Set(maxprocs.Logger(setMaxProcLogger), maxprocs.Min(1))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// set the result of `maxprocs.Set` to global variable.
|
||||
autoMaxProcs = Current()
|
||||
undo = revert
|
||||
return nil
|
||||
}
|
||||
|
||||
// Undo restores GOMAXPROCS to its previous value.
|
||||
// or reverts to the initial value if `automaxprocs` is not initialized.
|
||||
//
|
||||
// returns the current GOMAXPROCS value.
|
||||
func Undo() int {
|
||||
if undo != nil {
|
||||
undo()
|
||||
return Current()
|
||||
}
|
||||
|
||||
runtime.GOMAXPROCS(initialMaxProcs)
|
||||
return initialMaxProcs
|
||||
}
|
||||
|
||||
// Current returns the current value of GOMAXPROCS.
|
||||
func Current() int {
|
||||
return runtime.GOMAXPROCS(0)
|
||||
}
|
||||
|
||||
// Value returns the value of GOMAXPROCS set by automaxprocs.
|
||||
// returns -1 if `automaxprocs` is not initialized.
|
||||
func Value() int {
|
||||
if autoMaxProcs <= 0 {
|
||||
return -1
|
||||
}
|
||||
return autoMaxProcs
|
||||
}
|
||||
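The package above wraps go.uber.org/automaxprocs behind the project's structured logger so GOMAXPROCS follows the container CPU quota. A minimal usage sketch (the import path is inferred from the file location pkg/automaxprocs and is an assumption):

package main

import (
	"log"

	"github.com/gaze-network/indexer-network/pkg/automaxprocs"
)

func main() {
	// Align GOMAXPROCS with the cgroup CPU quota; a no-op outside Linux containers.
	if err := automaxprocs.Init(); err != nil {
		log.Fatalf("automaxprocs: %v", err)
	}
	defer automaxprocs.Undo() // restore the previous GOMAXPROCS on exit

	log.Printf("GOMAXPROCS: %d (set by automaxprocs: %d)", automaxprocs.Current(), automaxprocs.Value())
}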
pkg/btcutils/address.go (new file, 212 lines)
@@ -0,0 +1,212 @@
|
||||
package btcutils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
)
|
||||
|
||||
const (
|
||||
// MaxSupportedPkScriptSize is the maximum supported size of a pkScript.
|
||||
MaxSupportedPkScriptSize = 40
|
||||
)
|
||||
|
||||
// IsAddress returns whether or not the passed string is a valid bitcoin address of a supported type.
|
||||
//
|
||||
// NetParams is optional. If provided, we only check for that network,
|
||||
// otherwise, we check for all supported networks.
|
||||
func IsAddress(address string, defaultNet ...*chaincfg.Params) bool {
|
||||
if len(address) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// If defaultNet is provided, we only check for that network.
|
||||
net, ok := utils.Optional(defaultNet)
|
||||
if ok {
|
||||
_, _, err := parseAddress(address, net)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Otherwise, we check for all supported networks.
|
||||
for _, net := range supportedNetworks {
|
||||
_, _, err := parseAddress(address, net)
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// TODO: create GetAddressNetwork
|
||||
// check `Bech32HRPSegwit` prefix or netID for P2SH/P2PKH is equal to `PubKeyHashAddrID/ScriptHashAddrID`
|
||||
|
||||
// GetAddressType returns the address type of the passed address.
|
||||
func GetAddressType(address string, net *chaincfg.Params) (AddressType, error) {
|
||||
_, addrType, err := parseAddress(address, net)
|
||||
return addrType, errors.WithStack(err)
|
||||
}
|
||||
|
||||
type Address struct {
|
||||
decoded btcutil.Address
|
||||
net *chaincfg.Params
|
||||
encoded string
|
||||
encodedType AddressType
|
||||
scriptPubKey [MaxSupportedPkScriptSize]byte
|
||||
scriptPubKeySize int
|
||||
}
|
||||
|
||||
// NewAddress creates a new address from the given address string.
|
||||
//
|
||||
// defaultNet is required if your address is P2SH or P2PKH (legacy or nested segwit)
|
||||
// If your address is P2WSH, P2WPKH or P2TR, defaultNet is not required.
|
||||
func NewAddress(address string, defaultNet ...*chaincfg.Params) Address {
|
||||
addr, err := SafeNewAddress(address, defaultNet...)
|
||||
if err != nil {
|
||||
logger.Panic("can't create parse address", slogx.Error(err), slogx.String("package", "btcutils"))
|
||||
}
|
||||
return addr
|
||||
}
|
||||
|
||||
// SafeNewAddress creates a new address from the given address string.
|
||||
// It returns an error if the address is invalid.
|
||||
//
|
||||
// defaultNet is required if your address is P2SH or P2PKH (legacy or nested segwit)
|
||||
// If your address is P2WSH, P2WPKH or P2TR, defaultNet is not required.
|
||||
func SafeNewAddress(address string, defaultNet ...*chaincfg.Params) (Address, error) {
|
||||
net := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)
|
||||
|
||||
decoded, addrType, err := parseAddress(address, net)
|
||||
if err != nil {
|
||||
return Address{}, errors.Wrap(err, "can't parse address")
|
||||
}
|
||||
|
||||
scriptPubkey, err := txscript.PayToAddrScript(decoded)
|
||||
if err != nil {
|
||||
return Address{}, errors.Wrap(err, "can't get script pubkey")
|
||||
}
|
||||
|
||||
fixedPkScript := [MaxSupportedPkScriptSize]byte{}
|
||||
copy(fixedPkScript[:], scriptPubkey)
|
||||
return Address{
|
||||
decoded: decoded,
|
||||
net: net,
|
||||
encoded: decoded.EncodeAddress(),
|
||||
encodedType: addrType,
|
||||
scriptPubKey: fixedPkScript,
|
||||
scriptPubKeySize: len(scriptPubkey),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the address string.
|
||||
func (a Address) String() string {
|
||||
return a.encoded
|
||||
}
|
||||
|
||||
// Type returns the address type.
|
||||
func (a Address) Type() AddressType {
|
||||
return a.encodedType
|
||||
}
|
||||
|
||||
// Decoded returns the btcutil.Address
|
||||
func (a Address) Decoded() btcutil.Address {
|
||||
return a.decoded
|
||||
}
|
||||
|
||||
// IsForNet returns whether or not the address is associated with the passed bitcoin network.
|
||||
func (a Address) IsForNet(net *chaincfg.Params) bool {
|
||||
return a.decoded.IsForNet(net)
|
||||
}
|
||||
|
||||
// ScriptAddress returns the raw bytes of the address to be used when inserting the address into a txout's script.
|
||||
func (a Address) ScriptAddress() []byte {
|
||||
return a.decoded.ScriptAddress()
|
||||
}
|
||||
|
||||
// Net returns the address network params.
|
||||
func (a Address) Net() *chaincfg.Params {
|
||||
return a.net
|
||||
}
|
||||
|
||||
// NetworkName returns the name of the address network.
|
||||
func (a Address) NetworkName() string {
|
||||
return a.net.Name
|
||||
}
|
||||
|
||||
// ScriptPubKey returns the pkScript (script pubkey) of the address.
|
||||
func (a Address) ScriptPubKey() []byte {
|
||||
return a.scriptPubKey[:a.scriptPubKeySize]
|
||||
}
|
||||
|
||||
// Equal returns true if the addresses are equal.
|
||||
func (a Address) Equal(b Address) bool {
|
||||
return a.encoded == b.encoded
|
||||
}
|
||||
|
||||
// MarshalText implements the encoding.TextMarshaler interface.
|
||||
func (a Address) MarshalText() ([]byte, error) {
|
||||
return []byte(a.encoded), nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements the encoding.TextUnmarshaler interface.
|
||||
func (a *Address) UnmarshalText(input []byte) error {
|
||||
address := string(input)
|
||||
addr, err := SafeNewAddress(address)
|
||||
if err == nil {
|
||||
*a = addr
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(errs.InvalidArgument, "invalid address `%s`", address)
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (a Address) MarshalJSON() ([]byte, error) {
|
||||
t, err := a.MarshalText()
|
||||
if err != nil {
|
||||
return nil, &json.MarshalerError{Type: reflect.TypeOf(a), Err: err}
|
||||
}
|
||||
b := make([]byte, len(t)+2)
|
||||
b[0], b[len(b)-1] = '"', '"' // add quotes
|
||||
copy(b[1:], t)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface. It expects a quoted address string.
|
||||
func (a *Address) UnmarshalJSON(input []byte) error {
|
||||
if !(len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"') {
|
||||
return &json.UnmarshalTypeError{Value: "non-string", Type: reflect.TypeOf(Address{})}
|
||||
}
|
||||
if err := a.UnmarshalText(input[1 : len(input)-1]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseAddress(address string, params *chaincfg.Params) (btcutil.Address, AddressType, error) {
|
||||
decoded, err := btcutil.DecodeAddress(address, params)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "can't decode address `%s` for network `%s`", address, params.Name)
|
||||
}
|
||||
|
||||
switch decoded.(type) {
|
||||
case *btcutil.AddressWitnessPubKeyHash:
|
||||
return decoded, AddressP2WPKH, nil
|
||||
case *btcutil.AddressTaproot:
|
||||
return decoded, AddressP2TR, nil
|
||||
case *btcutil.AddressScriptHash:
|
||||
return decoded, AddressP2SH, nil
|
||||
case *btcutil.AddressPubKeyHash:
|
||||
return decoded, AddressP2PKH, nil
|
||||
case *btcutil.AddressWitnessScriptHash:
|
||||
return decoded, AddressP2WSH, nil
|
||||
default:
|
||||
return nil, 0, errors.Wrap(errs.Unsupported, "unsupported address type")
|
||||
}
|
||||
}
|
||||
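Because Address implements the text and JSON (un)marshaler interfaces above, it can sit directly in API payload structs. A small round-trip sketch (address taken from the test cases below; assumes encoding/json and the btcutils package are imported):

type payload struct {
	Recipient btcutils.Address `json:"recipient"`
}

var p payload
_ = json.Unmarshal([]byte(`{"recipient":"bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh"}`), &p)
out, _ := json.Marshal(p)
// string(out) == `{"recipient":"bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh"}`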
pkg/btcutils/address_bench_test.go (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
package btcutils_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/gaze-network/indexer-network/pkg/btcutils"
|
||||
)
|
||||
|
||||
/*
|
||||
NOTE:
|
||||
|
||||
# Compare this benchmark to go-ethereum/common.Address utils
|
||||
- go-ethereum/common.HexToAddress speed: 45 ns/op, 48 B/op, 1 allocs/op
|
||||
- go-ethereum/common.IsHexAddress speed: 25 ns/op, 0 B/op, 0 allocs/op
|
||||
|
||||
It's slower than the go-ethereum/common.Address utils because an Ethereum wallet address is a 20-byte hex string,
|
||||
but Bitcoin has many address types, and each requires a more complex decoding algorithm (the address type can't be determined and validated directly from the address string).
|
||||
|
||||
20/Jan/2024 @Planxnx Macbook Air M1 16GB
|
||||
BenchmarkIsAddress/specific-network/mainnet/P2WPKH-8 1776146 625.6 ns/op 120 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/specific-network/testnet3/P2WPKH-8 1917876 623.2 ns/op 120 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/specific-network/mainnet/P2TR-8 1330348 915.4 ns/op 160 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/specific-network/testnet3/P2TR-8 1235806 931.1 ns/op 160 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/specific-network/mainnet/P2WSH-8 1261730 960.9 ns/op 160 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/specific-network/testnet3/P2WSH-8 1307851 916.1 ns/op 160 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/specific-network/mainnet/P2SH-8 3081762 402.0 ns/op 192 B/op 8 allocs/op
|
||||
BenchmarkIsAddress/specific-network/testnet3/P2SH-8 3245838 344.9 ns/op 176 B/op 7 allocs/op
|
||||
BenchmarkIsAddress/specific-network/mainnet/P2PKH-8 2904252 410.4 ns/op 184 B/op 8 allocs/op
|
||||
BenchmarkIsAddress/specific-network/testnet3/P2PKH-8 3522332 342.8 ns/op 176 B/op 7 allocs/op
|
||||
BenchmarkIsAddress/automate-network/mainnet/P2WPKH-8 1882059 637.6 ns/op 120 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/automate-network/testnet3/P2WPKH-8 1626151 664.8 ns/op 120 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/automate-network/mainnet/P2TR-8 1250253 952.1 ns/op 160 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/automate-network/testnet3/P2TR-8 1257901 993.7 ns/op 160 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/automate-network/mainnet/P2WSH-8 1000000 1005 ns/op 160 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/automate-network/testnet3/P2WSH-8 1209108 971.2 ns/op 160 B/op 3 allocs/op
|
||||
BenchmarkIsAddress/automate-network/mainnet/P2SH-8 1869075 625.0 ns/op 268 B/op 9 allocs/op
|
||||
BenchmarkIsAddress/automate-network/testnet3/P2SH-8 779496 1609 ns/op 694 B/op 17 allocs/op
|
||||
BenchmarkIsAddress/automate-network/mainnet/P2PKH-8 1924058 650.6 ns/op 259 B/op 9 allocs/op
|
||||
BenchmarkIsAddress/automate-network/testnet3/P2PKH-8 721510 1690 ns/op 694 B/op 17 allocs/op
|
||||
*/
|
||||
func BenchmarkIsAddress(b *testing.B) {
|
||||
cases := []btcutils.Address{
|
||||
/* P2WPKH */ btcutils.NewAddress("bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh", &chaincfg.MainNetParams),
|
||||
/* P2WPKH */ btcutils.NewAddress("tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey", &chaincfg.TestNet3Params),
|
||||
/* P2TR */ btcutils.NewAddress("bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38", &chaincfg.MainNetParams),
|
||||
/* P2TR */ btcutils.NewAddress("tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg", &chaincfg.TestNet3Params),
|
||||
/* P2WSH */ btcutils.NewAddress("bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak", &chaincfg.MainNetParams),
|
||||
/* P2WSH */ btcutils.NewAddress("tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7", &chaincfg.TestNet3Params),
|
||||
/* P2SH */ btcutils.NewAddress("3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw", &chaincfg.MainNetParams),
|
||||
/* P2SH */ btcutils.NewAddress("2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ", &chaincfg.TestNet3Params),
|
||||
/* P2PKH */ btcutils.NewAddress("1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH", &chaincfg.MainNetParams),
|
||||
/* P2PKH */ btcutils.NewAddress("migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3", &chaincfg.TestNet3Params),
|
||||
}
|
||||
|
||||
b.Run("specific-network", func(b *testing.B) {
|
||||
for _, c := range cases {
|
||||
b.Run(c.NetworkName()+"/"+c.Type().String(), func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_ = btcutils.IsAddress(c.String(), c.Net())
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("automate-network", func(b *testing.B) {
|
||||
for _, c := range cases {
|
||||
b.Run(c.NetworkName()+"/"+c.Type().String(), func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ok := btcutils.IsAddress(c.String())
|
||||
if !ok {
|
||||
b.Error("IsAddress returned false")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
pkg/btcutils/address_test.go (new file, 449 lines)
@@ -0,0 +1,449 @@
|
||||
package btcutils_test
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/gaze-network/indexer-network/pkg/btcutils"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetAddressType(t *testing.T) {
|
||||
type Spec struct {
|
||||
Address string
|
||||
DefaultNet *chaincfg.Params
|
||||
|
||||
ExpectedError error
|
||||
ExpectedAddressType btcutils.AddressType
|
||||
}
|
||||
|
||||
specs := []Spec{
|
||||
{
|
||||
Address: "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2WPKH,
|
||||
},
|
||||
{
|
||||
Address: "tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2WPKH,
|
||||
},
|
||||
{
|
||||
Address: "bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2TR,
|
||||
},
|
||||
{
|
||||
Address: "tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2TR,
|
||||
},
|
||||
{
|
||||
Address: "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2SH,
|
||||
},
|
||||
{
|
||||
Address: "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2PKH,
|
||||
},
|
||||
{
|
||||
Address: "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2WSH,
|
||||
},
|
||||
{
|
||||
Address: "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3",
|
||||
DefaultNet: &chaincfg.TestNet3Params,
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2PKH,
|
||||
},
|
||||
{
|
||||
Address: "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2WSH,
|
||||
},
|
||||
{
|
||||
Address: "2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ",
|
||||
DefaultNet: &chaincfg.TestNet3Params,
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2SH,
|
||||
},
|
||||
}
|
||||
|
||||
for _, spec := range specs {
|
||||
t.Run(fmt.Sprintf("address:%s", spec.Address), func(t *testing.T) {
|
||||
actualAddressType, actualError := btcutils.GetAddressType(spec.Address, spec.DefaultNet)
|
||||
if spec.ExpectedError != nil {
|
||||
assert.ErrorIs(t, actualError, spec.ExpectedError)
|
||||
} else {
|
||||
assert.Equal(t, spec.ExpectedAddressType, actualAddressType)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewAddress(t *testing.T) {
|
||||
type Spec struct {
|
||||
Address string
|
||||
DefaultNet *chaincfg.Params
|
||||
|
||||
ExpectedAddressType btcutils.AddressType
|
||||
}
|
||||
|
||||
specs := []Spec{
|
||||
{
|
||||
Address: "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh",
|
||||
// DefaultNet: &chaincfg.MainNetParams, // Optional
|
||||
|
||||
ExpectedAddressType: btcutils.AddressP2WPKH,
|
||||
},
|
||||
{
|
||||
Address: "tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey",
|
||||
// DefaultNet: &chaincfg.MainNetParams, // Optional
|
||||
|
||||
ExpectedAddressType: btcutils.AddressP2WPKH,
|
||||
},
|
||||
{
|
||||
Address: "bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38",
|
||||
// DefaultNet: &chaincfg.MainNetParams, // Optional
|
||||
|
||||
ExpectedAddressType: btcutils.AddressP2TR,
|
||||
},
|
||||
{
|
||||
Address: "tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg",
|
||||
// DefaultNet: &chaincfg.MainNetParams, // Optional
|
||||
|
||||
ExpectedAddressType: btcutils.AddressP2TR,
|
||||
},
|
||||
{
|
||||
Address: "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
|
||||
// DefaultNet: &chaincfg.MainNetParams, // Optional
|
||||
|
||||
ExpectedAddressType: btcutils.AddressP2WSH,
|
||||
},
|
||||
{
|
||||
Address: "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
|
||||
// DefaultNet: &chaincfg.MainNetParams, // Optional
|
||||
|
||||
ExpectedAddressType: btcutils.AddressP2WSH,
|
||||
},
|
||||
{
|
||||
Address: "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
|
||||
ExpectedAddressType: btcutils.AddressP2SH,
|
||||
},
|
||||
{
|
||||
Address: "2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ",
|
||||
DefaultNet: &chaincfg.TestNet3Params,
|
||||
|
||||
ExpectedAddressType: btcutils.AddressP2SH,
|
||||
},
|
||||
{
|
||||
Address: "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
|
||||
ExpectedAddressType: btcutils.AddressP2PKH,
|
||||
},
|
||||
{
|
||||
Address: "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3",
|
||||
DefaultNet: &chaincfg.TestNet3Params,
|
||||
|
||||
ExpectedAddressType: btcutils.AddressP2PKH,
|
||||
},
|
||||
}
|
||||
|
||||
for _, spec := range specs {
|
||||
t.Run(fmt.Sprintf("address:%s,type:%s", spec.Address, spec.ExpectedAddressType), func(t *testing.T) {
|
||||
addr := btcutils.NewAddress(spec.Address, spec.DefaultNet)
|
||||
|
||||
assert.Equal(t, spec.ExpectedAddressType, addr.Type())
|
||||
assert.Equal(t, spec.Address, addr.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsAddress(t *testing.T) {
|
||||
type Spec struct {
|
||||
Address string
|
||||
Expected bool
|
||||
}
|
||||
|
||||
specs := []Spec{
|
||||
{
|
||||
Address: "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh",
|
||||
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
Address: "tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey",
|
||||
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
Address: "bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38",
|
||||
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
Address: "tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg",
|
||||
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
Address: "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
|
||||
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
Address: "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
|
||||
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
Address: "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
|
||||
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
Address: "2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ",
|
||||
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
Address: "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
|
||||
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
Address: "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3",
|
||||
|
||||
Expected: true,
|
||||
},
|
||||
{
|
||||
Address: "",
|
||||
|
||||
Expected: false,
|
||||
},
|
||||
{
|
||||
Address: "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz2",
|
||||
|
||||
Expected: false,
|
||||
},
|
||||
{
|
||||
Address: "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczz",
|
||||
|
||||
Expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, spec := range specs {
|
||||
t.Run(fmt.Sprintf("address:%s", spec.Address), func(t *testing.T) {
|
||||
ok := btcutils.IsAddress(spec.Address)
|
||||
assert.Equal(t, spec.Expected, ok)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddressEncoding(t *testing.T) {
|
||||
rawAddress := "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh"
|
||||
address := btcutils.NewAddress(rawAddress, &chaincfg.MainNetParams)
|
||||
|
||||
type Spec struct {
|
||||
Data interface{}
|
||||
Expected string
|
||||
}
|
||||
|
||||
specs := []Spec{
|
||||
{
|
||||
Data: address,
|
||||
Expected: fmt.Sprintf(`"%s"`, rawAddress),
|
||||
},
|
||||
{
|
||||
Data: map[string]interface{}{
|
||||
"address": rawAddress,
|
||||
},
|
||||
Expected: fmt.Sprintf(`{"address":"%s"}`, rawAddress),
|
||||
},
|
||||
}
|
||||
|
||||
for i, spec := range specs {
|
||||
t.Run(fmt.Sprint(i+1), func(t *testing.T) {
|
||||
actual, err := json.Marshal(spec.Data)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, spec.Expected, string(actual))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddressDecoding(t *testing.T) {
|
||||
rawAddress := "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh"
|
||||
address := btcutils.NewAddress(rawAddress, &chaincfg.MainNetParams)
|
||||
|
||||
// Case #1: address is a string
|
||||
t.Run("from_string", func(t *testing.T) {
|
||||
input := fmt.Sprintf(`"%s"`, rawAddress)
|
||||
expected := address
|
||||
actual := btcutils.Address{}
|
||||
|
||||
err := json.Unmarshal([]byte(input), &actual)
|
||||
if !assert.NoError(t, err) {
|
||||
t.FailNow()
|
||||
}
|
||||
assert.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
// Case #2: address is a field of a struct
|
||||
t.Run("from_field_string", func(t *testing.T) {
|
||||
type Data struct {
|
||||
Address btcutils.Address `json:"address"`
|
||||
}
|
||||
input := fmt.Sprintf(`{"address":"%s"}`, rawAddress)
|
||||
expected := Data{Address: address}
|
||||
actual := Data{}
|
||||
err := json.Unmarshal([]byte(input), &actual)
|
||||
if !assert.NoError(t, err) {
|
||||
t.FailNow()
|
||||
}
|
||||
assert.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
// Case #3: address is an element of an array
|
||||
t.Run("from_array", func(t *testing.T) {
|
||||
input := fmt.Sprintf(`["%s"]`, rawAddress)
|
||||
expected := []btcutils.Address{address}
|
||||
actual := []btcutils.Address{}
|
||||
err := json.Unmarshal([]byte(input), &actual)
|
||||
if !assert.NoError(t, err) {
|
||||
t.FailNow()
|
||||
}
|
||||
assert.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
// Case #4: not supported address type
|
||||
t.Run("from_string/not_address", func(t *testing.T) {
|
||||
input := fmt.Sprintf(`"%s"`, "THIS_IS_NOT_SUPPORTED_ADDRESS")
|
||||
actual := btcutils.Address{}
|
||||
err := json.Unmarshal([]byte(input), &actual)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
// Case #5: invalid field type
|
||||
t.Run("from_number", func(t *testing.T) {
|
||||
type Data struct {
|
||||
Address btcutils.Address `json:"address"`
|
||||
}
|
||||
input := fmt.Sprintf(`{"address":%d}`, 123)
|
||||
actual := Data{}
|
||||
err := json.Unmarshal([]byte(input), &actual)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAddressPkScript(t *testing.T) {
|
||||
anyErr := errors.New("any error")
|
||||
type Spec struct {
|
||||
Address string
|
||||
DefaultNet *chaincfg.Params
|
||||
ExpectedError error
|
||||
ExpectedPkScript string // hex encoded
|
||||
}
|
||||
specs := []Spec{
|
||||
{
|
||||
Address: "some_invalid_address",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: anyErr,
|
||||
ExpectedPkScript: "",
|
||||
},
|
||||
{
|
||||
// P2WPKH
|
||||
Address: "bc1qdx72th7e3z8zc5wdrdxweswfcne974pjneyjln",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "001469bca5dfd9888e2c51cd1b4cecc1c9c4f25f5432",
|
||||
},
|
||||
{
|
||||
// P2WPKH
|
||||
Address: "bc1q7cj6gz6t3d28qg7kxhrc7h5t3h0re34fqqalga",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "0014f625a40b4b8b547023d635c78f5e8b8dde3cc6a9",
|
||||
},
|
||||
{
|
||||
// P2TR
|
||||
Address: "bc1pfd0zw2jwlpn4xckpr3dxpt7x0gw6wetuftxvrc4dt2qgn9azjuus65fug6",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "51204b5e272a4ef8675362c11c5a60afc67a1da7657c4accc1e2ad5a808997a29739",
|
||||
},
|
||||
{
|
||||
// P2TR
|
||||
Address: "bc1pxpumml545tqum5afarzlmnnez2npd35nvf0j0vnrp88nemqsn54qle05sm",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "51203079bdfe95a2c1cdd3a9e8c5fdce7912a616c693625f27b26309cf3cec109d2a",
|
||||
},
|
||||
{
|
||||
// P2SH
|
||||
Address: "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "a91477e1a3d54f545d83869ae3a6b28b071422801d7b87",
|
||||
},
|
||||
{
|
||||
// P2PKH
|
||||
Address: "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "76a914cecb25b53809991c7beef2d27bc2be49e78c684388ac",
|
||||
},
|
||||
{
|
||||
// P2WSH
|
||||
Address: "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "0020cdbf909e935c855d3e8d1b61aeb9c5e3c03ae8021b286839b1a72f2e48fdba70",
|
||||
},
|
||||
}
|
||||
|
||||
for _, spec := range specs {
|
||||
t.Run(spec.Address, func(t *testing.T) {
|
||||
addr, err := btcutils.SafeNewAddress(spec.Address, spec.DefaultNet)
|
||||
if spec.ExpectedError != nil {
|
||||
if errors.Is(spec.ExpectedError, anyErr) {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.ErrorIs(t, err, spec.ExpectedError)
|
||||
}
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, spec.ExpectedPkScript, hex.EncodeToString(addr.ScriptPubKey()))
|
||||
})
|
||||
}
|
||||
}
|
||||
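A small usage sketch tying together the behaviours these tests exercise (`SafeNewAddress`, `Type`, `ScriptPubKey`, and JSON round-tripping); the address value is taken from the test data above:

```go
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"log"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/gaze-network/indexer-network/pkg/btcutils"
)

type Payload struct {
	Address btcutils.Address `json:"address"`
}

func main() {
	// SafeNewAddress returns an error instead of panicking on malformed input.
	addr, err := btcutils.SafeNewAddress("bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh", &chaincfg.MainNetParams)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(addr.Type())                             // P2WPKH script class
	fmt.Println(hex.EncodeToString(addr.ScriptPubKey())) // pkScript of the address

	// Address marshals to / unmarshals from a plain JSON string.
	var p Payload
	_ = json.Unmarshal([]byte(`{"address":"bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh"}`), &p)
	out, _ := json.Marshal(p)
	fmt.Println(string(out))
}
```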
58
pkg/btcutils/btc.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package btcutils
|
||||
|
||||
import (
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
)
|
||||
|
||||
var (
|
||||
// NullAddress is an address that script address is all zeros.
|
||||
NullAddress = NewAddress("1111111111111111111114oLvT2", &chaincfg.MainNetParams)
|
||||
|
||||
// NullHash is a hash that all bytes are zero.
|
||||
NullHash = utils.Must(chainhash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000"))
|
||||
)
|
||||
|
||||
// TransactionType is the type of bitcoin transaction
|
||||
// It's an alias of txscript.ScriptClass
|
||||
type TransactionType = txscript.ScriptClass
|
||||
|
||||
// AddressType is the type of bitcoin address.
|
||||
// It's an alias of txscript.ScriptClass
|
||||
type AddressType = txscript.ScriptClass
|
||||
|
||||
// Types of bitcoin transaction
|
||||
const (
|
||||
TransactionP2WPKH = txscript.WitnessV0PubKeyHashTy
|
||||
TransactionP2TR = txscript.WitnessV1TaprootTy
|
||||
TransactionTaproot = TransactionP2TR // Alias of P2TR
|
||||
TransactionP2SH = txscript.ScriptHashTy
|
||||
TransactionP2PKH = txscript.PubKeyHashTy
|
||||
TransactionP2WSH = txscript.WitnessV0ScriptHashTy
|
||||
)
|
||||
|
||||
// Types of bitcoin address
|
||||
const (
|
||||
AddressP2WPKH = txscript.WitnessV0PubKeyHashTy
|
||||
AddressP2TR = txscript.WitnessV1TaprootTy
|
||||
AddressTaproot = AddressP2TR // Alias of P2TR
|
||||
AddressP2SH = txscript.ScriptHashTy
|
||||
AddressP2PKH = txscript.PubKeyHashTy
|
||||
AddressP2WSH = txscript.WitnessV0ScriptHashTy
|
||||
)
|
||||
|
||||
// IsSupportType returns true if the given tx/address type is supported.
|
||||
func IsSupportType(t txscript.ScriptClass) bool {
|
||||
_, ok := supportedTypes[t]
|
||||
return ok
|
||||
}
|
||||
|
||||
var supportedTypes = map[txscript.ScriptClass]struct{}{
|
||||
txscript.WitnessV0PubKeyHashTy: {},
|
||||
txscript.WitnessV1TaprootTy: {},
|
||||
txscript.ScriptHashTy: {},
|
||||
txscript.PubKeyHashTy: {},
|
||||
txscript.WitnessV0ScriptHashTy: {},
|
||||
}
|
||||
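Since these are plain `txscript.ScriptClass` aliases, they can be compared directly against btcd's own constants; a quick sketch:

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/txscript"
	"github.com/gaze-network/indexer-network/pkg/btcutils"
)

func main() {
	// The address/transaction "types" are txscript.ScriptClass values,
	// so equality checks and printing work out of the box.
	fmt.Println(btcutils.AddressP2TR == txscript.WitnessV1TaprootTy) // true
	fmt.Println(btcutils.IsSupportType(btcutils.AddressP2WPKH))      // true
	fmt.Println(btcutils.IsSupportType(txscript.MultiSigTy))         // false: not in the supported set
}
```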
23
pkg/btcutils/btc_network.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package btcutils
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
)
|
||||
|
||||
var supportedNetworks = map[string]*chaincfg.Params{
|
||||
"mainnet": &chaincfg.MainNetParams,
|
||||
"testnet": &chaincfg.TestNet3Params,
|
||||
}
|
||||
|
||||
// IsSupportedNetwork returns true if the given network is supported.
|
||||
//
|
||||
// TODO: create enum for network
|
||||
func IsSupportedNetwork(network string) bool {
|
||||
_, ok := supportedNetworks[network]
|
||||
return ok
|
||||
}
|
||||
|
||||
// GetNetParams returns the *chaincfg.Params for the given network.
|
||||
func GetNetParams(network string) *chaincfg.Params {
|
||||
return supportedNetworks[network]
|
||||
}
|
||||
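A short sketch of how these two helpers combine; note that `GetNetParams` is a plain map lookup and returns nil for unknown names, so check `IsSupportedNetwork` (or the returned pointer) first:

```go
package main

import (
	"fmt"
	"log"

	"github.com/gaze-network/indexer-network/pkg/btcutils"
)

func main() {
	network := "testnet"

	// Guard the lookup: GetNetParams returns nil for unsupported network names.
	if !btcutils.IsSupportedNetwork(network) {
		log.Fatalf("unsupported network: %s", network)
	}
	params := btcutils.GetNetParams(network)
	fmt.Println(params.Name) // testnet3
}
```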
69
pkg/btcutils/pk_script.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package btcutils
|
||||
|
||||
import (
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
)
|
||||
|
||||
// NewPkScript creates a pubkey script(or witness program) from the given address string
|
||||
//
|
||||
// see: https://en.bitcoin.it/wiki/Script
|
||||
func NewPkScript(address string, defaultNet ...*chaincfg.Params) ([]byte, error) {
|
||||
net := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)
|
||||
decoded, _, err := parseAddress(address, net)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't parse address")
|
||||
}
|
||||
scriptPubkey, err := txscript.PayToAddrScript(decoded)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't get script pubkey")
|
||||
}
|
||||
return scriptPubkey, nil
|
||||
}
|
||||
|
||||
// GetAddressTypeFromPkScript returns the address type from the given pubkey script/script pubkey.
|
||||
func GetAddressTypeFromPkScript(pkScript []byte, defaultNet ...*chaincfg.Params) (AddressType, error) {
|
||||
net := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)
|
||||
scriptClass, _, _, err := txscript.ExtractPkScriptAddrs(pkScript, net)
|
||||
if err != nil {
|
||||
return txscript.NonStandardTy, errors.Wrap(err, "can't parse pkScript")
|
||||
}
|
||||
return scriptClass, nil
|
||||
}
|
||||
|
||||
// ExtractAddressFromPkScript extracts address from the given pubkey script/script pubkey.
|
||||
// Multi-signature scripts are not supported.
|
||||
func ExtractAddressFromPkScript(pkScript []byte, defaultNet ...*chaincfg.Params) (Address, error) {
|
||||
if len(pkScript) == 0 {
|
||||
return Address{}, errors.New("empty pkScript")
|
||||
}
|
||||
if pkScript[0] == txscript.OP_RETURN {
|
||||
return Address{}, errors.Wrap(errs.NotSupported, "OP_RETURN script")
|
||||
}
|
||||
net := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)
|
||||
addrType, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, net)
|
||||
if err != nil {
|
||||
return Address{}, errors.Wrap(err, "can't parse pkScript")
|
||||
}
|
||||
if !IsSupportType(addrType) {
|
||||
return Address{}, errors.Wrapf(errs.NotSupported, "unsupported pkscript type %s", addrType)
|
||||
}
|
||||
if len(addrs) == 0 {
|
||||
return Address{}, errors.New("can't extract address from pkScript")
|
||||
}
|
||||
|
||||
fixedPkScript := [MaxSupportedPkScriptSize]byte{}
|
||||
copy(fixedPkScript[:], pkScript)
|
||||
|
||||
return Address{
|
||||
decoded: addrs[0],
|
||||
net: net,
|
||||
encoded: addrs[0].EncodeAddress(),
|
||||
encodedType: addrType,
|
||||
scriptPubKey: fixedPkScript,
|
||||
scriptPubKeySize: len(pkScript),
|
||||
}, nil
|
||||
}
|
||||
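A round-trip sketch using the helpers above; the address and expected script hex come from the tests that follow:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"log"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/gaze-network/indexer-network/pkg/btcutils"
)

func main() {
	// Address -> pkScript
	pkScript, err := btcutils.NewPkScript("bc1qdx72th7e3z8zc5wdrdxweswfcne974pjneyjln", &chaincfg.MainNetParams)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hex.EncodeToString(pkScript)) // 001469bca5dfd9888e2c51cd1b4cecc1c9c4f25f5432

	// pkScript -> script class, and back to an Address value
	addrType, err := btcutils.GetAddressTypeFromPkScript(pkScript, &chaincfg.MainNetParams)
	if err != nil {
		log.Fatal(err)
	}
	addr, err := btcutils.ExtractAddressFromPkScript(pkScript, &chaincfg.MainNetParams)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(addrType, addr.String())
}
```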
217
pkg/btcutils/pk_script_test.go
Normal file
@@ -0,0 +1,217 @@
|
||||
package btcutils_test
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/btcutil"
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/pkg/btcutils"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewPkScript(t *testing.T) {
|
||||
anyError := errors.New("any error")
|
||||
|
||||
type Spec struct {
|
||||
Address string
|
||||
DefaultNet *chaincfg.Params
|
||||
ExpectedError error
|
||||
ExpectedPkScript string // hex encoded
|
||||
}
|
||||
|
||||
specs := []Spec{
|
||||
{
|
||||
Address: "some_invalid_address",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: anyError,
|
||||
ExpectedPkScript: "",
|
||||
},
|
||||
{
|
||||
// P2WPKH
|
||||
Address: "bc1qdx72th7e3z8zc5wdrdxweswfcne974pjneyjln",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "001469bca5dfd9888e2c51cd1b4cecc1c9c4f25f5432",
|
||||
},
|
||||
{
|
||||
// P2WPKH
|
||||
Address: "bc1q7cj6gz6t3d28qg7kxhrc7h5t3h0re34fqqalga",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "0014f625a40b4b8b547023d635c78f5e8b8dde3cc6a9",
|
||||
},
|
||||
{
|
||||
// P2TR
|
||||
Address: "bc1pfd0zw2jwlpn4xckpr3dxpt7x0gw6wetuftxvrc4dt2qgn9azjuus65fug6",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "51204b5e272a4ef8675362c11c5a60afc67a1da7657c4accc1e2ad5a808997a29739",
|
||||
},
|
||||
{
|
||||
// P2TR
|
||||
Address: "bc1pxpumml545tqum5afarzlmnnez2npd35nvf0j0vnrp88nemqsn54qle05sm",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "51203079bdfe95a2c1cdd3a9e8c5fdce7912a616c693625f27b26309cf3cec109d2a",
|
||||
},
|
||||
{
|
||||
// P2SH
|
||||
Address: "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "a91477e1a3d54f545d83869ae3a6b28b071422801d7b87",
|
||||
},
|
||||
{
|
||||
// P2PKH
|
||||
Address: "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "76a914cecb25b53809991c7beef2d27bc2be49e78c684388ac",
|
||||
},
|
||||
{
|
||||
// P2WSH
|
||||
Address: "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
|
||||
DefaultNet: &chaincfg.MainNetParams,
|
||||
ExpectedError: nil,
|
||||
ExpectedPkScript: "0020cdbf909e935c855d3e8d1b61aeb9c5e3c03ae8021b286839b1a72f2e48fdba70",
|
||||
},
|
||||
}
|
||||
|
||||
for _, spec := range specs {
|
||||
t.Run(fmt.Sprintf("address:%s", spec.Address), func(t *testing.T) {
|
||||
// Validate Expected PkScript
|
||||
if spec.ExpectedError == nil {
|
||||
{
|
||||
expectedPkScriptRaw, err := hex.DecodeString(spec.ExpectedPkScript)
|
||||
if err != nil {
|
||||
t.Fatalf("can't decode expected pkscript %s, Reason: %s", spec.ExpectedPkScript, err)
|
||||
}
|
||||
expectedPkScript, err := txscript.ParsePkScript(expectedPkScriptRaw)
|
||||
if err != nil {
|
||||
t.Fatalf("invalid expected pkscript %s, Reason: %s", spec.ExpectedPkScript, err)
|
||||
}
|
||||
|
||||
expectedAddress, err := expectedPkScript.Address(spec.DefaultNet)
|
||||
if err != nil {
|
||||
t.Fatalf("can't get address from expected pkscript %s, Reason: %s", spec.ExpectedPkScript, err)
|
||||
}
|
||||
assert.Equal(t, spec.Address, expectedAddress.EncodeAddress())
|
||||
}
|
||||
{
|
||||
address, err := btcutil.DecodeAddress(spec.Address, spec.DefaultNet)
|
||||
if err != nil {
|
||||
t.Fatalf("can't decode address %s(%s),Reason: %s", spec.Address, spec.DefaultNet.Name, err)
|
||||
}
|
||||
|
||||
pkScript, err := txscript.PayToAddrScript(address)
|
||||
if err != nil {
|
||||
t.Fatalf("can't get pkscript from address %s(%s),Reason: %s", spec.Address, spec.DefaultNet.Name, err)
|
||||
}
|
||||
|
||||
pkScriptStr := hex.EncodeToString(pkScript)
|
||||
assert.Equal(t, spec.ExpectedPkScript, pkScriptStr)
|
||||
}
|
||||
}
|
||||
|
||||
pkScript, err := btcutils.NewPkScript(spec.Address, spec.DefaultNet)
|
||||
if spec.ExpectedError == anyError {
|
||||
assert.Error(t, err)
|
||||
} else if spec.ExpectedError != nil {
|
||||
assert.ErrorIs(t, err, spec.ExpectedError)
|
||||
} else {
|
||||
address, err := btcutils.SafeNewAddress(spec.Address, spec.DefaultNet)
|
||||
if err != nil {
|
||||
t.Fatalf("can't create address %s(%s),Reason: %s", spec.Address, spec.DefaultNet.Name, err)
|
||||
}
|
||||
|
||||
// ScriptPubKey from address and from NewPkScript should be the same
|
||||
assert.Equal(t, address.ScriptPubKey(), pkScript)
|
||||
|
||||
// Expected PkScript and New PkScript should be the same
|
||||
pkScriptStr := hex.EncodeToString(pkScript)
|
||||
assert.Equal(t, spec.ExpectedPkScript, pkScriptStr)
|
||||
|
||||
// Can convert PkScript back to same address
|
||||
actualPkScript, err := txscript.ParsePkScript(address.ScriptPubKey())
|
||||
if !assert.NoError(t, err) {
|
||||
t.Fail()
|
||||
}
|
||||
assert.Equal(t, address.Decoded().String(), utils.Must(actualPkScript.Address(spec.DefaultNet)).String())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAddressTypeFromPkScript(t *testing.T) {
|
||||
type Spec struct {
|
||||
PubkeyScript string
|
||||
|
||||
ExpectedError error
|
||||
ExpectedAddressType btcutils.AddressType
|
||||
}
|
||||
|
||||
specs := []Spec{
|
||||
{
|
||||
PubkeyScript: "0014602181cc89f7c9f54cb6d7607a3445e3e022895d",
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2WPKH,
|
||||
},
|
||||
{
|
||||
PubkeyScript: "5120ef8d59038dd51093fbfff794f658a07a3697b94d9e6d24e45b28abd88f10e33d",
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2TR,
|
||||
},
|
||||
{
|
||||
PubkeyScript: "a91416eef7e84fb9821db1341b6ccef1c4a4e5ec21e487",
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2SH,
|
||||
},
|
||||
{
|
||||
PubkeyScript: "76a914cecb25b53809991c7beef2d27bc2be49e78c684388ac",
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2PKH,
|
||||
},
|
||||
{
|
||||
PubkeyScript: "0020cdbf909e935c855d3e8d1b61aeb9c5e3c03ae8021b286839b1a72f2e48fdba70",
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2WSH,
|
||||
},
|
||||
{
|
||||
PubkeyScript: "0020cdbf909e935c855d3e8d1b61aeb9c5e3c03ae8021b286839b1a72f2e48fdba70",
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: btcutils.AddressP2WSH,
|
||||
},
|
||||
{
|
||||
PubkeyScript: "6a5d0614c0a2331441",
|
||||
|
||||
ExpectedError: nil,
|
||||
ExpectedAddressType: txscript.NonStandardTy,
|
||||
},
|
||||
}
|
||||
|
||||
for _, spec := range specs {
|
||||
t.Run(fmt.Sprintf("PkScript:%s", spec.PubkeyScript), func(t *testing.T) {
|
||||
pkScript, err := hex.DecodeString(spec.PubkeyScript)
|
||||
if err != nil {
|
||||
t.Fail()
|
||||
}
|
||||
actualAddressType, actualError := btcutils.GetAddressTypeFromPkScript(pkScript)
|
||||
if spec.ExpectedError != nil {
|
||||
assert.ErrorIs(t, actualError, spec.ExpectedError)
|
||||
} else {
|
||||
assert.Equal(t, spec.ExpectedAddressType, actualAddressType)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
92
pkg/btcutils/psbtutils/encoding.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package psbtutils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/btcutil/psbt"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
)
|
||||
|
||||
const (
|
||||
// default psbt encoding is hex
|
||||
DefaultEncoding = EncodingHex
|
||||
)
|
||||
|
||||
type Encoding string
|
||||
|
||||
const (
|
||||
EncodingBase64 Encoding = "base64"
|
||||
EncodingHex Encoding = "hex"
|
||||
)
|
||||
|
||||
// DecodeString decodes a psbt hex/base64 string into a psbt.Packet
|
||||
//
|
||||
// encoding is optional, default is EncodingHex
|
||||
func DecodeString(psbtStr string, encoding ...Encoding) (*psbt.Packet, error) {
|
||||
pC, err := Decode([]byte(psbtStr), encoding...)
|
||||
return pC, errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Decode decodes a psbt hex/base64 byte into a psbt.Packet
|
||||
//
|
||||
// encoding is optional, default is EncodingHex
|
||||
func Decode(psbtB []byte, encoding ...Encoding) (*psbt.Packet, error) {
|
||||
enc, ok := utils.Optional(encoding)
|
||||
if !ok {
|
||||
enc = DefaultEncoding
|
||||
}
|
||||
|
||||
var (
|
||||
psbtBytes []byte
|
||||
err error
|
||||
)
|
||||
|
||||
switch enc {
|
||||
case EncodingBase64, "b64":
|
||||
psbtBytes = make([]byte, base64.StdEncoding.DecodedLen(len(psbtB)))
|
||||
_, err = base64.StdEncoding.Decode(psbtBytes, psbtB)
|
||||
case EncodingHex:
|
||||
psbtBytes = make([]byte, hex.DecodedLen(len(psbtB)))
|
||||
_, err = hex.Decode(psbtBytes, psbtB)
|
||||
default:
|
||||
return nil, errors.Wrap(errs.Unsupported, "invalid encoding")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't decode psbt string")
|
||||
}
|
||||
|
||||
pC, err := psbt.NewFromRawBytes(bytes.NewReader(psbtBytes), false)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't create psbt from given psbt")
|
||||
}
|
||||
|
||||
return pC, nil
|
||||
}
|
||||
|
||||
// EncodeToString encodes a psbt.Packet into a psbt hex/base64 string
|
||||
//
|
||||
// encoding is optional, default is EncodingHex
|
||||
func EncodeToString(pC *psbt.Packet, encoding ...Encoding) (string, error) {
|
||||
enc, ok := utils.Optional(encoding)
|
||||
if !ok {
|
||||
enc = DefaultEncoding
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := pC.Serialize(&buf); err != nil {
|
||||
return "", errors.Wrap(err, "can't serialize psbt")
|
||||
}
|
||||
|
||||
switch enc {
|
||||
case EncodingBase64, "b64":
|
||||
return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
|
||||
case EncodingHex:
|
||||
return hex.EncodeToString(buf.Bytes()), nil
|
||||
default:
|
||||
return "", errors.Wrap(errs.Unsupported, "invalid encoding")
|
||||
}
|
||||
}
|
||||
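A minimal sketch converting a PSBT between encodings with the helpers above (the input is read from a CLI argument as placeholder data):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/gaze-network/indexer-network/pkg/btcutils/psbtutils"
)

func main() {
	// Read a base64-encoded PSBT from the first CLI argument (placeholder input).
	raw := os.Args[1]

	// Decode from base64 ("b64" is also accepted as an alias).
	packet, err := psbtutils.DecodeString(raw, psbtutils.EncodingBase64)
	if err != nil {
		log.Fatal(err)
	}

	// Re-encode with the default encoding (hex).
	asHex, err := psbtutils.EncodeToString(packet)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(asHex)
}
```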
110
pkg/btcutils/psbtutils/fee.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package psbtutils
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"github.com/btcsuite/btcd/btcutil/psbt"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/pkg/btcutils"
|
||||
)
|
||||
|
||||
// TxFee returns the fee in satoshis of a transaction given the fee rate (sat/vB)
|
||||
// and the estimated virtual size of the given PSBT.
|
||||
func TxFee(feeRate int64, p *psbt.Packet) (int64, error) {
|
||||
size, err := PSBTSize(p)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "psbt size")
|
||||
}
|
||||
return int64(math.Ceil(size * float64(feeRate))), nil
|
||||
}
|
||||
|
||||
func PredictTxFee(feeRate int64, inputs, outputs int) int64 {
|
||||
/**
|
||||
TODO: handle edge cases like:
|
||||
1. when we predict that we need to use unnecessary UTXOs
|
||||
2. when we predict that we need to use more value than the user has, but the user does have enough for the actual transaction
|
||||
|
||||
Idea for solving this:
|
||||
- When trying to find the best UTXOs to use, we:
|
||||
- Will not reject when the user's balance is not enough; instead we will return all UTXOs even if they are not enough.
|
||||
- Will be okay returning excessive UTXOs (say we predict we need 10K satoshis, but actually we only need 5K satoshis, then we will return UTXOs enough for 10K satoshis)
|
||||
- And then we:
|
||||
- Construct the actual PSBT, then select UTXOs to use accordingly,
|
||||
- If the user's balance is not enough, then we will return an error,
|
||||
- Or if we predicted that we would use more UTXOs than the actual transaction needs, then we will just use what's needed.
|
||||
*/
|
||||
size := defaultOverhead + 148*float64(inputs) + 43*float64(outputs)
|
||||
return int64(math.Ceil(size * float64(feeRate)))
|
||||
}
|
||||
|
||||
type txSize struct {
|
||||
Overhead float64
|
||||
Inputs float64
|
||||
Outputs float64
|
||||
}
|
||||
|
||||
const defaultOverhead = 10.5
|
||||
|
||||
// Transaction Virtual Sizes Bytes
|
||||
//
|
||||
// Reference: https://bitcoinops.org/en/tools/calc-size/
|
||||
var txSizes = map[btcutils.TransactionType]txSize{
|
||||
btcutils.TransactionP2WPKH: {
|
||||
Inputs: 68,
|
||||
Outputs: 31,
|
||||
},
|
||||
btcutils.TransactionP2TR: {
|
||||
Inputs: 57.5,
|
||||
Outputs: 43,
|
||||
},
|
||||
btcutils.TransactionP2SH: {
|
||||
Inputs: 91,
|
||||
Outputs: 32,
|
||||
},
|
||||
btcutils.TransactionP2PKH: {
|
||||
Inputs: 148,
|
||||
Outputs: 34,
|
||||
},
|
||||
btcutils.TransactionP2WSH: {
|
||||
Inputs: 104.5,
|
||||
Outputs: 43,
|
||||
},
|
||||
}
|
||||
|
||||
func PSBTSize(psbt *psbt.Packet) (float64, error) {
|
||||
if err := psbt.SanityCheck(); err != nil {
|
||||
return 0, errors.Wrap(errors.Join(err, errs.InvalidArgument), "psbt sanity check")
|
||||
}
|
||||
|
||||
inputs := map[btcutils.TransactionType]int{}
|
||||
outputs := map[btcutils.TransactionType]int{}
|
||||
|
||||
for _, input := range psbt.Inputs {
|
||||
addrType, err := btcutils.GetAddressTypeFromPkScript(input.WitnessUtxo.PkScript)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "get address type from pk script")
|
||||
}
|
||||
inputs[addrType]++
|
||||
}
|
||||
|
||||
for _, output := range psbt.UnsignedTx.TxOut {
|
||||
addrType, err := btcutils.GetAddressTypeFromPkScript(output.PkScript)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "get address type from pk script")
|
||||
}
|
||||
outputs[addrType]++
|
||||
}
|
||||
|
||||
totalSize := defaultOverhead
|
||||
for txType, txSizeData := range txSizes {
|
||||
if inputCount, ok := inputs[txType]; ok {
|
||||
totalSize += txSizeData.Inputs * float64(inputCount)
|
||||
}
|
||||
if outputCount, ok := outputs[txType]; ok {
|
||||
totalSize += txSizeData.Outputs * float64(outputCount)
|
||||
}
|
||||
}
|
||||
|
||||
return totalSize, nil
|
||||
}
|
||||
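`PredictTxFee` assumes worst-case sizes (10.5 vB overhead, 148 vB per input, 43 vB per output), so 1 input and 2 outputs at 10 sat/vB give ceil((10.5 + 148 + 2*43) * 10) = 2445 sats, while `TxFee`/`PSBTSize` use the per-type sizes from the table above. A small sketch (the PSBT hex is read from a CLI argument as placeholder input):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/gaze-network/indexer-network/pkg/btcutils/psbtutils"
)

func main() {
	// Rough upper-bound estimate before the PSBT exists:
	// ceil((10.5 + 1*148 + 2*43) * 10) = 2445 sats for 1 input / 2 outputs at 10 sat/vB.
	fmt.Println(psbtutils.PredictTxFee(10, 1, 2))

	// Per-type estimate from a constructed PSBT (hex string passed as a CLI argument).
	packet, err := psbtutils.DecodeString(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	fee, err := psbtutils.TxFee(10, packet)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fee)
}
```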
131
pkg/btcutils/psbtutils/fee_test.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package psbtutils_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"github.com/gaze-network/indexer-network/pkg/btcutils/psbtutils"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPSBTSize(t *testing.T) {
|
||||
type Spec struct {
|
||||
Name string
|
||||
PSBTString string
|
||||
ExpectedError error
|
||||
ExpectedSize float64
|
||||
}
|
||||
|
||||
specs := []Spec{
|
||||
{
|
||||
Name: "3-inputs-3-outputs-taproot",
|
||||
PSBTString: "70736274ff0100fd06010100000003866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910000000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910100000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910200000000ffffffff03b0040000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f22020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f4d370f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f000000000001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012bcb3c0f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f00000000",
|
||||
ExpectedError: nil,
|
||||
ExpectedSize: 312,
|
||||
},
|
||||
{
|
||||
Name: "mixed-segwit-taproot",
|
||||
PSBTString: "70736274ff0100fd230202000000061f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90300000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90400000000ffffffff21c8ec368f2aff1a7baf4964e4070f52e7247ae39edfbda3976f8df4da1b72a00000000000ffffffff969e65b705e3d5071f1743a63381b3aa1ec31e1dbbbd63ab594a19ca399a58af0000000000ffffffffcca5cfd28bd6c54a851d97d029560b3047f7c6482fda7b2f2603d56ade8c95890000000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90500000000ffffffff0908070000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e0b03600000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa532a680000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d58020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e0b21f1e00000000001600144850d32c3ff585403790507793125d174a5c28e0000000000001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f220200000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa53010304830000000001012b22020000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d010304830000000001011f06432000000000001600144850d32c3ff585403790507793125d174a5c28e000000000000000000000",
|
||||
ExpectedError: nil,
|
||||
ExpectedSize: 699,
|
||||
},
|
||||
{
|
||||
Name: "segwit-transfer-to-legacy",
|
||||
PSBTString: "70736274ff010074020000000124ba4becfc732f3b4729784a3dd0cc2494ae890d826377fd98aeb0607feb1ace0100000000ffffffff0210270000000000001976a91422bae94117be666b593916527d55bdaf030d756e88ac25f62e000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac000000000001011fc51d2f000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac01086b024730440220759df9d109298a1ef69b9faa1786f4118f0d4d63a68cd2061e217b6090573f62022053ffa117fc21e5bf20e7d16bb786de52dc0214c9a21af87b4e92a639ef66e997012103e0cb213a46a68b1f463a4858635ee44694ce4b512788833d629840341b1219c9000000",
|
||||
ExpectedError: nil,
|
||||
ExpectedSize: 143.5,
|
||||
},
|
||||
}
|
||||
|
||||
for _, spec := range specs {
|
||||
t.Run(spec.Name, func(t *testing.T) {
|
||||
p, err := psbtutils.DecodeString(spec.PSBTString)
|
||||
assert.NoError(t, err)
|
||||
size, err := psbtutils.PSBTSize(p)
|
||||
if spec.ExpectedError != nil {
|
||||
assert.ErrorIs(t, err, spec.ExpectedError)
|
||||
} else {
|
||||
assert.Equal(t, spec.ExpectedSize, size)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPredictTxFee(t *testing.T) {
|
||||
type Spec struct {
|
||||
FeeRate int64
|
||||
InputsCount int
|
||||
OutputsCount int
|
||||
ExpectedFee int64
|
||||
}
|
||||
|
||||
specs := []Spec{
|
||||
{
|
||||
FeeRate: 100,
|
||||
InputsCount: 1,
|
||||
OutputsCount: 1,
|
||||
ExpectedFee: int64(math.Ceil((10.5 + 148 + 43) * 100)),
|
||||
},
|
||||
{
|
||||
FeeRate: 1,
|
||||
InputsCount: 99,
|
||||
OutputsCount: 99,
|
||||
ExpectedFee: int64(math.Ceil((10.5 + (99 * 148) + (99 * 43)) * 1)),
|
||||
},
|
||||
}
|
||||
|
||||
for _, spec := range specs {
|
||||
t.Run(fmt.Sprintf("feeRate=%d:inputs=%d:outputs=%d", spec.FeeRate, spec.InputsCount, spec.OutputsCount), func(t *testing.T) {
|
||||
fee := psbtutils.PredictTxFee(spec.FeeRate, spec.InputsCount, spec.OutputsCount)
|
||||
assert.Equal(t, spec.ExpectedFee, fee)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTxFee(t *testing.T) {
|
||||
type Spec struct {
|
||||
Name string
|
||||
FeeRate int64
|
||||
PSBTString string
|
||||
ExpectedError error
|
||||
ExpectedFee int64
|
||||
}
|
||||
|
||||
specs := []Spec{
|
||||
{
|
||||
Name: "3-inputs-3-outputs-taproot",
|
||||
FeeRate: 10,
|
||||
PSBTString: "70736274ff0100fd06010100000003866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910000000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910100000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910200000000ffffffff03b0040000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f22020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f4d370f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f000000000001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012bcb3c0f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f00000000",
|
||||
ExpectedError: nil,
|
||||
ExpectedFee: 312 * 10,
|
||||
},
|
||||
{
|
||||
Name: "mixed-segwit-taproot",
|
||||
FeeRate: 20,
|
||||
PSBTString: "70736274ff0100fd230202000000061f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90300000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90400000000ffffffff21c8ec368f2aff1a7baf4964e4070f52e7247ae39edfbda3976f8df4da1b72a00000000000ffffffff969e65b705e3d5071f1743a63381b3aa1ec31e1dbbbd63ab594a19ca399a58af0000000000ffffffffcca5cfd28bd6c54a851d97d029560b3047f7c6482fda7b2f2603d56ade8c95890000000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90500000000ffffffff0908070000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e0b03600000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa532a680000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d58020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e0b21f1e00000000001600144850d32c3ff585403790507793125d174a5c28e0000000000001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f220200000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa53010304830000000001012b22020000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d010304830000000001011f06432000000000001600144850d32c3ff585403790507793125d174a5c28e000000000000000000000",
|
||||
ExpectedError: nil,
|
||||
ExpectedFee: 699 * 20,
|
||||
},
|
||||
{
|
||||
Name: "segwit-transfer-to-legacy",
|
||||
FeeRate: 99,
|
||||
PSBTString: "70736274ff010074020000000124ba4becfc732f3b4729784a3dd0cc2494ae890d826377fd98aeb0607feb1ace0100000000ffffffff0210270000000000001976a91422bae94117be666b593916527d55bdaf030d756e88ac25f62e000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac000000000001011fc51d2f000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac01086b024730440220759df9d109298a1ef69b9faa1786f4118f0d4d63a68cd2061e217b6090573f62022053ffa117fc21e5bf20e7d16bb786de52dc0214c9a21af87b4e92a639ef66e997012103e0cb213a46a68b1f463a4858635ee44694ce4b512788833d629840341b1219c9000000",
|
||||
ExpectedError: nil,
|
||||
ExpectedFee: int64(math.Ceil((143.5) * 99)),
|
||||
},
|
||||
}
|
||||
|
||||
for _, spec := range specs {
|
||||
t.Run(spec.Name, func(t *testing.T) {
|
||||
p, err := psbtutils.DecodeString(spec.PSBTString)
|
||||
assert.NoError(t, err)
|
||||
fee, err := psbtutils.TxFee(spec.FeeRate, p)
|
||||
if spec.ExpectedError != nil {
|
||||
assert.ErrorIs(t, err, spec.ExpectedError)
|
||||
} else {
|
||||
assert.Equal(t, spec.ExpectedFee, fee)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
35
pkg/btcutils/psbtutils/is_ready.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package psbtutils
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/btcutil/psbt"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
func IsReadyPSBT(pC *psbt.Packet, feeRate int64) (bool, error) {
|
||||
// if input = output + fee then it's ready
|
||||
|
||||
// Calculate tx fee
|
||||
fee, err := TxFee(feeRate, pC)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "calculate fee")
|
||||
}
|
||||
|
||||
// sum total input and output
|
||||
totalInputValue := lo.SumBy(pC.Inputs, func(input psbt.PInput) int64 { return input.WitnessUtxo.Value })
|
||||
totalOutputValue := lo.SumBy(pC.UnsignedTx.TxOut, func(txout *wire.TxOut) int64 { return txout.Value }) + fee
|
||||
|
||||
// it's perfect match
|
||||
if totalInputValue == totalOutputValue {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// if input is more than output + fee but not more than 1000 satoshi,
|
||||
// then it's ready
|
||||
if totalInputValue > totalOutputValue && totalInputValue-totalOutputValue < 1000 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
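A quick sketch of checking whether a funded PSBT covers its outputs plus fee within the 1000-satoshi tolerance described above (placeholder input from a CLI argument):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/gaze-network/indexer-network/pkg/btcutils/psbtutils"
)

func main() {
	// Decode a funded PSBT (hex string passed as the first CLI argument).
	packet, err := psbtutils.DecodeString(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}

	// Ready means total input value covers outputs + fee at 15 sat/vB,
	// with any surplus below the 1000-satoshi tolerance.
	ready, err := psbtutils.IsReadyPSBT(packet, 15)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ready)
}
```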
21
pkg/btcutils/signature.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package btcutils
|
||||
|
||||
import (
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
verifier "github.com/bitonicnl/verify-signed-message/pkg"
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/cockroachdb/errors"
|
||||
)
|
||||
|
||||
func VerifySignature(address string, message string, sigBase64 string, defaultNet ...*chaincfg.Params) error {
|
||||
net := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)
|
||||
_, err := verifier.VerifyWithChain(verifier.SignedMessage{
|
||||
Address: address,
|
||||
Message: message,
|
||||
Signature: sigBase64,
|
||||
}, net)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
69
pkg/btcutils/signature_test.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package btcutils
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestVerifySignature(t *testing.T) {
|
||||
{
|
||||
message := "Test123"
|
||||
address := "18J72YSM9pKLvyXX1XAjFXA98zeEvxBYmw"
|
||||
signature := "Gzhfsw0ItSrrTCChykFhPujeTyAcvVxiXwywxpHmkwFiKuUR2ETbaoFcocmcSshrtdIjfm8oXlJoTOLosZp3Yc8="
|
||||
network := &chaincfg.MainNetParams
|
||||
|
||||
err := VerifySignature(address, message, signature, network)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
{
|
||||
address := "tb1qr97cuq4kvq7plfetmxnl6kls46xaka78n2288z"
|
||||
message := "The outage comes at a time when bitcoin has been fast approaching new highs not seen since June 26, 2019."
|
||||
signature := "H/bSByRH7BW1YydfZlEx9x/nt4EAx/4A691CFlK1URbPEU5tJnTIu4emuzkgZFwC0ptvKuCnyBThnyLDCqPqT10="
|
||||
network := &chaincfg.TestNet3Params
|
||||
|
||||
err := VerifySignature(address, message, signature, network)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
{
|
||||
// Mismatched address
|
||||
address := "tb1qp7y2ywgrv8a4t9h47yphtgj8w759rk6vgd9ran"
|
||||
message := "The outage comes at a time when bitcoin has been fast approaching new highs not seen since June 26, 2019."
|
||||
signature := "H/bSByRH7BW1YydfZlEx9x/nt4EAx/4A691CFlK1URbPEU5tJnTIu4emuzkgZFwC0ptvKuCnyBThnyLDCqPqT10="
|
||||
network := &chaincfg.TestNet3Params
|
||||
|
||||
err := VerifySignature(address, message, signature, network)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
{
|
||||
// Mismatched signature
|
||||
address := "tb1qr97cuq4kvq7plfetmxnl6kls46xaka78n2288z"
|
||||
message := "The outage comes at a time when bitcoin has been fast approaching new highs not seen since June 26, 2019."
|
||||
signature := "Gzhfsw0ItSrrTCChykFhPujeTyAcvVxiXwywxpHmkwFiKuUR2ETbaoFcocmcSshrtdIjfm8oXlJoTOLosZp3Yc8="
|
||||
network := &chaincfg.TestNet3Params
|
||||
|
||||
err := VerifySignature(address, message, signature, network)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
{
|
||||
// Mismatched message
|
||||
address := "tb1qr97cuq4kvq7plfetmxnl6kls46xaka78n2288z"
|
||||
message := "Hello World"
|
||||
signature := "H/bSByRH7BW1YydfZlEx9x/nt4EAx/4A691CFlK1URbPEU5tJnTIu4emuzkgZFwC0ptvKuCnyBThnyLDCqPqT10="
|
||||
network := &chaincfg.TestNet3Params
|
||||
|
||||
err := VerifySignature(address, message, signature, network)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
{
|
||||
// Mismatched network
|
||||
address := "tb1qr97cuq4kvq7plfetmxnl6kls46xaka78n2288z"
|
||||
message := "The outage comes at a time when bitcoin has been fast approaching new highs not seen since June 26, 2019."
|
||||
signature := "H/bSByRH7BW1YydfZlEx9x/nt4EAx/4A691CFlK1URbPEU5tJnTIu4emuzkgZFwC0ptvKuCnyBThnyLDCqPqT10="
|
||||
network := &chaincfg.MainNetParams
|
||||
|
||||
err := VerifySignature(address, message, signature, network)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
}
|
||||
10
pkg/btcutils/transaction.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package btcutils
|
||||
|
||||
const (
|
||||
// TxVersion is the current latest supported transaction version.
|
||||
TxVersion = 2
|
||||
|
||||
// MaxTxInSequenceNum is the maximum sequence number the sequence field
|
||||
// of a transaction input can be.
|
||||
MaxTxInSequenceNum uint32 = 0xffffffff
|
||||
)
|
||||
@@ -5,12 +5,11 @@ import (
|
||||
"encoding/json"
|
||||
"log/slog"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/valyala/fasthttp"
|
||||
)
|
||||
@@ -24,13 +23,14 @@ type Config struct {
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
baseURL string
|
||||
baseURL *url.URL
|
||||
Config
|
||||
}
|
||||
|
||||
func New(baseURL string, config ...Config) (*Client, error) {
|
||||
if _, err := url.Parse(baseURL); err != nil {
|
||||
return nil, errors.Join(errs.InvalidArgument, errors.Wrap(err, "can't parse base url"))
|
||||
parsedBaseURL, err := url.Parse(baseURL)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't parse base url")
|
||||
}
|
||||
var cf Config
|
||||
if len(config) > 0 {
|
||||
@@ -40,7 +40,7 @@ func New(baseURL string, config ...Config) (*Client, error) {
|
||||
cf.Headers = make(map[string]string)
|
||||
}
|
||||
return &Client{
|
||||
baseURL: baseURL,
|
||||
baseURL: parsedBaseURL,
|
||||
Config: cf,
|
||||
}, nil
|
||||
}
|
||||
@@ -60,11 +60,21 @@ type HttpResponse struct {
|
||||
}
|
||||
|
||||
func (r *HttpResponse) UnmarshalBody(out any) error {
|
||||
err := json.Unmarshal(r.Body(), out)
|
||||
body, err := r.BodyUncompressed()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "can't unmarshal json body from %v, %v", r.URL, string(r.Body()))
|
||||
return errors.Wrapf(err, "can't uncompress body from %v", r.URL)
|
||||
}
|
||||
switch strings.ToLower(string(r.Header.ContentType())) {
|
||||
case "application/json", "application/json; charset=utf-8":
|
||||
if err := json.Unmarshal(body, out); err != nil {
|
||||
return errors.Wrapf(err, "can't unmarshal json body from %s, %q", r.URL, string(body))
|
||||
}
|
||||
return nil
|
||||
case "text/plain", "text/plain; charset=utf-8":
|
||||
return errors.Errorf("can't unmarshal plain text %q", string(body))
|
||||
default:
|
||||
return errors.Errorf("unsupported content type: %s, contents: %v", r.Header.ContentType(), string(r.Body()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Client) request(ctx context.Context, reqOptions RequestOptions) (*HttpResponse, error) {
|
||||
@@ -77,9 +87,14 @@ func (h *Client) request(ctx context.Context, reqOptions RequestOptions) (*HttpR
|
||||
for k, v := range reqOptions.Header {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
parsedUrl := utils.Must(url.Parse(h.baseURL)) // checked in httpclient.New
|
||||
parsedUrl.Path = reqOptions.path
|
||||
parsedUrl.RawQuery = reqOptions.Query.Encode()
|
||||
|
||||
parsedUrl := h.BaseURL()
|
||||
parsedUrl.Path = path.Join(parsedUrl.Path, reqOptions.path)
|
||||
baseQuery := parsedUrl.Query()
|
||||
for k, v := range reqOptions.Query {
|
||||
baseQuery[k] = v
|
||||
}
|
||||
parsedUrl.RawQuery = baseQuery.Encode()
|
||||
|
||||
// remove %20 from url (empty space)
|
||||
url := strings.TrimSuffix(parsedUrl.String(), "%20")
|
||||
@@ -111,6 +126,7 @@ func (h *Client) request(ctx context.Context, reqOptions RequestOptions) (*HttpR
|
||||
logger = logger.With(
|
||||
slog.Int("status_code", resp.StatusCode()),
|
||||
slog.String("resp_content_type", string(resp.Header.ContentType())),
|
||||
slog.String("resp_content_encoding", string(resp.Header.ContentEncoding())),
|
||||
slog.Int("resp_content_length", len(resp.Body())),
|
||||
)
|
||||
}
|
||||
@@ -134,6 +150,12 @@ func (h *Client) request(ctx context.Context, reqOptions RequestOptions) (*HttpR
|
||||
return &httpResponse, nil
|
||||
}
|
||||
|
||||
// BaseURL returns the cloned base URL of the client.
|
||||
func (h *Client) BaseURL() *url.URL {
|
||||
u := *h.baseURL
|
||||
return &u
|
||||
}
|
||||
|
||||
func (h *Client) Do(ctx context.Context, method, path string, reqOptions RequestOptions) (*HttpResponse, error) {
|
||||
reqOptions.path = path
|
||||
reqOptions.method = method
|
||||
|
||||
@@ -119,10 +119,10 @@ type Config struct {
|
||||
// - Text (default)
|
||||
// - JSON
|
||||
// - GCP: Output format for Stackdriver Logging/Cloud Logging or others GCP services.
|
||||
Output string `mapstructure:"output"`
|
||||
Output string `mapstructure:"output" env:"OUTPUT" envDefault:"text"`
|
||||
|
||||
// Debug is enabled logger level debug. (default: false)
|
||||
Debug bool `mapstructure:"debug"`
|
||||
Debug bool `mapstructure:"debug" env:"DEBUG" envDefault:"false"`
|
||||
}
|
||||
|
||||
var (
|
||||
|
||||
@@ -10,23 +10,28 @@ import (
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
func NewHTTPErrorHandler() func(ctx *fiber.Ctx, err error) error {
|
||||
return func(ctx *fiber.Ctx, err error) error {
|
||||
// New setup error handler middleware
|
||||
func New() fiber.Handler {
|
||||
return func(ctx *fiber.Ctx) error {
|
||||
err := ctx.Next()
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if e := new(errs.PublicError); errors.As(err, &e) {
|
||||
return errors.WithStack(ctx.Status(http.StatusBadRequest).JSON(map[string]any{
|
||||
return errors.WithStack(ctx.Status(http.StatusBadRequest).JSON(fiber.Map{
|
||||
"error": e.Message(),
|
||||
}))
|
||||
}
|
||||
if e := new(fiber.Error); errors.As(err, &e) {
|
||||
return errors.WithStack(ctx.Status(e.Code).SendString(e.Error()))
|
||||
return errors.WithStack(ctx.Status(e.Code).JSON(fiber.Map{
|
||||
"error": e.Error(),
|
||||
}))
|
||||
}
|
||||
|
||||
logger.ErrorContext(ctx.UserContext(), "Something went wrong, unhandled api error",
|
||||
slogx.String("event", "api_unhandled_error"),
|
||||
logger.ErrorContext(ctx.UserContext(), "Something went wrong, api error",
|
||||
slogx.String("event", "api_error"),
|
||||
slogx.Error(err),
|
||||
)
|
||||
|
||||
return errors.WithStack(ctx.Status(http.StatusInternalServerError).JSON(map[string]any{
|
||||
return errors.WithStack(ctx.Status(http.StatusInternalServerError).JSON(fiber.Map{
|
||||
"error": "Internal Server Error",
|
||||
}))
|
||||
}
|
||||
7
pkg/middleware/requestcontext/PROXY-IP.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# Proxies IP Range Resources
|
||||
|
||||
- Cloudflare - https://www.cloudflare.com/ips/
|
||||
- GCP Load Balancer - https://cloud.google.com/load-balancing/docs/health-check-concepts#ip-ranges
|
||||
- GCP Compute Engine, Customer-usable external IP address ranges - https://www.gstatic.com/ipranges/cloud.json
|
||||
- Other GCP Services - https://cloud.google.com/compute/docs/faq#networking
|
||||
- Other Resources - https://github.com/lord-alfred/ipranges
|
||||
21
pkg/middleware/requestcontext/errors.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package requestcontext
|
||||
|
||||
// requestcontextError implements error interface
|
||||
var _ error = requestcontextError{}
|
||||
|
||||
type requestcontextError struct {
|
||||
err error
|
||||
status int
|
||||
message string
|
||||
}
|
||||
|
||||
func (r requestcontextError) Error() string {
|
||||
if r.err != nil {
|
||||
return r.err.Error()
|
||||
}
|
||||
return r.message
|
||||
}
|
||||
|
||||
func (r requestcontextError) Unwrap() error {
|
||||
return r.err
|
||||
}
|
||||
44
pkg/middleware/requestcontext/requestcontext.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package requestcontext
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
type Response struct {
|
||||
Result any `json:"result"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
type Option func(ctx context.Context, c *fiber.Ctx) (context.Context, error)
|
||||
|
||||
func New(opts ...Option) fiber.Handler {
|
||||
return func(c *fiber.Ctx) error {
|
||||
var err error
|
||||
ctx := c.UserContext()
|
||||
for i, opt := range opts {
|
||||
ctx, err = opt(ctx, c)
|
||||
if err != nil {
|
||||
rErr := requestcontextError{}
|
||||
if errors.As(err, &rErr) {
|
||||
return c.Status(rErr.status).JSON(Response{Error: rErr.message})
|
||||
}
|
||||
|
||||
logger.ErrorContext(ctx, "failed to extract request context",
|
||||
err,
|
||||
slog.String("event", "requestcontext/error"),
|
||||
slog.String("module", "requestcontext"),
|
||||
slog.Int("optionIndex", i),
|
||||
)
|
||||
return c.Status(http.StatusInternalServerError).JSON(Response{Error: "internal server error"})
|
||||
}
|
||||
}
|
||||
c.SetUserContext(ctx)
|
||||
return c.Next()
|
||||
}
|
||||
}
|
||||
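A wiring sketch for this middleware together with `WithClientIP` from the next file; the import path is assumed from the module path used elsewhere in these files:

```go
package main

import (
	"github.com/gofiber/fiber/v2"

	"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
)

func main() {
	app := fiber.New()

	// Populate the request context once per request; WithClientIP resolves the
	// real client IP behind proxies (here via a trusted header, as one option).
	app.Use(requestcontext.New(
		requestcontext.WithClientIP(requestcontext.WithClientIPConfig{
			TrustedHeader: "CF-Connecting-IP",
		}),
	))

	app.Get("/ip", func(c *fiber.Ctx) error {
		return c.SendString(requestcontext.GetClientIP(c.UserContext()))
	})

	_ = app.Listen(":8080")
}
```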
150
pkg/middleware/requestcontext/with_clientip.go
Normal file
@@ -0,0 +1,150 @@
|
||||
package requestcontext
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"net"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
type clientIPKey struct{}
|
||||
|
||||
type WithClientIPConfig struct {
|
||||
// [Optional] TrustedProxiesIP is a list of all proxy IP ranges that sit between the server and the client.
|
||||
//
|
||||
// If it's provided, it will walk backwards from the last IP in `X-Forwarded-For` header
|
||||
// and use the first IP that's not a trusted proxy (not in the given IP ranges).
|
||||
//
|
||||
// **If you want to use this option, you should provide all probable proxy IP ranges.**
|
||||
//
|
||||
// This is lowest priority.
|
||||
TrustedProxiesIP []string `env:"TRUSTED_PROXIES_IP" mapstructure:"trusted_proxies_ip"`
|
||||
|
||||
// [Optional] TrustedHeader is a header name for getting client IP. (e.g. X-Real-IP, CF-Connecting-IP, etc.)
|
||||
//
|
||||
// This is highest priority, it will ignore rest of the options if it's provided.
|
||||
TrustedHeader string `env:"TRUSTED_HEADER" mapstructure:"trusted_proxies_header"`
|
||||
|
||||
// EnableRejectMalformedRequest return 403 Forbidden if the request is from proxies, but can't extract client IP
|
||||
EnableRejectMalformedRequest bool `env:"ENABLE_REJECT_MALFORMED_REQUEST" envDefault:"false" mapstructure:"enable_reject_malformed_request"`
|
||||
}
|
||||
|
||||
// WithClientIP setup client IP context with XFF Spoofing prevention support.
|
||||
//
|
||||
// If request is from proxies, it will use first IP from `X-Forwarded-For` header by default.
|
||||
func WithClientIP(config WithClientIPConfig) Option {
|
||||
var trustedProxies trustedProxy
|
||||
if len(config.TrustedProxiesIP) > 0 {
|
||||
proxy, err := newTrustedProxy(config.TrustedProxiesIP)
|
||||
if err != nil {
|
||||
logger.Panic("Failed to parse trusted proxies", err)
|
||||
}
|
||||
trustedProxies = proxy
|
||||
}
|
||||
|
||||
return func(ctx context.Context, c *fiber.Ctx) (context.Context, error) {
|
||||
// Extract client IP from given header
|
||||
if config.TrustedHeader != "" {
|
||||
headerIP := c.Get(config.TrustedHeader)
|
||||
|
||||
// validate ip from header
|
||||
if ip := net.ParseIP(headerIP); ip != nil {
|
||||
return context.WithValue(ctx, clientIPKey{}, headerIP), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Extract client IP from XFF header
|
||||
rawIPs := c.IPs()
|
||||
ips := parseIPs(rawIPs)
|
||||
|
||||
// If the request is directly from client, we can use direct remote IP address
|
||||
if len(ips) == 0 {
|
||||
return context.WithValue(ctx, clientIPKey{}, c.IP()), nil
|
||||
}
|
||||
|
||||
// Walk back and find first IP that's not trusted proxy
|
||||
if len(trustedProxies) > 0 {
|
||||
for i := len(ips) - 1; i >= 0; i-- {
|
||||
if !trustedProxies.IsTrusted(ips[i]) {
|
||||
return context.WithValue(ctx, clientIPKey{}, ips[i].String()), nil
|
||||
}
|
||||
}
|
||||
|
||||
// If all IPs are trusted proxies, return first IP in XFF header
|
||||
return context.WithValue(ctx, clientIPKey{}, rawIPs[0]), nil
|
||||
}
|
||||
|
||||
// Finally, if we can't extract client IP, return forbidden
|
||||
if config.EnableRejectMalformedRequest {
|
||||
logger.WarnContext(ctx, "IP Spoofing detected, returning 403 Forbidden",
|
||||
slog.String("event", "requestcontext/ip_spoofing_detected"),
|
||||
slog.String("module", "requestcontext/with_clientip"),
|
||||
slog.String("ip", c.IP()),
|
||||
slog.Any("ips", rawIPs),
|
||||
)
|
||||
return nil, requestcontextError{
|
||||
status: fiber.StatusForbidden,
|
||||
message: "not allowed to access",
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to first IP in XFF header
|
||||
return context.WithValue(ctx, clientIPKey{}, rawIPs[0]), nil
|
||||
}
|
||||
}
|
||||
|
||||
// GetClientIP get clientIP from context. If not found, return empty string
|
||||
//
|
||||
// Warning: Request context should be setup before using this function
|
||||
func GetClientIP(ctx context.Context) string {
|
||||
if ip, ok := ctx.Value(clientIPKey{}).(string); ok {
|
||||
return ip
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type trustedProxy []*net.IPNet
|
||||
|
||||
// newTrustedProxy create a new trusted proxies instance for preventing IP spoofing (XFF Attacks)
|
||||
func newTrustedProxy(ranges []string) (trustedProxy, error) {
|
||||
nets, err := parseCIDRs(ranges)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return trustedProxy(nets), nil
|
||||
}
|
||||
|
||||
func (t trustedProxy) IsTrusted(ip net.IP) bool {
|
||||
if ip == nil {
|
||||
return false
|
||||
}
|
||||
for _, r := range t {
|
||||
if r.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func parseCIDRs(ranges []string) ([]*net.IPNet, error) {
|
||||
nets := make([]*net.IPNet, 0, len(ranges))
|
||||
for _, r := range ranges {
|
||||
_, ipnet, err := net.ParseCIDR(r)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse CIDR for %q", r)
|
||||
}
|
||||
nets = append(nets, ipnet)
|
||||
}
|
||||
return nets, nil
|
||||
}
|
||||
|
||||
func parseIPs(ranges []string) []net.IP {
|
||||
ip := make([]net.IP, 0, len(ranges))
|
||||
for _, r := range ranges {
|
||||
ip = append(ip, net.ParseIP(r))
|
||||
}
|
||||
return ip
|
||||
}
|
||||
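A usage sketch for this option, assuming an existing Fiber app; the CIDR ranges and the `/whoami` route are illustrative only, so substitute your actual proxy or load-balancer ranges (e.g. from the GCP lists referenced above):

```go
package main

import (
	"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
	"github.com/gofiber/fiber/v2"
)

func main() {
	app := fiber.New()

	// Example ranges only; provide the real ranges of every proxy in front of the server.
	app.Use(requestcontext.New(requestcontext.WithClientIP(requestcontext.WithClientIPConfig{
		TrustedProxiesIP:             []string{"10.0.0.0/8", "130.211.0.0/22", "35.191.0.0/16"},
		EnableRejectMalformedRequest: true,
	})))

	app.Get("/whoami", func(c *fiber.Ctx) error {
		// GetClientIP reads the IP that the middleware stored in the user context.
		return c.SendString(requestcontext.GetClientIP(c.UserContext()))
	})

	_ = app.Listen(":8080")
}
```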
47  pkg/middleware/requestcontext/with_requestid.go  Normal file
@@ -0,0 +1,47 @@
package requestcontext

import (
    "context"

    "github.com/gaze-network/indexer-network/pkg/logger"
    "github.com/gofiber/fiber/v2"
    "github.com/gofiber/fiber/v2/middleware/requestid"
    fiberutils "github.com/gofiber/fiber/v2/utils"
)

type requestIdKey struct{}

// GetRequestId gets the request ID from the context. If not found, it returns an empty string.
//
// Warning: the request context should be set up before using this function.
func GetRequestId(ctx context.Context) string {
    if id, ok := ctx.Value(requestIdKey{}).(string); ok {
        return id
    }
    return ""
}

func WithRequestId() Option {
    return func(ctx context.Context, c *fiber.Ctx) (context.Context, error) {
        // Try to get the ID from the fiber context.
        requestId, ok := c.Locals(requestid.ConfigDefault.ContextKey).(string)
        if !ok || requestId == "" {
            // Try to get the ID from the request header, else generate one
            requestId = c.Get(requestid.ConfigDefault.Header, fiberutils.UUID())

            // Set the new ID on the response header
            c.Set(requestid.ConfigDefault.Header, requestId)

            // Add the request ID to locals (fasthttp UserValue storage)
            c.Locals(requestid.ConfigDefault.ContextKey, requestId)
        }

        // Add the request ID to the context
        ctx = context.WithValue(ctx, requestIdKey{}, requestId)

        // Add the request ID to the context logger
        ctx = logger.WithContext(ctx, "requestId", requestId)

        return ctx, nil
    }
}
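A minimal sketch of reading the correlation ID back in a handler; the app setup and the `/status` route are assumptions for illustration:

```go
package main

import (
	"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
	"github.com/gofiber/fiber/v2"
)

func main() {
	app := fiber.New()
	app.Use(requestcontext.New(requestcontext.WithRequestId()))

	app.Get("/status", func(c *fiber.Ctx) error {
		// The ID attached by WithRequestId is readable from the user context.
		return c.JSON(fiber.Map{"requestId": requestcontext.GetRequestId(c.UserContext())})
	})

	_ = app.Listen(":8080")
}
```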
116  pkg/middleware/requestlogger/requestlogger.go  Normal file
@@ -0,0 +1,116 @@
package requestlogger

import (
    "log/slog"
    "net/http"
    "strings"
    "time"

    "github.com/cockroachdb/errors"
    "github.com/gaze-network/indexer-network/pkg/logger"
    "github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
    "github.com/gofiber/fiber/v2"
)

type Config struct {
    WithRequestHeader    bool     `env:"REQUEST_HEADER" envDefault:"false" mapstructure:"request_header"`
    WithRequestQuery     bool     `env:"REQUEST_QUERY" envDefault:"false" mapstructure:"request_query"`
    Disable              bool     `env:"DISABLE" envDefault:"false" mapstructure:"disable"` // Disable logging at level `INFO`
    HiddenRequestHeaders []string `env:"HIDDEN_REQUEST_HEADERS" mapstructure:"hidden_request_headers"`
}

// New creates a middleware that logs request and response information.
func New(config Config) fiber.Handler {
    hiddenRequestHeaders := make(map[string]struct{}, len(config.HiddenRequestHeaders))
    for _, header := range config.HiddenRequestHeaders {
        hiddenRequestHeaders[strings.TrimSpace(strings.ToLower(header))] = struct{}{}
    }
    return func(c *fiber.Ctx) error {
        start := time.Now()

        // Continue stack
        err := c.Next()

        end := time.Now()
        latency := end.Sub(start)
        status := c.Response().StatusCode()

        baseAttrs := []slog.Attr{
            slog.String("event", "api_request"),
            slog.Int64("latency", latency.Milliseconds()),
            slog.String("latencyHuman", latency.String()),
        }

        // Prepare request attributes
        requestAttributes := []slog.Attr{
            slog.Time("time", start),
            slog.String("method", c.Method()),
            slog.String("host", c.Hostname()),
            slog.String("path", c.Path()),
            slog.String("route", c.Route().Path),
            slog.String("ip", requestcontext.GetClientIP(c.UserContext())),
            slog.String("remoteIP", c.Context().RemoteIP().String()),
            slog.Any("x-forwarded-for", c.IPs()),
            slog.String("user-agent", string(c.Context().UserAgent())),
            slog.Any("params", c.AllParams()),
            slog.Any("query", c.Queries()),
            slog.Int("length", len(c.Body())),
        }

        // Prepare response attributes
        responseAttributes := []slog.Attr{
            slog.Time("time", end),
            slog.Int("status", status),
            slog.Int("length", len(c.Response().Body())),
        }

        // Request query
        if config.WithRequestQuery {
            requestAttributes = append(requestAttributes, slog.String("query", string(c.Request().URI().QueryString())))
        }

        // Request headers
        if config.WithRequestHeader {
            kv := []any{}

            for k, v := range c.GetReqHeaders() {
                if _, found := hiddenRequestHeaders[strings.ToLower(k)]; found {
                    continue
                }
                kv = append(kv, slog.Any(k, v))
            }

            requestAttributes = append(requestAttributes, slog.Group("header", kv...))
        }

        level := slog.LevelInfo
        if err != nil || status >= http.StatusInternalServerError {
            level = slog.LevelError

            // Error attributes
            logErr := err
            if logErr == nil {
                logErr = fiber.NewError(status)
            }
            baseAttrs = append(baseAttrs, slog.Any("error", logErr))
        }

        if config.Disable && level == slog.LevelInfo {
            return errors.WithStack(err)
        }

        logger.LogAttrs(c.UserContext(), level, "Request Completed", append([]slog.Attr{
            {
                Key:   "request",
                Value: slog.GroupValue(requestAttributes...),
            },
            {
                Key:   "response",
                Value: slog.GroupValue(responseAttributes...),
            },
        }, baseAttrs...)...,
        )

        return errors.WithStack(err)
    }
}
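Putting the middleware together, a typical wiring might look like the sketch below; it assumes the module paths above, and the route, header names, and config values are illustrative. Fiber's own `requestid` middleware runs first so `WithRequestId` can reuse its value, the request context options run next, and the request logger is registered last so its handler executes inside the enriched user context and can log the request ID and client IP:

```go
package main

import (
	"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
	"github.com/gaze-network/indexer-network/pkg/middleware/requestlogger"
	"github.com/gofiber/fiber/v2"
	"github.com/gofiber/fiber/v2/middleware/requestid"
)

func main() {
	app := fiber.New()

	// Order matters: request ID and client IP are attached to the user context
	// before the request logger reads them.
	app.Use(requestid.New())
	app.Use(requestcontext.New(
		requestcontext.WithRequestId(),
		requestcontext.WithClientIP(requestcontext.WithClientIPConfig{}),
	))
	app.Use(requestlogger.New(requestlogger.Config{
		WithRequestHeader:    true,
		HiddenRequestHeaders: []string{"Authorization", "Cookie"},
	}))

	app.Get("/healthz", func(c *fiber.Ctx) error { return c.SendStatus(fiber.StatusOK) })

	_ = app.Listen(":8080")
}
```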
10  sqlc.yaml
@@ -7,16 +7,6 @@
# https://docs.sqlc.dev/en/stable/howto/ddl.html#golang-migrate
version: "2"
sql:
  - schema: "./modules/bitcoin/database/postgresql/migrations"
    queries: "./modules/bitcoin/database/postgresql/queries"
    engine: "postgresql"
    gen:
      go:
        package: "gen"
        out: "./modules/bitcoin/repository/postgres/gen"
        sql_package: "pgx/v5"
        rename:
          id: "Id"
  - schema: "./modules/runes/database/postgresql/migrations"
    queries: "./modules/runes/database/postgresql/queries"
    engine: "postgresql"