Compare commits

...

94 Commits

Author SHA1 Message Date
Planxnx
bd692a1e12 Merge branch 'develop' into feature/brc20-module-api 2024-06-14 17:50:30 +07:00
Gaze
415a476478 Merge branch 'main' into develop 2024-06-14 16:55:39 +07:00
Gaze
f63505e173 feat(btcutils): use chain params instead common.network 2024-06-14 16:55:28 +07:00
Gaze
65a69ddb68 Merge remote-tracking branch 'origin/main' into develop 2024-06-14 16:48:48 +07:00
Thanee Charattrakool
4f5d1f077b feat(btcutils): add bitcoin utility functions (#26)
* feat(btcutils): add bitcoin utility functions

* feat(btcutils): add bitcoin signature verification
2024-06-14 16:48:22 +07:00
Gaze
7bf21cbdea feat: add todo note 2024-06-14 05:21:58 +07:00
Gaze
f3c944a57b feat(decimals): support pgtype.Numeric 2024-06-14 00:47:02 +07:00
Gaze
f2adf82121 feat: add todo note 2024-06-13 04:02:25 +07:00
Gaze
70434caa6a fix: typo 2024-06-13 01:08:16 +07:00
Gaze
1b52a20542 Merge branch 'develop' into feature/brc20-module-api 2024-06-13 00:16:11 +07:00
Gaze
c133006c82 Merge branch 'main' into develop 2024-06-12 23:39:24 +07:00
Thanee Charattrakool
51fd1f6636 feat: move requestip config to http config (#25) 2024-06-12 22:08:03 +07:00
Gaze
22c2716d6b feat(brc20): construct api 2024-06-11 14:36:17 +07:00
Gaze
02abc2a190 Merge remote-tracking branch 'origin/feat/brc20-module' into feature/brc20-module-api 2024-06-11 14:35:47 +07:00
Gaze
ac22c355f5 feat: add config example 2024-06-11 14:31:22 +07:00
Gaze
26aa2805c1 fix: insert id to events 2024-06-11 14:09:22 +07:00
Gaze
1b86dc6cec fix: sort transfers by sequence number 2024-06-11 12:49:07 +07:00
Gaze
7fb0c7a40d feat: add inscription number and sequence number to transfers 2024-06-11 12:45:43 +07:00
Gaze
43ac6badac fix: ensure outpoint values are fetched 2024-06-11 12:37:01 +07:00
Gaze
3c2c52eb1d feat(brc20): can't have multiple mint events in same tx 2024-06-10 22:03:48 +07:00
Gaze
2c9adb7e91 feat(brc20): implement brc-20 indexer api 2024-06-10 20:12:40 +07:00
Gaze
84bbc986f0 Merge remote-tracking branch 'origin/feat/brc20-module' into feature/brc20-module-api 2024-06-10 19:27:45 +07:00
Gaze
1439964666 feat(brc20): add db query 2024-06-10 18:03:23 +07:00
Gaze
f4025e0f15 feat: batch get outpoint values 2024-06-10 17:33:09 +07:00
Gaze
0172f036fb fix: assign to nil map 2024-06-10 17:29:42 +07:00
Gaze
5eb2380e4b fix: get all transfers in outpoints for all txs 2024-06-10 16:34:46 +07:00
Gaze
b7b4607b6a fix: coinbase tx processing 2024-06-10 16:04:28 +07:00
Gaze
cfcfd845f3 fix: processor bug 2024-06-10 14:26:41 +07:00
Gaze
7c23b93751 fix: jubilee height condition 2024-06-10 14:07:36 +07:00
Gaze
2c016f36c1 fix: payload parsing 2024-06-10 09:58:58 +07:00
Gaze
accf37a218 feat(decimals): add decimal utils 2024-06-10 05:07:26 +07:00
Gaze
e91c7db601 Merge remote-tracking branch 'origin/feat/brc20-module' into feature/brc20-module-api 2024-06-10 03:51:50 +07:00
Gaze
980163900c fix: imports 2024-06-10 01:53:13 +07:00
Gaze
8110434e18 fix: concat hex-encoded hashes instead 2024-06-10 01:52:24 +07:00
Gaze
3603248485 fix: start cumulative hash with first event hash 2024-06-10 01:45:17 +07:00
Gaze
2c5a6076ff feat: add flush brc20 states 2024-06-10 00:14:28 +07:00
Gaze
05d7fecf69 feat: add event hash logic 2024-06-10 00:09:18 +07:00
Gaze
49eff4f3ba refactor: separate brc20 processing logic to smaller funcs 2024-06-09 23:40:36 +07:00
Gaze
e4d41cc7a4 feat: skip non-brc20 transfers 2024-06-09 17:00:01 +07:00
Gaze
b45dfd066a fix: remove debug logs 2024-06-09 16:41:58 +07:00
Gaze
2ae5b0835d feat: process brc20 states 2024-06-09 16:29:00 +07:00
Gaze
132dcde715 fix: transfer order 2024-06-09 16:28:42 +07:00
Gaze
4228730a34 fix: inscription logic 2024-06-09 16:17:58 +07:00
Gaze
f3ff5ecb7d fix: bug transfer inscription in same block as inscribe 2024-06-09 15:30:27 +07:00
Gaze
99bdf49f02 feat: brc20 indexing logic 2024-06-09 14:49:13 +07:00
Gaze
806d27fb46 fix: remove wrong incomplete field check 2024-06-08 00:23:51 +07:00
Gaze
850728b3cf Merge remote-tracking branch 'origin/feat/brc20-module' into feature/brc20-module-api 2024-06-07 22:31:54 +07:00
Gaze
7453abec99 fix: remove stop panic 2024-06-07 21:59:02 +07:00
Gaze
0d075c31f8 chore: remove unused util func 2024-06-07 21:48:03 +07:00
Gaze
605ea63167 fix: use decimals for brc20 amounts 2024-06-07 21:37:17 +07:00
Gaze
3fa0a7d975 fix: update migration 2024-06-07 21:20:13 +07:00
Gaze
14142096af feat: update entities for new table columns 2024-06-07 21:17:56 +07:00
Gaze
2bb1bad449 feat: update migrations 2024-06-07 17:27:10 +07:00
Gaze
f0b4a69392 Merge remote-tracking branch 'origin/feat/brc20-module' into feature/brc20-module-api 2024-06-07 02:37:26 +07:00
Gaze
ccdc4c56ff feat: add brc20 logic 2024-06-07 00:48:31 +07:00
Gaze
b049170621 feat: create common utils for api 2024-06-06 21:44:13 +07:00
Gaze
f3c6180c17 feat: add more migration and queries 2024-06-06 16:46:25 +07:00
Gaze
ce11033919 fix: use internal query func 2024-06-06 14:04:18 +07:00
Gaze
033dbf7324 fix: inscription transfers primary key add tx_index 2024-06-06 12:55:43 +07:00
Gaze
38c37189fc fix: unique index 2024-06-06 12:09:19 +07:00
Gaze
6d1db50890 fix: rename table 2024-06-05 19:22:12 +07:00
Gaze
0a3800c68a fix: remove transfer count limit 2024-06-05 17:34:49 +07:00
Gaze
cab7d0448e fix: remove pkg errors 2024-06-05 16:12:04 +07:00
Gaze
7c555fe373 fix: get transfers from buffer first 2024-06-05 16:10:54 +07:00
Gaze
a082a35bb6 chore: remove log 2024-05-31 22:52:12 +07:00
Gaze
180ea17284 fix: wrong curse condition 2024-05-31 22:50:30 +07:00
Gaze
fc48ba50b8 fix: remove panic 2024-05-31 20:46:08 +07:00
Gaze
0305762166 fix: incorrect params mapper 2024-05-31 20:44:57 +07:00
Gaze
83b22eb883 feat: implement revert data 2024-05-31 18:49:36 +07:00
Gaze
45f106995a fix: correctly put coinbase at end of block 2024-05-31 14:37:22 +07:00
Gaze
d6c3f90d8c fix: add content to inscription transfers 2024-05-31 12:08:09 +07:00
Nut Pinyo
d4b694aa57 fix: implement Shutdown() for processors (#22) 2024-05-30 23:57:41 +07:00
Gaze
ef575dea85 fix: put coinbase txs at the end of block 2024-05-30 16:03:13 +07:00
Gaze
d70accc80e refactor: move flotsam to entity 2024-05-30 13:08:58 +07:00
Gaze
d8385125c3 fix: insert non-brc20 inscriptions 2024-05-29 18:36:45 +07:00
Gaze
dc44e4bb5c Revert "feat: seed brc20 stats"
This reverts commit 4ae169218f.

# Conflicts:
#	modules/brc20/constants.go
2024-05-29 17:29:05 +07:00
Gaze
f0cb5d651b fix: remove fmt print 2024-05-29 16:28:05 +07:00
Gaze
4ae169218f feat: seed brc20 stats 2024-05-29 16:23:32 +07:00
Gaze
c41da88808 fix: cache outpoint value before returning 2024-05-29 15:40:28 +07:00
Gaze
94b228dd75 fix: check json protocol value 2024-05-29 15:40:20 +07:00
Gaze
518a07e920 fix: change starting block to first ordinals inscription 2024-05-29 15:39:29 +07:00
Gaze
6512f18f94 feat: optimize get outpoint values 2024-05-29 15:19:52 +07:00
Gaze
27161f827d feat: implement migrate 2024-05-29 15:04:05 +07:00
Gaze
db209f68ad fix: optimize processing 2024-05-29 14:43:37 +07:00
Gaze
bb03d439f5 refactor: move processor to main processor 2024-05-28 15:03:51 +07:00
Gaze
71641dd2fb feat: implement brc20 inscription dgs and repos 2024-05-28 14:57:44 +07:00
Gaze
3bb26d012b feat: add inscription processor code 2024-05-27 17:01:56 +07:00
Gaze
bb3c24b472 feat: add base code for brc20 dgs and repos 2024-05-27 14:33:34 +07:00
Gaze
eb65b8dcbc feat: implement envelope tests 2024-05-27 14:33:34 +07:00
Gaze
b3363c7983 feat: implement envelope parsing 2024-05-27 14:33:34 +07:00
Gaze
c8e03e8056 fix: add inscription location table 2024-05-27 14:33:34 +07:00
Gaze
131afac8c2 feat: add base processor code 2024-05-27 14:33:34 +07:00
Gaze
2befd8b124 feat: add brc20 migrations 2024-05-27 14:33:34 +07:00
Gaze
9febf40e81 Merge remote-tracking branch 'origin/main' into develop 2024-05-27 14:33:00 +07:00
101 changed files with 11958 additions and 57 deletions

View File

@@ -39,7 +39,7 @@
"ui.completion.usePlaceholders": false,
"ui.diagnostic.analyses": {
// https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md
// "fieldalignment": false,
"fieldalignment": false,
"nilness": true,
"shadow": false,
"unusedparams": true,

View File

@@ -17,6 +17,7 @@ import (
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/modules/brc20"
"github.com/gaze-network/indexer-network/modules/runes"
"github.com/gaze-network/indexer-network/pkg/automaxprocs"
"github.com/gaze-network/indexer-network/pkg/errorhandler"
@@ -39,6 +40,7 @@ import (
// Register Modules
var Modules = do.Package(
do.LazyNamed("runes", runes.New),
do.LazyNamed("brc20", brc20.New),
)
func NewRunCommand() *cobra.Command {
@@ -145,7 +147,7 @@ func runHandler(cmd *cobra.Command, _ []string) error {
Use(requestid.New()).
Use(requestcontext.New(
requestcontext.WithRequestId(),
requestcontext.WithClientIP(conf.RequestIP),
requestcontext.WithClientIP(conf.HTTPServer.RequestIP),
)).
Use(requestlogger.New(conf.HTTPServer.Logger)).
Use(fiberrecover.New(fiberrecover.Config{

View File

@@ -17,7 +17,7 @@ import (
type migrateDownCmdOptions struct {
DatabaseURL string
Runes bool
Modules string
All bool
}
@@ -59,7 +59,7 @@ func NewMigrateDownCommand() *cobra.Command {
}
flags := cmd.Flags()
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes down migrations")
flags.StringVar(&opts.Modules, "modules", "", "Modules to apply up migrations")
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")
flags.BoolVar(&opts.All, "all", false, "Confirm apply ALL down migrations without prompt")
@@ -87,6 +87,8 @@ func migrateDownHandler(opts *migrateDownCmdOptions, _ *cobra.Command, args migr
}
}
modules := strings.Split(opts.Modules, ",")
applyDownMigrations := func(module string, sourcePath string, migrationTable string) error {
newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}})
sourceURL := "file://" + sourcePath
@@ -116,10 +118,15 @@ func migrateDownHandler(opts *migrateDownCmdOptions, _ *cobra.Command, args migr
return nil
}
if opts.Runes {
if lo.Contains(modules, "runes") {
if err := applyDownMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
if lo.Contains(modules, "brc20") {
if err := applyDownMigrations("BRC20", brc20MigrationSource, "brc20_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
return nil
}

View File

@@ -11,12 +11,13 @@ import (
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/samber/lo"
"github.com/spf13/cobra"
)
type migrateUpCmdOptions struct {
DatabaseURL string
Runes bool
Modules string
}
type migrateUpCmdArgs struct {
@@ -54,7 +55,7 @@ func NewMigrateUpCommand() *cobra.Command {
}
flags := cmd.Flags()
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes up migrations")
flags.StringVar(&opts.Modules, "modules", "", "Modules to apply up migrations")
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")
return cmd
@@ -72,6 +73,8 @@ func migrateUpHandler(opts *migrateUpCmdOptions, _ *cobra.Command, args migrateU
return errors.Errorf("unsupported database driver: %s", databaseURL.Scheme)
}
modules := strings.Split(opts.Modules, ",")
applyUpMigrations := func(module string, sourcePath string, migrationTable string) error {
newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}})
sourceURL := "file://" + sourcePath
@@ -101,10 +104,15 @@ func migrateUpHandler(opts *migrateUpCmdOptions, _ *cobra.Command, args migrateU
return nil
}
if opts.Runes {
if lo.Contains(modules, "runes") {
if err := applyUpMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
if lo.Contains(modules, "brc20") {
if err := applyUpMigrations("BRC20", brc20MigrationSource, "brc20_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
return nil
}

View File

@@ -4,6 +4,7 @@ import "net/url"
const (
runesMigrationSource = "modules/runes/database/postgresql/migrations"
brc20MigrationSource = "modules/brc20/database/postgresql/migrations"
)
func cloneURLWithQuery(u *url.URL, newQuery url.Values) *url.URL {

6
common/http.go Normal file
View File

@@ -0,0 +1,6 @@
package common
type HttpResponse[T any] struct {
Error *string `json:"error"`
Result *T `json:"result,omitempty"`
}

View File

@@ -27,16 +27,26 @@ http_server:
disable: false # disable logger if logger level is `INFO`
request_header: false
request_query: false
# Client IP extraction configuration options.
# This is unnecessary if you don't care about the real client IP or if you're not using a reverse proxy.
requestip:
trusted_proxies_ip: # Cloudflare, GCP Public LB. See: server/internal/middleware/requestcontext/PROXY-IP.md
trusted_proxies_header: # X-Real-IP, CF-Connecting-IP
enable_reject_malformed_request: false # return 403 if request is malformed (invalid IP)
requestip: # Client IP extraction configuration options. This is unnecessary if you don't care about the real client IP or if you're not using a reverse proxy.
trusted_proxies_ip: # Cloudflare, GCP Public LB. See: server/internal/middleware/requestcontext/PROXY-IP.md
trusted_proxies_header: # X-Real-IP, CF-Connecting-IP
enable_reject_malformed_request: false # return 403 if request is malformed (invalid IP)
# Meta-protocol modules configuration options.
modules:
# Configuration options for BRC20 module. Can be removed if not used.
brc20:
database: "postgres" # Database to store BRC20 data. current supported databases: "postgres"
datasource: "database" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node".
api_handlers: # API handlers to enable. current supported handlers: "http"
- http
postgres:
host: "localhost"
port: 5432
user: "postgres"
password: "password"
db_name: "postgres"
# url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
# Configuration options for Runes module. Can be removed if not used.
runes:
database: "postgres" # Database to store Runes data. current supported databases: "postgres"

View File

@@ -292,3 +292,19 @@ func (d *BitcoinNodeDatasource) GetBlockHeader(ctx context.Context, height int64
return types.ParseMsgBlockHeader(*block, height), nil
}
// GetTransaction fetch transaction from Bitcoin node
func (d *BitcoinNodeDatasource) GetTransactionOutputs(ctx context.Context, txHash chainhash.Hash) ([]*types.TxOut, error) {
rawTx, err := d.btcclient.GetRawTransaction(&txHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get raw transaction")
}
msgTx := rawTx.MsgTx()
txOuts := make([]*types.TxOut, 0, len(msgTx.TxOut))
for _, txOut := range msgTx.TxOut {
txOuts = append(txOuts, types.ParseTxOut(txOut))
}
return txOuts, nil
}

22
go.mod
View File

@@ -6,11 +6,13 @@ require (
github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11
github.com/btcsuite/btcd v0.24.0
github.com/btcsuite/btcd/btcutil v1.1.5
github.com/btcsuite/btcd/btcutil/psbt v1.1.9
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0
github.com/cockroachdb/errors v1.11.1
github.com/gaze-network/uint128 v1.3.0
github.com/gofiber/fiber/v2 v2.52.4
github.com/golang-migrate/migrate/v4 v4.17.1
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/jackc/pgx/v5 v5.5.5
github.com/mcosta74/pgx-slog v0.3.0
github.com/planxnx/concurrent-stream v0.1.5
@@ -20,23 +22,24 @@ require (
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.8.4
github.com/stretchr/testify v1.9.0
github.com/valyala/fasthttp v1.51.0
go.uber.org/automaxprocs v1.5.3
golang.org/x/sync v0.5.0
golang.org/x/sync v0.7.0
)
require (
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.1.3 // indirect
github.com/bitonicnl/verify-signed-message v0.7.1
github.com/btcsuite/btcd/btcec/v2 v2.3.3 // indirect
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
@@ -44,6 +47,7 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
@@ -74,10 +78,10 @@ require (
github.com/valyala/tcplisten v1.0.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.20.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

54
go.sum
View File

@@ -7,18 +7,23 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/bitonicnl/verify-signed-message v0.7.1 h1:1Qku9k9WgzobjqBY7tT3CLjWxtTJZxkYNhOV6QeCTjY=
github.com/bitonicnl/verify-signed-message v0.7.1/go.mod h1:PR60twfJIaHEo9Wb6eJBh8nBHEZIQQx8CvRwh0YmEPk=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A=
github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo=
github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcec/v2 v2.3.3 h1:6+iXlDKE8RMtKsvK0gshlXIuPbyWM/h84Ensb7o3sC0=
github.com/btcsuite/btcd/btcec/v2 v2.3.3/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8=
github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00=
github.com/btcsuite/btcd/btcutil/psbt v1.1.9 h1:UmfOIiWMZcVMOLaN+lxbbLSuoINGS1WmK1TZNI0b4yk=
github.com/btcsuite/btcd/btcutil/psbt v1.1.9/go.mod h1:ehBEvU91lxSlXtA+zZz3iFYx7Yq9eqnKx4/kSrnsvMY=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
@@ -50,10 +55,12 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg=
github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA=
@@ -92,12 +99,13 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -107,8 +115,12 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -218,10 +230,12 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -243,14 +257,14 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d h1:N0hmiNbwsSNwHBAvR3QB5w25pUwH4tK0Y/RltD1j1h4=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -265,8 +279,8 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -279,19 +293,19 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -8,6 +8,7 @@ import (
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
brc20config "github.com/gaze-network/indexer-network/modules/brc20/config"
runesconfig "github.com/gaze-network/indexer-network/modules/runes/config"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
@@ -43,15 +44,14 @@ var (
)
type Config struct {
EnableModules []string `mapstructure:"enable_modules"`
APIOnly bool `mapstructure:"api_only"`
Logger logger.Config `mapstructure:"logger"`
BitcoinNode BitcoinNodeClient `mapstructure:"bitcoin_node"`
Network common.Network `mapstructure:"network"`
HTTPServer HTTPServerConfig `mapstructure:"http_server"`
Modules Modules `mapstructure:"modules"`
Reporting reportingclient.Config `mapstructure:"reporting"`
RequestIP requestcontext.WithClientIPConfig `mapstructure:"requestip"`
EnableModules []string `mapstructure:"enable_modules"`
APIOnly bool `mapstructure:"api_only"`
Logger logger.Config `mapstructure:"logger"`
BitcoinNode BitcoinNodeClient `mapstructure:"bitcoin_node"`
Network common.Network `mapstructure:"network"`
HTTPServer HTTPServerConfig `mapstructure:"http_server"`
Modules Modules `mapstructure:"modules"`
Reporting reportingclient.Config `mapstructure:"reporting"`
}
type BitcoinNodeClient struct {
@@ -63,11 +63,13 @@ type BitcoinNodeClient struct {
type Modules struct {
Runes runesconfig.Config `mapstructure:"runes"`
BRC20 brc20config.Config `mapstructure:"brc20"`
}
type HTTPServerConfig struct {
Port int `mapstructure:"port"`
Logger requestlogger.Config `mapstructure:"logger"`
Port int `mapstructure:"port"`
Logger requestlogger.Config `mapstructure:"logger"`
RequestIP requestcontext.WithClientIPConfig `mapstructure:"requestip"`
}
// Parse parse the configuration from environment variables

View File

@@ -0,0 +1,115 @@
package httphandler
import (
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/pkg/btcutils"
"github.com/gaze-network/indexer-network/pkg/decimals"
"github.com/gofiber/fiber/v2"
"github.com/holiman/uint256"
"github.com/samber/lo"
)
// getBalancesByAddressRequest captures the path and query parameters of the
// single-wallet balances endpoint.
type getBalancesByAddressRequest struct {
	Wallet      string `params:"wallet"`
	Id          string `query:"id"`
	BlockHeight uint64 `query:"blockHeight"`
}

// Validate reports a public validation error when required fields are missing.
func (r getBalancesByAddressRequest) Validate() error {
	var issues []error
	if len(r.Wallet) == 0 {
		issues = append(issues, errors.New("'wallet' is required"))
	}
	return errs.WithPublicMessage(errors.Join(issues...), "validation error")
}
// balanceExtend carries the BRC-20-specific split of a balance.
type balanceExtend struct {
	// Transferable is computed by the handlers as overall - available.
	Transferable *uint256.Int `json:"transferable"`
	Available    *uint256.Int `json:"available"`
}

// balance is one tick's balance entry in the wallet balances responses.
// Amount values are scaled by Decimals via decimals.ToUint256.
type balance struct {
	Amount   *uint256.Int  `json:"amount"`
	Id       string        `json:"id"`     // tick identifier
	Name     string        `json:"name"`   // populated from entry.OriginalTick
	Symbol   string        `json:"symbol"` // populated from entry.Tick
	Decimals uint16        `json:"decimals"`
	Extend   balanceExtend `json:"extend"`
}

// getBalancesByAddressResult is the payload of GetBalancesByAddress.
type getBalancesByAddressResult struct {
	List        []balance `json:"list"`
	BlockHeight uint64    `json:"blockHeight"` // height the balances were evaluated at
}

// getBalancesByAddressResponse is the HTTP envelope of GetBalancesByAddress.
type getBalancesByAddressResponse = common.HttpResponse[getBalancesByAddressResult]
// GetBalancesByAddress handles GET /balances/wallet/:wallet. It returns all
// BRC-20 balances held by the wallet at the requested block height
// (defaulting to the latest indexed block), sorted by amount descending.
func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
	var req getBalancesByAddressRequest
	if err := ctx.ParamsParser(&req); err != nil {
		return errors.WithStack(err)
	}
	if err := ctx.QueryParser(&req); err != nil {
		return errors.WithStack(err)
	}
	if err := req.Validate(); err != nil {
		return errors.WithStack(err)
	}

	pkScript, err := btcutils.ToPkScript(h.network, req.Wallet)
	if err != nil {
		return errs.NewPublicError("unable to resolve pkscript from \"wallet\"")
	}

	// Fall back to the latest indexed block when no height was requested.
	blockHeight := req.BlockHeight
	if blockHeight == 0 {
		latest, err := h.usecase.GetLatestBlock(ctx.UserContext())
		if err != nil {
			return errors.Wrap(err, "error during GetLatestBlock")
		}
		blockHeight = uint64(latest.Height)
	}

	balances, err := h.usecase.GetBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight)
	if err != nil {
		return errors.Wrap(err, "error during GetBalancesByPkScript")
	}
	// Tick entries provide display names and decimals for each balance.
	entries, err := h.usecase.GetTickEntryByTickBatch(ctx.UserContext(), lo.Keys(balances))
	if err != nil {
		return errors.Wrap(err, "error during GetTickEntryByTickBatch")
	}

	balanceList := make([]balance, 0, len(balances))
	for tick, bal := range balances {
		entry := entries[tick]
		balanceList = append(balanceList, balance{
			Amount:   decimals.ToUint256(bal.OverallBalance, entry.Decimals),
			Id:       tick,
			Name:     entry.OriginalTick,
			Symbol:   entry.Tick,
			Decimals: entry.Decimals,
			Extend: balanceExtend{
				Transferable: decimals.ToUint256(bal.OverallBalance.Sub(bal.AvailableBalance), entry.Decimals),
				Available:    decimals.ToUint256(bal.AvailableBalance, entry.Decimals),
			},
		})
	}
	// Largest balances first.
	slices.SortFunc(balanceList, func(a, b balance) int {
		return b.Amount.Cmp(a.Amount)
	})

	resp := getBalancesByAddressResponse{
		Result: &getBalancesByAddressResult{
			BlockHeight: blockHeight,
			List:        balanceList,
		},
	}
	return errors.WithStack(ctx.JSON(resp))
}

View File

@@ -0,0 +1,125 @@
package httphandler
import (
"context"
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/pkg/btcutils"
"github.com/gaze-network/indexer-network/pkg/decimals"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
"golang.org/x/sync/errgroup"
)
// getBalancesByAddressBatchRequest is the JSON body of the batch balances
// endpoint: one entry per wallet/height query.
type getBalancesByAddressBatchRequest struct {
	Queries []getBalancesByAddressRequest `json:"queries"`
}

// Validate checks every query in the batch and reports all failures at once
// as a single public validation error.
func (r getBalancesByAddressBatchRequest) Validate() error {
	var errList []error
	for i, query := range r.Queries {
		if query.Wallet == "" {
			// Bug fix: the original Errorf had a %d verb but no argument,
			// which rendered as "queries[%!d(MISSING)]".
			errList = append(errList, errors.Errorf("queries[%d]: 'wallet' is required", i))
		}
	}
	return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
// getBalancesByAddressBatchResult holds one result per input query, in the
// same order as the request's queries.
type getBalancesByAddressBatchResult struct {
	List []*getBalancesByAddressResult `json:"list"`
}

// getBalancesByAddressBatchResponse is the HTTP envelope of the batch endpoint.
type getBalancesByAddressBatchResponse = common.HttpResponse[getBalancesByAddressBatchResult]

// GetBalancesByAddressBatch handles POST /balances/wallet/batch. It resolves
// each query concurrently and returns results positionally aligned with the
// request's queries.
func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
	var req getBalancesByAddressBatchRequest
	if err := ctx.BodyParser(&req); err != nil {
		return errors.WithStack(err)
	}
	if err := req.Validate(); err != nil {
		return errors.WithStack(err)
	}
	// Resolve the latest block height once up front; queries without an
	// explicit blockHeight all reuse it.
	var latestBlockHeight uint64
	blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
	if err != nil {
		return errors.Wrap(err, "error during GetLatestBlock")
	}
	latestBlockHeight = uint64(blockHeader.Height)
	// processQuery resolves a single query to its sorted balance list.
	processQuery := func(ctx context.Context, query getBalancesByAddressRequest) (*getBalancesByAddressResult, error) {
		pkScript, err := btcutils.ToPkScript(h.network, query.Wallet)
		if err != nil {
			return nil, errs.NewPublicError("unable to resolve pkscript from \"wallet\"")
		}
		blockHeight := query.BlockHeight
		if blockHeight == 0 {
			blockHeight = latestBlockHeight
		}
		balances, err := h.usecase.GetBalancesByPkScript(ctx, pkScript, blockHeight)
		if err != nil {
			return nil, errors.Wrap(err, "error during GetBalancesByPkScript")
		}
		balanceRuneIds := lo.Keys(balances)
		entries, err := h.usecase.GetTickEntryByTickBatch(ctx, balanceRuneIds)
		if err != nil {
			return nil, errors.Wrap(err, "error during GetTickEntryByTickBatch")
		}
		balanceList := make([]balance, 0, len(balances))
		for id, b := range balances {
			entry := entries[id]
			balanceList = append(balanceList, balance{
				Amount:   decimals.ToUint256(b.OverallBalance, entry.Decimals),
				Id:       id,
				Name:     entry.OriginalTick,
				Symbol:   entry.Tick,
				Decimals: entry.Decimals,
				Extend: balanceExtend{
					// Transferable portion = overall - available.
					Transferable: decimals.ToUint256(b.OverallBalance.Sub(b.AvailableBalance), entry.Decimals),
					Available:    decimals.ToUint256(b.AvailableBalance, entry.Decimals),
				},
			})
		}
		// Sort by amount descending.
		slices.SortFunc(balanceList, func(i, j balance) int {
			return j.Amount.Cmp(i.Amount)
		})
		return &getBalancesByAddressResult{
			BlockHeight: blockHeight,
			List:        balanceList,
		}, nil
	}
	// Run the queries concurrently; results[i] corresponds to req.Queries[i].
	results := make([]*getBalancesByAddressResult, len(req.Queries))
	eg, ectx := errgroup.WithContext(ctx.UserContext())
	for i, query := range req.Queries {
		i := i         // capture loop variables (pre-Go 1.22 semantics)
		query := query // capture loop variables (pre-Go 1.22 semantics)
		eg.Go(func() error {
			result, err := processQuery(ectx, query)
			if err != nil {
				return errors.Wrapf(err, "error during processQuery for query %d", i)
			}
			results[i] = result
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return errors.WithStack(err)
	}
	resp := getBalancesByAddressBatchResponse{
		Result: &getBalancesByAddressBatchResult{
			List: results,
		},
	}
	return errors.WithStack(ctx.JSON(resp))
}

View File

@@ -0,0 +1,49 @@
package httphandler
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gofiber/fiber/v2"
)
// TODO: use modules/brc20/constants.go
// startingBlockHeader is the BRC-20 indexing start block per network, used
// as a fallback when no block has been indexed yet.
var startingBlockHeader = map[common.Network]types.BlockHeader{
	common.NetworkMainnet: {
		Height: 767429,
		Hash:   *utils.Must(chainhash.NewHashFromStr("00000000000000000002b35aef66eb15cd2b232a800f75a2f25cedca4cfe52c4")),
	},
	common.NetworkTestnet: {
		Height: 2413342,
		Hash:   *utils.Must(chainhash.NewHashFromStr("00000000000022e97030b143af785de812f836dd0651b6ac2b7dd9e90dc9abf9")),
	},
}

// getCurrentBlockResult is the payload of GetCurrentBlock.
type getCurrentBlockResult struct {
	Hash   string `json:"hash"`
	Height int64  `json:"height"`
}

// getCurrentBlockResponse is the HTTP envelope of GetCurrentBlock.
type getCurrentBlockResponse = common.HttpResponse[getCurrentBlockResult]

// GetCurrentBlock handles GET /block. It returns the latest indexed block,
// falling back to the network's starting block when nothing has been
// indexed yet.
func (h *HttpHandler) GetCurrentBlock(ctx *fiber.Ctx) (err error) {
	blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
	if err != nil {
		if !errors.Is(err, errs.NotFound) {
			return errors.Wrap(err, "error during get latest block")
		}
		// No block indexed yet: report the module's starting block instead.
		blockHeader = startingBlockHeader[h.network]
	}
	resp := getCurrentBlockResponse{
		Result: &getCurrentBlockResult{
			Hash:   blockHeader.Hash.String(),
			Height: blockHeader.Height,
		},
	}
	return errors.WithStack(ctx.JSON(resp))
}

View File

@@ -0,0 +1,107 @@
package httphandler
import (
"encoding/hex"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/pkg/btcutils"
"github.com/gaze-network/indexer-network/pkg/decimals"
"github.com/gofiber/fiber/v2"
"github.com/holiman/uint256"
)
// getHoldersRequest captures the path/query parameters of GET /holders/:id.
type getHoldersRequest struct {
	Id          string `params:"id"`
	BlockHeight uint64 `query:"blockHeight"`
}

// Validate currently performs no checks; the empty errList joins to nil.
// Kept for consistency with the other request types.
func (r getHoldersRequest) Validate() error {
	var errList []error
	return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}

// holdingBalanceExtend splits a holder's balance into transferable and
// available portions (transferable = overall - available).
type holdingBalanceExtend struct {
	Transferable *uint256.Int `json:"transferable"`
	Available    *uint256.Int `json:"available"`
}

// holdingBalance is one holder's position for the requested tick.
type holdingBalance struct {
	Address  string               `json:"address"`
	PkScript string               `json:"pkScript"` // hex-encoded output script
	Amount   *uint256.Int         `json:"amount"`   // scaled by the tick's decimals
	Percent  float64              `json:"percent"`  // fraction of total supply (overall / total)
	Extend   holdingBalanceExtend `json:"extend"`
}

// getHoldersResult is the payload of GetHolders.
type getHoldersResult struct {
	BlockHeight  uint64           `json:"blockHeight"`
	TotalSupply  *uint256.Int     `json:"totalSupply"`
	MintedAmount *uint256.Int     `json:"mintedAmount"`
	Decimals     uint16           `json:"decimals"`
	List         []holdingBalance `json:"list"`
}

// getHoldersResponse is the HTTP envelope of GetHolders.
type getHoldersResponse = common.HttpResponse[getHoldersResult]
// GetHolders handles GET /holders/:id. It returns every holder's balance for
// the given tick at the requested block height (defaulting to the latest
// indexed block), along with the tick's supply figures.
func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
	var req getHoldersRequest
	if err := ctx.ParamsParser(&req); err != nil {
		return errors.WithStack(err)
	}
	if err := ctx.QueryParser(&req); err != nil {
		return errors.WithStack(err)
	}
	if err := req.Validate(); err != nil {
		return errors.WithStack(err)
	}
	// Default to the latest indexed block when no height was requested.
	blockHeight := req.BlockHeight
	if blockHeight == 0 {
		blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
		if err != nil {
			return errors.Wrap(err, "error during GetLatestBlock")
		}
		blockHeight = uint64(blockHeader.Height)
	}
	entry, err := h.usecase.GetTickEntryByTickAndHeight(ctx.UserContext(), req.Id, blockHeight)
	if err != nil {
		return errors.Wrap(err, "error during GetTickEntryByTickAndHeight")
	}
	holdingBalances, err := h.usecase.GetBalancesByTick(ctx.UserContext(), req.Id, blockHeight)
	if err != nil {
		return errors.Wrap(err, "error during GetBalancesByTick")
	}
	list := make([]holdingBalance, 0, len(holdingBalances))
	for _, balance := range holdingBalances {
		address, err := btcutils.PkScriptToAddress(balance.PkScript, h.network)
		if err != nil {
			return errors.Wrapf(err, "can't convert pkscript(%x) to address", balance.PkScript)
		}
		// Guard against a zero total supply: decimal division by zero panics.
		var percent float64
		if !entry.TotalSupply.IsZero() {
			percent = balance.OverallBalance.Div(entry.TotalSupply).InexactFloat64()
		}
		list = append(list, holdingBalance{
			Address:  address,
			PkScript: hex.EncodeToString(balance.PkScript),
			Amount:   decimals.ToUint256(balance.OverallBalance, entry.Decimals),
			Percent:  percent,
			Extend: holdingBalanceExtend{
				Transferable: decimals.ToUint256(balance.OverallBalance.Sub(balance.AvailableBalance), entry.Decimals),
				Available:    decimals.ToUint256(balance.AvailableBalance, entry.Decimals),
			},
		})
	}
	resp := getHoldersResponse{
		Result: &getHoldersResult{
			BlockHeight:  blockHeight,
			TotalSupply:  decimals.ToUint256(entry.TotalSupply, entry.Decimals),   // TODO: convert to wei
			MintedAmount: decimals.ToUint256(entry.MintedAmount, entry.Decimals), // TODO: convert to wei
			List:         list,
		},
	}
	return errors.WithStack(ctx.JSON(resp))
}

View File

@@ -0,0 +1,150 @@
package httphandler
import (
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/pkg/btcutils"
"github.com/gaze-network/indexer-network/pkg/decimals"
"github.com/gofiber/fiber/v2"
"github.com/holiman/uint256"
"github.com/samber/lo"
"golang.org/x/sync/errgroup"
)
// getTokenInfoRequest captures the path/query parameters of GET /info/:id.
type getTokenInfoRequest struct {
	Id          string `params:"id"`
	BlockHeight uint64 `query:"blockHeight"`
}

// Validate currently performs no checks; kept for consistency with the
// other request types.
func (r getTokenInfoRequest) Validate() error {
	var errList []error
	return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}

// tokenInfoExtend carries deploy-inscription details of a tick.
type tokenInfoExtend struct {
	DeployedBy              string       `json:"deployedBy"` // address derived from the deploy event's pkscript
	LimitPerMint            *uint256.Int `json:"limitPerMint"`
	DeployInscriptionId     string       `json:"deployInscriptionId"`
	DeployInscriptionNumber int64        `json:"deployInscriptionNumber"`
	InscriptionStartNumber  int64        `json:"inscriptionStartNumber"`
	InscriptionEndNumber    int64        `json:"inscriptionEndNumber"`
}

// getTokenInfoResult is the payload of GetTokenInfo. Amount fields are
// scaled by Decimals via decimals.ToUint256.
type getTokenInfoResult struct {
	Id                string          `json:"id"`
	Name              string          `json:"name"`
	Symbol            string          `json:"symbol"`
	TotalSupply       *uint256.Int    `json:"totalSupply"`
	CirculatingSupply *uint256.Int    `json:"circulatingSupply"` // minted - burned
	MintedAmount      *uint256.Int    `json:"mintedAmount"`
	BurnedAmount      *uint256.Int    `json:"burnedAmount"`
	Decimals          uint16          `json:"decimals"`
	DeployedAt        uint64          `json:"deployedAt"` // unix seconds
	DeployedAtHeight  uint64          `json:"deployedAtHeight"`
	CompletedAt       *uint64         `json:"completedAt"`       // nil while mint-out has not happened
	CompletedAtHeight *uint64         `json:"completedAtHeight"` // nil while mint-out has not happened
	HoldersCount      int             `json:"holdersCount"`
	Extend            tokenInfoExtend `json:"extend"`
}

// getTokenInfoResponse is the HTTP envelope of GetTokenInfo.
type getTokenInfoResponse = common.HttpResponse[getTokenInfoResult]
// GetTokenInfo handles GET /info/:id. It returns tick metadata, supply
// figures, holder count, and deploy-inscription details at the requested
// block height (defaulting to the latest indexed block).
func (h *HttpHandler) GetTokenInfo(ctx *fiber.Ctx) (err error) {
	var req getTokenInfoRequest
	if err := ctx.ParamsParser(&req); err != nil {
		return errors.WithStack(err)
	}
	if err := ctx.QueryParser(&req); err != nil {
		return errors.WithStack(err)
	}
	if err := req.Validate(); err != nil {
		return errors.WithStack(err)
	}
	blockHeight := req.BlockHeight
	if blockHeight == 0 {
		blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
		if err != nil {
			return errors.Wrap(err, "error during GetLatestBlock")
		}
		blockHeight = uint64(blockHeader.Height)
	}
	// Fetch the independent pieces concurrently. Each goroutine uses its own
	// local err: the previous code assigned to the handler's shared named
	// return `err` from all four goroutines, which is a data race.
	group, groupctx := errgroup.WithContext(ctx.UserContext())
	var (
		entry                                         *entity.TickEntry
		firstInscriptionNumber, lastInscriptionNumber int64
		deployEvent                                   *entity.EventDeploy
		holdingBalances                               []*entity.Balance
	)
	group.Go(func() error {
		event, err := h.usecase.GetDeployEventByTick(groupctx, req.Id)
		if err != nil {
			return errors.Wrap(err, "error during GetDeployEventByTick")
		}
		deployEvent = event
		return nil
	})
	group.Go(func() error {
		// TODO: at block height to parameter.
		first, last, err := h.usecase.GetFirstLastInscriptionNumberByTick(groupctx, req.Id)
		if err != nil {
			return errors.Wrap(err, "error during GetFirstLastInscriptionNumberByTick")
		}
		firstInscriptionNumber, lastInscriptionNumber = first, last
		return nil
	})
	group.Go(func() error {
		e, err := h.usecase.GetTickEntryByTickAndHeight(groupctx, req.Id, blockHeight)
		if err != nil {
			return errors.Wrap(err, "error during GetTickEntryByTickAndHeight")
		}
		entry = e
		return nil
	})
	group.Go(func() error {
		balances, err := h.usecase.GetBalancesByTick(groupctx, req.Id, blockHeight)
		if err != nil {
			return errors.Wrap(err, "error during GetBalancesByTick")
		}
		// Holders are addresses with a non-zero overall balance.
		holdingBalances = lo.Filter(balances, func(b *entity.Balance, _ int) bool {
			return !b.OverallBalance.IsZero()
		})
		return nil
	})
	if err := group.Wait(); err != nil {
		return errors.WithStack(err)
	}
	address, err := btcutils.PkScriptToAddress(deployEvent.PkScript, h.network)
	if err != nil {
		return errors.Wrapf(err, `error during PkScriptToAddress for pkscript: %x, network: %v`, deployEvent.PkScript, h.network)
	}
	resp := getTokenInfoResponse{
		Result: &getTokenInfoResult{
			Id:                entry.Tick,
			Name:              entry.OriginalTick,
			Symbol:            entry.Tick,
			TotalSupply:       decimals.ToUint256(entry.TotalSupply, entry.Decimals),
			CirculatingSupply: decimals.ToUint256(entry.MintedAmount.Sub(entry.BurnedAmount), entry.Decimals),
			MintedAmount:      decimals.ToUint256(entry.MintedAmount, entry.Decimals),
			BurnedAmount:      decimals.ToUint256(entry.BurnedAmount, entry.Decimals),
			Decimals:          entry.Decimals,
			DeployedAt:        uint64(entry.DeployedAt.Unix()),
			DeployedAtHeight:  entry.DeployedAtHeight,
			CompletedAt:       lo.Ternary(entry.CompletedAt.IsZero(), nil, lo.ToPtr(uint64(entry.CompletedAt.Unix()))),
			CompletedAtHeight: lo.Ternary(entry.CompletedAtHeight == 0, nil, lo.ToPtr(entry.CompletedAtHeight)),
			HoldersCount:      len(holdingBalances),
			Extend: tokenInfoExtend{
				DeployedBy:              address,
				LimitPerMint:            decimals.ToUint256(entry.LimitPerMint, entry.Decimals),
				DeployInscriptionId:     deployEvent.InscriptionId.String(),
				DeployInscriptionNumber: deployEvent.InscriptionNumber,
				// Fall back to the deploy inscription number when no
				// first/last inscription numbers were found (negative values).
				InscriptionStartNumber: lo.Ternary(firstInscriptionNumber < 0, deployEvent.InscriptionNumber, firstInscriptionNumber),
				InscriptionEndNumber:   lo.Ternary(lastInscriptionNumber < 0, deployEvent.InscriptionNumber, lastInscriptionNumber),
			},
		},
	}
	return errors.WithStack(ctx.JSON(resp))
}

View File

@@ -0,0 +1,454 @@
package httphandler
import (
"bytes"
"cmp"
"encoding/hex"
"slices"
"strings"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/pkg/btcutils"
"github.com/gaze-network/indexer-network/pkg/decimals"
"github.com/gofiber/fiber/v2"
"github.com/holiman/uint256"
"github.com/samber/lo"
"github.com/shopspring/decimal"
"golang.org/x/sync/errgroup"
)
// ops are the accepted values of the "op" query filter.
var ops = []string{"inscribe-deploy", "inscribe-mint", "inscribe-transfer", "transfer-transfer"}

// getTransactionsRequest captures the query parameters of GET /transactions.
// All filters are optional.
type getTransactionsRequest struct {
	Wallet      string `query:"wallet"`
	Id          string `query:"id"`
	BlockHeight uint64 `query:"blockHeight"`
	Op          string `query:"op"`
}

// Validate rejects an "op" value outside the supported ops list.
func (r getTransactionsRequest) Validate() error {
	var errList []error
	if r.Op != "" {
		if !lo.Contains(ops, r.Op) {
			errList = append(errList, errors.Errorf("invalid 'op' value: %s, supported values: %s", r.Op, strings.Join(ops, ", ")))
		}
	}
	return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}

// txOpDeployArg mirrors the inscription JSON of a deploy operation.
type txOpDeployArg struct {
	Op       string          `json:"op"`
	Tick     string          `json:"tick"`
	Max      decimal.Decimal `json:"max"`
	Lim      decimal.Decimal `json:"lim"`
	Dec      uint16          `json:"dec"`
	SelfMint bool            `json:"self_mint"`
}

// txOpGeneralArg mirrors the inscription JSON of mint/transfer operations.
type txOpGeneralArg struct {
	Op     string          `json:"op"`
	Tick   string          `json:"tick"`
	Amount decimal.Decimal `json:"amt"`
}

// txOperation is the common envelope of one BRC-20 operation inside a tx.
type txOperation[T any] struct {
	InscriptionId     string `json:"inscriptionId"`
	InscriptionNumber int64  `json:"inscriptionNumber"`
	Op                string `json:"op"`
	Args              T      `json:"args"`
}

// txOperationsDeploy is a deploy operation plus the deployer address.
type txOperationsDeploy struct {
	txOperation[txOpDeployArg]
	Address string `json:"address"`
}

// txOperationsMint is a mint operation plus the minter address.
type txOperationsMint struct {
	txOperation[txOpGeneralArg]
	Address string `json:"address"`
}

// txOperationsInscribeTransfer is an inscribe-transfer operation plus the
// holding address and the output carrying the inscription.
type txOperationsInscribeTransfer struct {
	txOperation[txOpGeneralArg]
	Address     string `json:"address"`
	OutputIndex uint32 `json:"outputIndex"`
	Sats        uint64 `json:"sats"`
}

// txOperationsTransferTransfer is a transfer-transfer operation with sender
// and receiver addresses (an empty ToAddress is treated as a burn).
type txOperationsTransferTransfer struct {
	txOperation[txOpGeneralArg]
	FromAddress string `json:"fromAddress"`
	ToAddress   string `json:"toAddress"`
}

// transactionExtend holds the raw operation list of a transaction; entries
// are the txOperations* types above.
type transactionExtend struct {
	Operations []any `json:"operations"`
}

// amountWithDecimal pairs a scaled amount with the tick's decimals.
type amountWithDecimal struct {
	Amount   *uint256.Int `json:"amount"`
	Decimals uint16       `json:"decimals"`
}

// txInputOutput is a BRC-20-relevant input or output of a transaction.
type txInputOutput struct {
	PkScript string       `json:"pkScript"` // hex-encoded output script
	Address  string       `json:"address"`
	Id       string       `json:"id"` // tick identifier
	Amount   *uint256.Int `json:"amount"`
	Decimals uint16       `json:"decimals"`
	Index    uint32       `json:"index"`
}

// transaction is one confirmed transaction with its merged BRC-20 activity.
type transaction struct {
	TxHash      chainhash.Hash               `json:"txHash"`
	BlockHeight uint64                       `json:"blockHeight"`
	Index       uint32                       `json:"index"`
	Timestamp   int64                        `json:"timestamp"` // unix seconds
	Inputs      []txInputOutput              `json:"inputs"`
	Outputs     []txInputOutput              `json:"outputs"`
	Mints       map[string]amountWithDecimal `json:"mints"` // tick -> minted amount
	Burns       map[string]amountWithDecimal `json:"burns"` // tick -> burned amount
	Extend      transactionExtend            `json:"extend"`
}

// getTransactionsResult is the payload of GetTransactions.
type getTransactionsResult struct {
	List []transaction `json:"list"`
}

// getTransactionsResponse is the HTTP envelope of GetTransactions.
type getTransactionsResponse = common.HttpResponse[getTransactionsResult]
// GetTransactions handles GET /transactions. It collects BRC-20 events
// (inscribe-deploy, inscribe-mint, inscribe-transfer, transfer-transfer)
// filtered by optional wallet, tick id, block height, and op type, merges
// events that share a tx hash into a single transaction, and returns the
// list sorted by block height then tx index.
func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
	var req getTransactionsRequest
	if err := ctx.QueryParser(&req); err != nil {
		return errors.WithStack(err)
	}
	if err := req.Validate(); err != nil {
		return errors.WithStack(err)
	}
	var pkScript []byte
	if req.Wallet != "" {
		pkScript, err = btcutils.ToPkScript(h.network, req.Wallet)
		if err != nil {
			return errs.NewPublicError("unable to resolve pkscript from \"wallet\"")
		}
	}
	blockHeight := req.BlockHeight
	// Set blockHeight to the latest block height if blockHeight, pkScript, and id are all unset.
	if blockHeight == 0 && pkScript == nil && req.Id == "" {
		blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
		if err != nil {
			return errors.Wrap(err, "error during GetLatestBlock")
		}
		blockHeight = uint64(blockHeader.Height)
	}
	// Fetch every requested event type concurrently.
	var (
		deployEvents           []*entity.EventDeploy
		mintEvents             []*entity.EventMint
		transferTransferEvents []*entity.EventTransferTransfer
		inscribeTransferEvents []*entity.EventInscribeTransfer
	)
	group, groupctx := errgroup.WithContext(ctx.UserContext())
	if req.Op == "" || req.Op == "inscribe-deploy" {
		group.Go(func() error {
			events, err := h.usecase.GetDeployEvents(groupctx, pkScript, req.Id, blockHeight)
			deployEvents = events
			return errors.Wrap(err, "error during get inscribe-deploy events")
		})
	}
	if req.Op == "" || req.Op == "inscribe-mint" {
		group.Go(func() error {
			events, err := h.usecase.GetMintEvents(groupctx, pkScript, req.Id, blockHeight)
			mintEvents = events
			return errors.Wrap(err, "error during get inscribe-mint events")
		})
	}
	if req.Op == "" || req.Op == "transfer-transfer" {
		group.Go(func() error {
			events, err := h.usecase.GetTransferTransferEvents(groupctx, pkScript, req.Id, blockHeight)
			transferTransferEvents = events
			return errors.Wrap(err, "error during get transfer-transfer events")
		})
	}
	if req.Op == "" || req.Op == "inscribe-transfer" {
		group.Go(func() error {
			events, err := h.usecase.GetInscribeTransferEvents(groupctx, pkScript, req.Id, blockHeight)
			inscribeTransferEvents = events
			return errors.Wrap(err, "error during get inscribe-transfer events")
		})
	}
	if err := group.Wait(); err != nil {
		return errors.WithStack(err)
	}
	// Resolve tick entries (for decimals) for every tick seen in any event.
	allTicks := make([]string, 0, len(deployEvents)+len(mintEvents)+len(transferTransferEvents)+len(inscribeTransferEvents))
	allTicks = append(allTicks, lo.Map(deployEvents, func(event *entity.EventDeploy, _ int) string { return event.Tick })...)
	allTicks = append(allTicks, lo.Map(mintEvents, func(event *entity.EventMint, _ int) string { return event.Tick })...)
	allTicks = append(allTicks, lo.Map(transferTransferEvents, func(event *entity.EventTransferTransfer, _ int) string { return event.Tick })...)
	allTicks = append(allTicks, lo.Map(inscribeTransferEvents, func(event *entity.EventInscribeTransfer, _ int) string { return event.Tick })...)
	entries, err := h.usecase.GetTickEntryByTickBatch(ctx.UserContext(), lo.Uniq(allTicks))
	if err != nil {
		return errors.Wrap(err, "error during GetTickEntryByTickBatch")
	}
	rawTxList := make([]transaction, 0, len(deployEvents)+len(mintEvents)+len(transferTransferEvents)+len(inscribeTransferEvents))
	// Deploy events
	for _, event := range deployEvents {
		address, err := btcutils.PkScriptToAddress(event.PkScript, h.network)
		if err != nil {
			return errors.Wrapf(err, `error during PkScriptToAddress for deploy event %s, pkscript: %x, network: %v`, event.TxHash, event.PkScript, h.network)
		}
		respTx := transaction{
			TxHash:      event.TxHash,
			BlockHeight: event.BlockHeight,
			Index:       event.TxIndex,
			Timestamp:   event.Timestamp.Unix(),
			Mints:       map[string]amountWithDecimal{},
			Burns:       map[string]amountWithDecimal{},
			Extend: transactionExtend{
				Operations: []any{
					txOperationsDeploy{
						txOperation: txOperation[txOpDeployArg]{
							InscriptionId:     event.InscriptionId.String(),
							InscriptionNumber: event.InscriptionNumber,
							Op:                "deploy",
							Args: txOpDeployArg{
								Op:       "deploy",
								Tick:     event.Tick,
								Max:      event.TotalSupply,
								Lim:      event.LimitPerMint,
								Dec:      event.Decimals,
								SelfMint: event.IsSelfMint,
							},
						},
						Address: address,
					},
				},
			},
		}
		rawTxList = append(rawTxList, respTx)
	}
	// Mint events
	for _, event := range mintEvents {
		entry := entries[event.Tick]
		address, err := btcutils.PkScriptToAddress(event.PkScript, h.network)
		if err != nil {
			return errors.Wrapf(err, `error during PkScriptToAddress for mint event %s, pkscript: %x, network: %v`, event.TxHash, event.PkScript, h.network)
		}
		amtWei := decimals.ToUint256(event.Amount, entry.Decimals)
		respTx := transaction{
			TxHash:      event.TxHash,
			BlockHeight: event.BlockHeight,
			Index:       event.TxIndex,
			Timestamp:   event.Timestamp.Unix(),
			Outputs: []txInputOutput{
				{
					PkScript: hex.EncodeToString(event.PkScript),
					Address:  address,
					Id:       event.Tick,
					Amount:   amtWei,
					Decimals: entry.Decimals,
					Index:    event.TxIndex,
				},
			},
			Mints: map[string]amountWithDecimal{
				event.Tick: {
					Amount:   amtWei,
					Decimals: entry.Decimals,
				},
			},
			Extend: transactionExtend{
				Operations: []any{
					txOperationsMint{
						txOperation: txOperation[txOpGeneralArg]{
							InscriptionId:     event.InscriptionId.String(),
							InscriptionNumber: event.InscriptionNumber,
							Op:                "inscribe-mint",
							Args: txOpGeneralArg{
								Op:     "inscribe-mint",
								Tick:   event.Tick,
								Amount: event.Amount,
							},
						},
						Address: address,
					},
				},
			},
		}
		rawTxList = append(rawTxList, respTx)
	}
	// Inscribe Transfer events
	for _, event := range inscribeTransferEvents {
		address, err := btcutils.PkScriptToAddress(event.PkScript, h.network)
		if err != nil {
			return errors.Wrapf(err, `error during PkScriptToAddress for inscribe-transfer event %s, pkscript: %x, network: %v`, event.TxHash, event.PkScript, h.network)
		}
		respTx := transaction{
			TxHash:      event.TxHash,
			BlockHeight: event.BlockHeight,
			Index:       event.TxIndex,
			Timestamp:   event.Timestamp.Unix(),
			Mints:       map[string]amountWithDecimal{},
			Burns:       map[string]amountWithDecimal{},
			Extend: transactionExtend{
				Operations: []any{
					txOperationsInscribeTransfer{
						txOperation: txOperation[txOpGeneralArg]{
							InscriptionId:     event.InscriptionId.String(),
							InscriptionNumber: event.InscriptionNumber,
							Op:                "inscribe-transfer",
							Args: txOpGeneralArg{
								Op:     "inscribe-transfer",
								Tick:   event.Tick,
								Amount: event.Amount,
							},
						},
						Address:     address,
						OutputIndex: event.SatPoint.OutPoint.Index,
						Sats:        event.SatsAmount,
					},
				},
			},
		}
		rawTxList = append(rawTxList, respTx)
	}
	// Transfer Transfer events
	for _, event := range transferTransferEvents {
		entry := entries[event.Tick]
		amtWei := decimals.ToUint256(event.Amount, entry.Decimals)
		fromAddress, err := btcutils.PkScriptToAddress(event.FromPkScript, h.network)
		if err != nil {
			return errors.Wrapf(err, `error during PkScriptToAddress for transfer-transfer event %s, pkscript: %x, network: %v`, event.TxHash, event.FromPkScript, h.network)
		}
		// An empty or bare OP_RETURN (0x6a) destination script has no address.
		toAddress := ""
		if len(event.ToPkScript) > 0 && !bytes.Equal(event.ToPkScript, []byte{0x6a}) {
			toAddress, err = btcutils.PkScriptToAddress(event.ToPkScript, h.network)
			if err != nil {
				return errors.Wrapf(err, `error during PkScriptToAddress for transfer-transfer event %s, pkscript: %x, network: %v`, event.TxHash, event.ToPkScript, h.network)
			}
		}
		// if toAddress is empty, it's a burn.
		burns := map[string]amountWithDecimal{}
		if len(toAddress) == 0 {
			burns[event.Tick] = amountWithDecimal{
				Amount:   amtWei,
				Decimals: entry.Decimals,
			}
		}
		respTx := transaction{
			TxHash:      event.TxHash,
			BlockHeight: event.BlockHeight,
			Index:       event.TxIndex,
			Timestamp:   event.Timestamp.Unix(),
			Inputs: []txInputOutput{
				{
					PkScript: hex.EncodeToString(event.FromPkScript),
					Address:  fromAddress,
					Id:       event.Tick,
					Amount:   amtWei,
					Decimals: entry.Decimals,
					// NOTE(review): reuses the destination output index —
					// confirm the event carries no source-side index.
					Index: event.ToOutputIndex,
				},
			},
			Outputs: []txInputOutput{
				{
					PkScript: hex.EncodeToString(event.ToPkScript),
					// Bug fix: the output previously reported fromAddress
					// even though its pkScript is the destination script.
					Address:  toAddress,
					Id:       event.Tick,
					Amount:   amtWei,
					Decimals: entry.Decimals,
					Index:    event.ToOutputIndex,
				},
			},
			Mints: map[string]amountWithDecimal{},
			Burns: burns,
			Extend: transactionExtend{
				Operations: []any{
					txOperationsTransferTransfer{
						txOperation: txOperation[txOpGeneralArg]{
							InscriptionId:     event.InscriptionId.String(),
							InscriptionNumber: event.InscriptionNumber,
							Op:                "transfer-transfer",
							Args: txOpGeneralArg{
								Op:     "transfer-transfer",
								Tick:   event.Tick,
								Amount: event.Amount,
							},
						},
						FromAddress: fromAddress,
						ToAddress:   toAddress,
					},
				},
			},
		}
		rawTxList = append(rawTxList, respTx)
	}
	// merge brc-20 tx events that have the same tx hash
	txList := make([]transaction, 0, len(rawTxList))
	groupedTxs := lo.GroupBy(rawTxList, func(tx transaction) chainhash.Hash { return tx.TxHash })
	for _, txs := range groupedTxs {
		tx := txs[0]
		if tx.Mints == nil {
			tx.Mints = map[string]amountWithDecimal{}
		}
		if tx.Burns == nil {
			tx.Burns = map[string]amountWithDecimal{}
		}
		for _, tx2 := range txs[1:] {
			tx.Inputs = append(tx.Inputs, tx2.Inputs...)
			tx.Outputs = append(tx.Outputs, tx2.Outputs...)
			// Only one mint event is allowed per transaction.
			if len(tx2.Mints) > 0 {
				return errors.Wrap(errs.InvalidState, "transaction can't have multiple mints")
			}
			// Accumulate burns per tick across the merged events.
			for tick, tx2Amt := range tx2.Burns {
				if txAmt, ok := tx.Burns[tick]; ok {
					tx.Burns[tick] = amountWithDecimal{
						Amount:   new(uint256.Int).Add(txAmt.Amount, tx2Amt.Amount),
						Decimals: txAmt.Decimals,
					}
				} else {
					tx.Burns[tick] = tx2Amt
				}
			}
			tx.Extend.Operations = append(tx.Extend.Operations, tx2.Extend.Operations...)
		}
		slices.SortFunc(tx.Inputs, func(i, j txInputOutput) int {
			return cmp.Compare(i.Index, j.Index)
		})
		slices.SortFunc(tx.Outputs, func(i, j txInputOutput) int {
			return cmp.Compare(i.Index, j.Index)
		})
		txList = append(txList, tx)
	}
	// Sort by block height ASC, then index ASC. cmp.Compare avoids the
	// unsigned-subtraction overflow of the previous int(a-b) comparator.
	slices.SortFunc(txList, func(t1, t2 transaction) int {
		if c := cmp.Compare(t1.BlockHeight, t2.BlockHeight); c != 0 {
			return c
		}
		return cmp.Compare(t1.Index, t2.Index)
	})
	resp := getTransactionsResponse{
		Result: &getTransactionsResult{
			List: txList,
		},
	}
	return errors.WithStack(ctx.JSON(resp))
}

View File

@@ -0,0 +1,136 @@
package httphandler
import (
"strings"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/pkg/btcutils"
"github.com/gaze-network/indexer-network/pkg/decimals"
"github.com/gofiber/fiber/v2"
"github.com/holiman/uint256"
"github.com/samber/lo"
)
// getUTXOsByAddressRequest captures the path/query parameters of
// GET /utxos/wallet/:wallet.
type getUTXOsByAddressRequest struct {
	Wallet      string `params:"wallet"`
	Id          string `query:"id"`
	BlockHeight uint64 `query:"blockHeight"`
}

// Validate reports a public validation error when 'wallet' is missing.
func (r getUTXOsByAddressRequest) Validate() error {
	var errList []error
	if r.Wallet == "" {
		errList = append(errList, errors.New("'wallet' is required"))
	}
	return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}

// transferableInscription is one inscribed-but-unspent transfer carried by a utxo.
type transferableInscription struct {
	Ticker   string       `json:"ticker"`
	Amount   *uint256.Int `json:"amount"` // scaled by Decimals
	Decimals uint16       `json:"decimals"`
}

// utxoExtend carries the BRC-20 payload attached to a utxo.
type utxoExtend struct {
	TransferableInscriptions []transferableInscription `json:"transferableInscriptions"`
}

// utxo identifies an unspent output holding transferable inscriptions.
type utxo struct {
	TxHash      chainhash.Hash `json:"txHash"`
	OutputIndex uint32         `json:"outputIndex"`
	Extend      utxoExtend     `json:"extend"`
}

// getUTXOsByAddressResult is the payload of GetUTXOsByAddress.
type getUTXOsByAddressResult struct {
	List        []utxo `json:"list"`
	BlockHeight uint64 `json:"blockHeight"`
}

// getUTXOsByAddressResponse is the HTTP envelope of GetUTXOsByAddress.
type getUTXOsByAddressResponse = common.HttpResponse[getUTXOsByAddressResult]
// GetUTXOsByAddress handles GET /utxos/wallet/:wallet. It lists the wallet's
// utxos that carry transferable BRC-20 inscriptions at the requested block
// height (defaulting to the latest indexed block), optionally filtered by
// tick id.
func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
	var req getUTXOsByAddressRequest
	if err := ctx.ParamsParser(&req); err != nil {
		return errors.WithStack(err)
	}
	if err := ctx.QueryParser(&req); err != nil {
		return errors.WithStack(err)
	}
	if err := req.Validate(); err != nil {
		return errors.WithStack(err)
	}
	pkScript, err := btcutils.ToPkScript(h.network, req.Wallet)
	if err != nil {
		return errs.NewPublicError("unable to resolve pkscript from \"wallet\"")
	}
	blockHeight := req.BlockHeight
	if blockHeight == 0 {
		blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
		if err != nil {
			return errors.Wrap(err, "error during GetLatestBlock")
		}
		blockHeight = uint64(blockHeader.Height)
	}
	transferables, err := h.usecase.GetTransferableTransfersByPkScript(ctx.UserContext(), pkScript, blockHeight)
	if err != nil {
		return errors.Wrap(err, "error during GetTransferableTransfersByPkScript")
	}
	transferableTicks := lo.Map(transferables, func(src *entity.EventInscribeTransfer, _ int) string { return src.Tick })
	entries, err := h.usecase.GetTickEntryByTickBatch(ctx.UserContext(), transferableTicks)
	if err != nil {
		return errors.Wrap(err, "error during GetTickEntryByTickBatch")
	}
	// Group the transferable inscriptions by the outpoint (utxo) holding them.
	groupedTransferables := lo.GroupBy(transferables, func(src *entity.EventInscribeTransfer) wire.OutPoint { return src.SatPoint.OutPoint })
	utxoList := make([]utxo, 0, len(groupedTransferables))
	for outPoint, transferables := range groupedTransferables {
		transferableInscriptions := make([]transferableInscription, 0, len(transferables))
		for _, transferable := range transferables {
			entry := entries[transferable.Tick]
			transferableInscriptions = append(transferableInscriptions, transferableInscription{
				Ticker:   transferable.Tick,
				Amount:   decimals.ToUint256(transferable.Amount, entry.Decimals),
				Decimals: entry.Decimals,
			})
		}
		utxoList = append(utxoList, utxo{
			TxHash:      outPoint.Hash,
			OutputIndex: outPoint.Index,
			Extend: utxoExtend{
				TransferableInscriptions: transferableInscriptions,
			},
		})
	}
	// TODO: filter tickers in pg query
	// Filter by req.Id only when it is provided. Bug fix: the filter
	// previously ran unconditionally, so requests without an id matched
	// nothing and always returned an empty list.
	if req.Id != "" {
		utxoList = lo.Filter(utxoList, func(u utxo, _ int) bool {
			for _, ins := range u.Extend.TransferableInscriptions {
				if strings.EqualFold(req.Id, ins.Ticker) {
					return true
				}
			}
			return false
		})
	}
	resp := getUTXOsByAddressResponse{
		Result: &getUTXOsByAddressResult{
			BlockHeight: blockHeight,
			List:        utxoList,
		},
	}
	return errors.WithStack(ctx.JSON(resp))
}

View File

@@ -0,0 +1,18 @@
package httphandler
import (
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/brc20/internal/usecase"
)
// HttpHandler serves the BRC-20 HTTP API endpoints.
type HttpHandler struct {
	usecase *usecase.Usecase // business-logic layer backing the endpoints
	network common.Network   // bitcoin network used to resolve wallet addresses to pkscripts
}
// New constructs an HttpHandler bound to the given bitcoin network and usecase.
func New(network common.Network, usecase *usecase.Usecase) *HttpHandler {
	handler := &HttpHandler{
		usecase: usecase,
		network: network,
	}
	return handler
}

View File

@@ -0,0 +1,19 @@
package httphandler
import (
"github.com/gofiber/fiber/v2"
)
// Mount registers every BRC-20 HTTP route under the /v2/brc20 prefix.
func (h *HttpHandler) Mount(router fiber.Router) error {
	group := router.Group("/v2/brc20")

	// Balance endpoints.
	group.Post("/balances/wallet/batch", h.GetBalancesByAddressBatch)
	group.Get("/balances/wallet/:wallet", h.GetBalancesByAddress)

	// Token, activity, and chain-state endpoints.
	group.Get("/transactions", h.GetTransactions)
	group.Get("/holders/:id", h.GetHolders)
	group.Get("/info/:id", h.GetTokenInfo)
	group.Get("/utxos/wallet/:wallet", h.GetUTXOsByAddress)
	group.Get("/block", h.GetCurrentBlock)

	return nil
}

93
modules/brc20/brc20.go Normal file
View File

@@ -0,0 +1,93 @@
package brc20
import (
"context"
"strings"
"github.com/btcsuite/btcd/rpcclient"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/brc20/api/httphandler"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
brc20postgres "github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres"
"github.com/gaze-network/indexer-network/modules/brc20/internal/usecase"
"github.com/gaze-network/indexer-network/pkg/btcclient"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gofiber/fiber/v2"
"github.com/samber/do/v2"
"github.com/samber/lo"
)
// New wires the BRC-20 indexer worker from injected dependencies: it selects
// the configured database backend and bitcoin datasource, builds the
// processor, verifies its persisted states, and mounts the configured API
// handlers before returning the worker.
func New(injector do.Injector) (indexer.IndexerWorker, error) {
	ctx := do.MustInvoke[context.Context](injector)
	conf := do.MustInvoke[config.Config](injector)
	// reportingClient := do.MustInvoke[*reportingclient.ReportingClient](injector)

	// cleanupFuncs collects resource-release callbacks handed to the processor.
	cleanupFuncs := make([]func(context.Context) error, 0)

	var brc20Dg datagateway.BRC20DataGateway
	var indexerInfoDg datagateway.IndexerInfoDataGateway
	switch strings.ToLower(conf.Modules.BRC20.Database) {
	case "postgresql", "postgres", "pg":
		pg, err := postgres.NewPool(ctx, conf.Modules.BRC20.Postgres)
		if err != nil {
			if errors.Is(err, errs.InvalidArgument) {
				return nil, errors.Wrap(err, "Invalid Postgres configuration for indexer")
			}
			return nil, errors.Wrap(err, "can't create Postgres connection pool")
		}
		cleanupFuncs = append(cleanupFuncs, func(ctx context.Context) error {
			pg.Close()
			return nil
		})
		// The repository implements both datagateway interfaces.
		brc20Repo := brc20postgres.NewRepository(pg)
		brc20Dg = brc20Repo
		indexerInfoDg = brc20Repo
	default:
		return nil, errors.Wrapf(errs.Unsupported, "%q database for indexer is not supported", conf.Modules.BRC20.Database)
	}

	var bitcoinDatasource datasources.Datasource[*types.Block]
	var bitcoinClient btcclient.Contract
	switch strings.ToLower(conf.Modules.BRC20.Datasource) {
	case "bitcoin-node":
		btcClient := do.MustInvoke[*rpcclient.Client](injector)
		bitcoinNodeDatasource := datasources.NewBitcoinNode(btcClient)
		bitcoinDatasource = bitcoinNodeDatasource
		bitcoinClient = bitcoinNodeDatasource
	default:
		return nil, errors.Wrapf(errs.Unsupported, "%q datasource is not supported", conf.Modules.BRC20.Datasource)
	}

	processor, err := NewProcessor(brc20Dg, indexerInfoDg, bitcoinClient, conf.Network, cleanupFuncs)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	if err := processor.VerifyStates(ctx); err != nil {
		return nil, errors.WithStack(err)
	}

	// Mount API
	apiHandlers := lo.Uniq(conf.Modules.BRC20.APIHandlers)
	for _, handler := range apiHandlers {
		switch handler { // TODO: support more handlers (e.g. gRPC)
		case "http":
			httpServer := do.MustInvoke[*fiber.App](injector)
			uc := usecase.New(brc20Dg, bitcoinClient)
			httpHandler := httphandler.New(conf.Network, uc)
			if err := httpHandler.Mount(httpServer); err != nil {
				return nil, errors.Wrap(err, "can't mount API")
			}
			logger.InfoContext(ctx, "Mounted HTTP handler")
		default:
			return nil, errors.Wrapf(errs.Unsupported, "%q API handler is not supported", handler)
		}
	}

	// Named "worker" (was "indexer") to avoid shadowing the imported indexer package.
	worker := indexer.New(processor, bitcoinDatasource)
	return worker, nil
}

View File

@@ -0,0 +1,10 @@
package config
import "github.com/gaze-network/indexer-network/internal/postgres"
// Config holds the BRC-20 module configuration as loaded from the application
// config file (mapstructure keys).
type Config struct {
	Datasource  string          `mapstructure:"datasource"`   // Datasource to fetch bitcoin data for Meta-Protocol e.g. `bitcoin-node`
	Database    string          `mapstructure:"database"`     // Database to store data.
	APIHandlers []string        `mapstructure:"api_handlers"` // List of API handlers to enable. (e.g. `http`)
	Postgres    postgres.Config `mapstructure:"postgres"`     // Connection settings used when Database selects Postgres.
}

View File

@@ -0,0 +1,25 @@
package brc20
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/types"
)
// Versioning constants recorded in the persisted indexer state. Bump DBVersion
// when the database schema changes and EventHashVersion when the event-hash
// string format changes.
const (
	ClientVersion    = "v0.0.1"
	DBVersion        = 1
	EventHashVersion = 1
)

// startingBlockHeader maps each supported network to a block header from which
// BRC-20 indexing starts.
// NOTE(review): presumably indexing begins at the block after these headers —
// confirm against the processor's start-up logic.
var startingBlockHeader = map[common.Network]types.BlockHeader{
	common.NetworkMainnet: {
		Height: 767429,
		Hash:   *utils.Must(chainhash.NewHashFromStr("00000000000000000002b35aef66eb15cd2b232a800f75a2f25cedca4cfe52c4")),
	},
	common.NetworkTestnet: {
		Height: 2413342,
		Hash:   *utils.Must(chainhash.NewHashFromStr("00000000000022e97030b143af785de812f836dd0651b6ac2b7dd9e90dc9abf9")),
	},
}

View File

@@ -0,0 +1,17 @@
BEGIN;

-- Down migration: drop every BRC-20 table created by the up migration.
DROP TABLE IF EXISTS "brc20_indexer_states";
DROP TABLE IF EXISTS "brc20_indexed_blocks";
DROP TABLE IF EXISTS "brc20_processor_stats";
DROP TABLE IF EXISTS "brc20_tick_entries";
DROP TABLE IF EXISTS "brc20_tick_entry_states";
DROP TABLE IF EXISTS "brc20_event_deploys";
DROP TABLE IF EXISTS "brc20_event_mints";
DROP TABLE IF EXISTS "brc20_event_inscribe_transfers";
DROP TABLE IF EXISTS "brc20_event_transfer_transfers";
DROP TABLE IF EXISTS "brc20_balances";
DROP TABLE IF EXISTS "brc20_inscription_entries";
DROP TABLE IF EXISTS "brc20_inscription_entry_states";
DROP TABLE IF EXISTS "brc20_inscription_transfers";

COMMIT;

View File

@@ -0,0 +1,191 @@
BEGIN;

-- Indexer Client Information
CREATE TABLE IF NOT EXISTS "brc20_indexer_states" (
	"id" BIGSERIAL PRIMARY KEY,
	"client_version" TEXT NOT NULL,
	"network" TEXT NOT NULL,
	"db_version" INT NOT NULL,
	"event_hash_version" INT NOT NULL,
	"created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS brc20_indexer_state_created_at_idx ON "brc20_indexer_states" USING BTREE ("created_at" DESC);

-- BRC20 data
-- One row per indexed block, with event hashes for cross-indexer verification.
CREATE TABLE IF NOT EXISTS "brc20_indexed_blocks" (
	"height" INT NOT NULL PRIMARY KEY,
	"hash" TEXT NOT NULL,
	"event_hash" TEXT NOT NULL,
	"cumulative_event_hash" TEXT NOT NULL
);

-- Per-block running counters used by the processor.
CREATE TABLE IF NOT EXISTS "brc20_processor_stats" (
	"block_height" INT NOT NULL PRIMARY KEY,
	"cursed_inscription_count" INT NOT NULL,
	"blessed_inscription_count" INT NOT NULL,
	"lost_sats" BIGINT NOT NULL
);

-- Immutable deploy-time attributes of each ticker.
CREATE TABLE IF NOT EXISTS "brc20_tick_entries" (
	"tick" TEXT NOT NULL PRIMARY KEY, -- lowercase of original_tick
	"original_tick" TEXT NOT NULL,
	"total_supply" DECIMAL NOT NULL,
	"decimals" SMALLINT NOT NULL,
	"limit_per_mint" DECIMAL NOT NULL,
	"is_self_mint" BOOLEAN NOT NULL,
	"deploy_inscription_id" TEXT NOT NULL,
	"deployed_at" TIMESTAMP NOT NULL,
	"deployed_at_height" INT NOT NULL
);

-- Mutable per-ticker state, versioned by block height (latest row wins).
CREATE TABLE IF NOT EXISTS "brc20_tick_entry_states" (
	"tick" TEXT NOT NULL,
	"block_height" INT NOT NULL,
	"minted_amount" DECIMAL NOT NULL,
	"burned_amount" DECIMAL NOT NULL,
	"completed_at" TIMESTAMP,
	"completed_at_height" INT,
	PRIMARY KEY ("tick", "block_height")
);

-- Deploy events, one per deploy inscription.
CREATE TABLE IF NOT EXISTS "brc20_event_deploys" (
	"id" BIGINT PRIMARY KEY NOT NULL,
	"inscription_id" TEXT NOT NULL,
	"inscription_number" BIGINT NOT NULL,
	"tick" TEXT NOT NULL, -- lowercase of original_tick
	"original_tick" TEXT NOT NULL,
	"tx_hash" TEXT NOT NULL,
	"block_height" INT NOT NULL,
	"tx_index" INT NOT NULL,
	"timestamp" TIMESTAMP NOT NULL,
	"pkscript" TEXT NOT NULL,
	"satpoint" TEXT NOT NULL,
	"total_supply" DECIMAL NOT NULL,
	"decimals" SMALLINT NOT NULL,
	"limit_per_mint" DECIMAL NOT NULL,
	"is_self_mint" BOOLEAN NOT NULL
);
CREATE INDEX IF NOT EXISTS brc20_event_deploys_block_height_idx ON "brc20_event_deploys" USING BTREE ("block_height");

-- Mint events.
CREATE TABLE IF NOT EXISTS "brc20_event_mints" (
	"id" BIGINT PRIMARY KEY NOT NULL,
	"inscription_id" TEXT NOT NULL,
	"inscription_number" BIGINT NOT NULL,
	"tick" TEXT NOT NULL, -- lowercase of original_tick
	"original_tick" TEXT NOT NULL,
	"tx_hash" TEXT NOT NULL,
	"block_height" INT NOT NULL,
	"tx_index" INT NOT NULL,
	"timestamp" TIMESTAMP NOT NULL,
	"pkscript" TEXT NOT NULL,
	"satpoint" TEXT NOT NULL,
	"amount" DECIMAL NOT NULL,
	"parent_id" TEXT -- requires parent deploy inscription id if minting a self-mint ticker
);
CREATE INDEX IF NOT EXISTS brc20_event_mints_block_height_idx ON "brc20_event_mints" USING BTREE ("block_height");

-- Inscribe-transfer events (the first step of a BRC-20 transfer).
CREATE TABLE IF NOT EXISTS "brc20_event_inscribe_transfers" (
	"id" BIGINT PRIMARY KEY NOT NULL,
	"inscription_id" TEXT NOT NULL,
	"inscription_number" BIGINT NOT NULL,
	"tick" TEXT NOT NULL, -- lowercase of original_tick
	"original_tick" TEXT NOT NULL,
	"tx_hash" TEXT NOT NULL,
	"block_height" INT NOT NULL,
	"tx_index" INT NOT NULL,
	"timestamp" TIMESTAMP NOT NULL,
	"pkscript" TEXT NOT NULL,
	"satpoint" TEXT NOT NULL,
	"output_index" INT NOT NULL,
	"sats_amount" BIGINT NOT NULL,
	"amount" DECIMAL NOT NULL
);
CREATE INDEX IF NOT EXISTS brc20_event_inscribe_transfers_block_height_idx ON "brc20_event_inscribe_transfers" USING BTREE ("block_height");
CREATE INDEX IF NOT EXISTS brc20_event_inscribe_transfers_inscription_id_idx ON "brc20_event_inscribe_transfers" USING BTREE ("inscription_id"); -- used for validating transfer transfer events

-- Transfer-transfer events (the second step: spending a transfer inscription).
CREATE TABLE IF NOT EXISTS "brc20_event_transfer_transfers" (
	"id" BIGINT PRIMARY KEY NOT NULL,
	"inscription_id" TEXT NOT NULL,
	"inscription_number" BIGINT NOT NULL,
	"tick" TEXT NOT NULL, -- lowercase of original_tick
	"original_tick" TEXT NOT NULL,
	"tx_hash" TEXT NOT NULL,
	"block_height" INT NOT NULL,
	"tx_index" INT NOT NULL,
	"timestamp" TIMESTAMP NOT NULL,
	"from_pkscript" TEXT NOT NULL,
	"from_satpoint" TEXT NOT NULL,
	"from_input_index" INT NOT NULL,
	"to_pkscript" TEXT NOT NULL,
	"to_satpoint" TEXT NOT NULL,
	"to_output_index" INT NOT NULL,
	"spent_as_fee" BOOLEAN NOT NULL,
	"amount" DECIMAL NOT NULL
);
CREATE INDEX IF NOT EXISTS brc20_event_transfer_transfers_block_height_idx ON "brc20_event_transfer_transfers" USING BTREE ("block_height");

-- Balance snapshots versioned by block height (latest row per pkscript/tick wins).
CREATE TABLE IF NOT EXISTS "brc20_balances" (
	"pkscript" TEXT NOT NULL,
	"block_height" INT NOT NULL,
	"tick" TEXT NOT NULL,
	"overall_balance" DECIMAL NOT NULL, -- overall balance = available_balance + transferable_balance
	"available_balance" DECIMAL NOT NULL,
	PRIMARY KEY ("pkscript", "tick", "block_height")
);

-- Tracked inscription entries (only BRC-20-relevant inscriptions).
CREATE TABLE IF NOT EXISTS "brc20_inscription_entries" (
	"id" TEXT NOT NULL PRIMARY KEY,
	"number" BIGINT NOT NULL,
	"sequence_number" BIGINT NOT NULL,
	"delegate" TEXT, -- delegate inscription id
	"metadata" BYTEA,
	"metaprotocol" TEXT,
	"parents" TEXT[], -- parent inscription id, 0.14 only supports 1 parent per inscription
	"pointer" BIGINT,
	"content" JSONB, -- can use jsonb because we only track brc20 inscriptions
	"content_encoding" TEXT,
	"content_type" TEXT,
	"cursed" BOOLEAN NOT NULL, -- inscriptions after jubilee are no longer cursed in 0.14, which affects inscription number
	"cursed_for_brc20" BOOLEAN NOT NULL, -- however, inscriptions that would normally be cursed are still considered cursed for brc20
	"created_at" TIMESTAMP NOT NULL,
	"created_at_height" INT NOT NULL
);
CREATE INDEX IF NOT EXISTS brc20_inscription_entries_id_number_idx ON "brc20_inscription_entries" USING BTREE ("id", "number");

-- Mutable inscription state, versioned by block height.
CREATE TABLE IF NOT EXISTS "brc20_inscription_entry_states" (
	"id" TEXT NOT NULL,
	"block_height" INT NOT NULL,
	"transfer_count" INT NOT NULL,
	PRIMARY KEY ("id", "block_height")
);

-- Every on-chain movement of a tracked inscription.
CREATE TABLE IF NOT EXISTS "brc20_inscription_transfers" (
	"inscription_id" TEXT NOT NULL,
	"inscription_number" BIGINT NOT NULL,
	"inscription_sequence_number" BIGINT NOT NULL,
	"block_height" INT NOT NULL,
	"tx_index" INT NOT NULL,
	"tx_hash" TEXT NOT NULL,
	"from_input_index" INT NOT NULL,
	"old_satpoint_tx_hash" TEXT,
	"old_satpoint_out_idx" INT,
	"old_satpoint_offset" BIGINT,
	"new_satpoint_tx_hash" TEXT,
	"new_satpoint_out_idx" INT,
	"new_satpoint_offset" BIGINT,
	"new_pkscript" TEXT NOT NULL,
	"new_output_value" BIGINT NOT NULL,
	"sent_as_fee" BOOLEAN NOT NULL,
	"transfer_count" INT NOT NULL,
	PRIMARY KEY ("inscription_id", "block_height", "tx_index")
);
CREATE INDEX IF NOT EXISTS brc20_inscription_transfers_block_height_tx_index_idx ON "brc20_inscription_transfers" USING BTREE ("block_height", "tx_index");
CREATE INDEX IF NOT EXISTS brc20_inscription_transfers_new_satpoint_idx ON "brc20_inscription_transfers" USING BTREE ("new_satpoint_tx_hash", "new_satpoint_out_idx", "new_satpoint_offset");

COMMIT;

View File

@@ -0,0 +1,263 @@
-- name: GetTransferableTransfersByPkScript :many
-- Inscribe-transfer events owned by the pkscript, at or below the given block
-- height, that have not been spent by a transfer-transfer event.
-- NOTE(review): the NOT EXISTS subquery is not bounded by block height, so a
-- transfer spent after $2 also excludes the row in historical queries —
-- confirm this is intended.
SELECT *
FROM "brc20_event_inscribe_transfers"
WHERE
	pkscript = $1
	AND "brc20_event_inscribe_transfers"."block_height" <= $2
	AND NOT EXISTS (
		SELECT NULL
		FROM "brc20_event_transfer_transfers"
		WHERE "brc20_event_transfer_transfers"."inscription_id" = "brc20_event_inscribe_transfers"."inscription_id"
	)
ORDER BY "brc20_event_inscribe_transfers"."block_height" DESC;
-- name: GetBalancesByPkScript :many
-- Latest non-zero balance per tick for a pkscript at or below the given height.
-- Fixed: select the latest state per tick by block_height DESC (previously
-- ordered by overall_balance DESC, which returned the historical maximum
-- balance instead of the current one; sibling queries GetBalancesByTick and
-- GetBalancesBatchAtHeight already order by block_height DESC).
WITH balances AS (
	SELECT DISTINCT ON (tick) * FROM brc20_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY tick, block_height DESC
)
SELECT * FROM balances WHERE overall_balance > 0;
-- name: GetBalancesByTick :many
-- Latest non-zero balance per holder (pkscript) of a tick at or below the height.
WITH balances AS (
	SELECT DISTINCT ON (pkscript) * FROM brc20_balances WHERE tick = $1 AND block_height <= $2 ORDER BY pkscript, block_height DESC
)
SELECT * FROM balances WHERE overall_balance > 0;

-- name: GetDeployEventByTick :one
-- The deploy event for a tick (tick is the table's effective unique key per deploy).
SELECT * FROM brc20_event_deploys WHERE tick = $1;
-- name: GetFirstLastInscriptionNumberByTick :one
-- Min/max inscription number across all event types for a tick; -1 when the
-- tick has no events.
SELECT
	COALESCE(MIN("inscription_number"), -1)::BIGINT AS "first_inscription_number",
	COALESCE(MAX("inscription_number"), -1)::BIGINT AS "last_inscription_number"
FROM (
	SELECT inscription_number FROM "brc20_event_mints" WHERE "brc20_event_mints"."tick" = $1
	UNION ALL
	SELECT inscription_number FROM "brc20_event_inscribe_transfers" WHERE "brc20_event_inscribe_transfers"."tick" = $1
	UNION ALL
	SELECT inscription_number FROM "brc20_event_transfer_transfers" WHERE "brc20_event_transfer_transfers"."tick" = $1
) as events;
-- Earlier CTE-based alternative, kept for reference:
-- WITH
-- "first_mint" AS (SELECT "inscription_number" FROM "brc20_event_mints" WHERE "brc20_event_mints".tick = $1 ORDER BY "id" ASC LIMIT 1),
-- "latest_mint" AS (SELECT "inscription_number" FROM "brc20_event_mints" WHERE "brc20_event_mints".tick = $1 ORDER BY "id" DESC LIMIT 1),
-- "first_inscribe_transfer" AS (SELECT "inscription_number" FROM "brc20_event_inscribe_transfers" WHERE "brc20_event_inscribe_transfers".tick = $1 ORDER BY "id" ASC LIMIT 1),
-- "latest_inscribe_transfer" AS (SELECT "inscription_number" FROM "brc20_event_inscribe_transfers" WHERE "brc20_event_inscribe_transfers".tick = $1 ORDER BY "id" DESC LIMIT 1)
-- SELECT
-- COALESCE(
-- LEAST(
-- (SELECT "inscription_number" FROM "first_mint"),
-- (SELECT "inscription_number" FROM "first_inscribe_transfer")
-- ),
-- -1
-- ) AS "first_inscription_number",
-- COALESCE(
-- GREATEST(
-- (SELECT "inscription_number" FROM "latest_mint"),
-- (SELECT "inscription_number" FROM "latest_inscribe_transfer")
-- ),
-- -1
-- ) AS "last_inscription_number";
-- name: GetTickEntriesByTicksAndHeight :many
-- Tick entries joined with their latest state at or below @height.
-- Fixed: removed a stray double semicolon (";;") that terminated the query.
WITH "states" AS (
	-- select latest state
	SELECT DISTINCT ON ("tick") * FROM "brc20_tick_entry_states" WHERE "tick" = ANY(@ticks::text[]) AND block_height <= @height ORDER BY "tick", "block_height" DESC
)
SELECT * FROM "brc20_tick_entries"
LEFT JOIN "states" ON "brc20_tick_entries"."tick" = "states"."tick"
WHERE "brc20_tick_entries"."tick" = ANY(@ticks::text[]) AND deployed_at_height <= @height;
-- Event listing queries. Each boolean @filter_* flag toggles the matching
-- predicate, and @block_height = 0 disables the height filter.

-- name: GetDeployEvents :many
SELECT * FROM "brc20_event_deploys"
WHERE (
	@filter_pk_script::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
	OR pkscript = @pk_script
) AND (
	@filter_ticker::BOOLEAN = FALSE -- if @filter_ticker is TRUE, apply ticker filter
	OR tick = @ticker
) AND (
	@block_height::INT = 0 OR block_height = @block_height::INT -- if @block_height > 0, apply block_height filter
);

-- name: GetMintEvents :many
SELECT * FROM "brc20_event_mints"
WHERE (
	@filter_pk_script::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
	OR pkscript = @pk_script
) AND (
	@filter_ticker::BOOLEAN = FALSE -- if @filter_ticker is TRUE, apply ticker filter
	OR tick = @ticker
) AND (
	@block_height::INT = 0 OR block_height = @block_height::INT -- if @block_height > 0, apply block_height filter
);

-- name: GetInscribeTransferEvents :many
SELECT * FROM "brc20_event_inscribe_transfers"
WHERE (
	@filter_pk_script::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
	OR pkscript = @pk_script
) AND (
	@filter_ticker::BOOLEAN = FALSE -- if @filter_ticker is TRUE, apply ticker filter
	OR tick = @ticker
) AND (
	@block_height::INT = 0 OR block_height = @block_height::INT -- if @block_height > 0, apply block_height filter
);

-- name: GetTransferTransferEvents :many
-- Matches either side (sender or receiver) when the pkscript filter is on.
SELECT * FROM "brc20_event_transfer_transfers"
WHERE (
	@filter_pk_script::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
	OR from_pkscript = @pk_script
	OR to_pkscript = @pk_script
) AND (
	@filter_ticker::BOOLEAN = FALSE -- if @filter_ticker is TRUE, apply ticker filter
	OR tick = @ticker
) AND (
	@block_height::INT = 0 OR block_height = @block_height::INT -- if @block_height > 0, apply block_height filter
);
-- name: GetLatestIndexedBlock :one
-- The most recently indexed block.
SELECT * FROM "brc20_indexed_blocks" ORDER BY "height" DESC LIMIT 1;

-- name: GetIndexedBlockByHeight :one
SELECT * FROM "brc20_indexed_blocks" WHERE "height" = $1;

-- name: GetLatestProcessorStats :one
-- Processor counters at the most recently processed block.
SELECT * FROM "brc20_processor_stats" ORDER BY "block_height" DESC LIMIT 1;
-- name: GetInscriptionTransfersInOutPoints :many
-- Inscription transfers whose new satpoint lies in any of the given outpoints
-- (paired tx_hash/tx_out_idx arrays), joined with the inscription content.
-- Fixed: removed a stray lone ";" statement that followed this query.
SELECT "it".*, "ie"."content" FROM (
	SELECT
		unnest(@tx_hash_arr::text[]) AS "tx_hash",
		unnest(@tx_out_idx_arr::int[]) AS "tx_out_idx"
) "inputs"
INNER JOIN "brc20_inscription_transfers" it ON "inputs"."tx_hash" = "it"."new_satpoint_tx_hash" AND "inputs"."tx_out_idx" = "it"."new_satpoint_out_idx"
LEFT JOIN "brc20_inscription_entries" ie ON "it"."inscription_id" = "ie"."id";
-- name: GetInscriptionEntriesByIds :many
-- Inscription entries joined with their latest mutable state.
WITH "states" AS (
	-- select latest state
	SELECT DISTINCT ON ("id") * FROM "brc20_inscription_entry_states" WHERE "id" = ANY(@inscription_ids::text[]) ORDER BY "id", "block_height" DESC
)
SELECT * FROM "brc20_inscription_entries"
LEFT JOIN "states" ON "brc20_inscription_entries"."id" = "states"."id"
WHERE "brc20_inscription_entries"."id" = ANY(@inscription_ids::text[]);

-- name: GetTickEntriesByTicks :many
-- Tick entries joined with their latest state (no height bound).
WITH "states" AS (
	-- select latest state
	SELECT DISTINCT ON ("tick") * FROM "brc20_tick_entry_states" WHERE "tick" = ANY(@ticks::text[]) ORDER BY "tick", "block_height" DESC
)
SELECT * FROM "brc20_tick_entries"
LEFT JOIN "states" ON "brc20_tick_entries"."tick" = "states"."tick"
WHERE "brc20_tick_entries"."tick" = ANY(@ticks::text[]);

-- name: GetInscriptionNumbersByIds :many
SELECT id, number FROM "brc20_inscription_entries" WHERE "id" = ANY(@inscription_ids::text[]);

-- name: GetInscriptionParentsByIds :many
SELECT id, parents FROM "brc20_inscription_entries" WHERE "id" = ANY(@inscription_ids::text[]);

-- name: GetLatestEventIds :one
-- Highest id per event table; -1 when a table is empty.
WITH "latest_deploy_id" AS (
	SELECT "id" FROM "brc20_event_deploys" ORDER BY "id" DESC LIMIT 1
),
"latest_mint_id" AS (
	SELECT "id" FROM "brc20_event_mints" ORDER BY "id" DESC LIMIT 1
),
"latest_inscribe_transfer_id" AS (
	SELECT "id" FROM "brc20_event_inscribe_transfers" ORDER BY "id" DESC LIMIT 1
),
"latest_transfer_transfer_id" AS (
	SELECT "id" FROM "brc20_event_transfer_transfers" ORDER BY "id" DESC LIMIT 1
)
SELECT
	COALESCE((SELECT "id" FROM "latest_deploy_id"), -1) AS "event_deploy_id",
	COALESCE((SELECT "id" FROM "latest_mint_id"), -1) AS "event_mint_id",
	COALESCE((SELECT "id" FROM "latest_inscribe_transfer_id"), -1) AS "event_inscribe_transfer_id",
	COALESCE((SELECT "id" FROM "latest_transfer_transfer_id"), -1) AS "event_transfer_transfer_id";

-- name: GetBalancesBatchAtHeight :many
-- Latest balance per requested (pkscript, tick) pair at or below @block_height.
SELECT DISTINCT ON ("brc20_balances"."pkscript", "brc20_balances"."tick") "brc20_balances".* FROM "brc20_balances"
INNER JOIN (
	SELECT
		unnest(@pkscript_arr::text[]) AS "pkscript",
		unnest(@tick_arr::text[]) AS "tick"
) "queries" ON "brc20_balances"."pkscript" = "queries"."pkscript" AND "brc20_balances"."tick" = "queries"."tick" AND "brc20_balances"."block_height" <= @block_height
ORDER BY "brc20_balances"."pkscript", "brc20_balances"."tick", "block_height" DESC;

-- name: GetEventInscribeTransfersByInscriptionIds :many
SELECT * FROM "brc20_event_inscribe_transfers" WHERE "inscription_id" = ANY(@inscription_ids::text[]);
-- Write queries: batch inserts used while processing a block, and
-- DELETE ... SinceHeight queries used to roll back reorged blocks.

-- name: CreateIndexedBlock :exec
INSERT INTO "brc20_indexed_blocks" ("height", "hash", "event_hash", "cumulative_event_hash") VALUES ($1, $2, $3, $4);

-- name: CreateProcessorStats :exec
INSERT INTO "brc20_processor_stats" ("block_height", "cursed_inscription_count", "blessed_inscription_count", "lost_sats") VALUES ($1, $2, $3, $4);

-- name: CreateTickEntries :batchexec
INSERT INTO "brc20_tick_entries" ("tick", "original_tick", "total_supply", "decimals", "limit_per_mint", "is_self_mint", "deploy_inscription_id", "deployed_at", "deployed_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9);

-- name: CreateTickEntryStates :batchexec
INSERT INTO "brc20_tick_entry_states" ("tick", "block_height", "minted_amount", "burned_amount", "completed_at", "completed_at_height") VALUES ($1, $2, $3, $4, $5, $6);

-- name: CreateInscriptionEntries :batchexec
INSERT INTO "brc20_inscription_entries" ("id", "number", "sequence_number", "delegate", "metadata", "metaprotocol", "parents", "pointer", "content", "content_encoding", "content_type", "cursed", "cursed_for_brc20", "created_at", "created_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15);

-- name: CreateInscriptionEntryStates :batchexec
INSERT INTO "brc20_inscription_entry_states" ("id", "block_height", "transfer_count") VALUES ($1, $2, $3);

-- name: CreateInscriptionTransfers :batchexec
INSERT INTO "brc20_inscription_transfers" ("inscription_id", "inscription_number", "inscription_sequence_number", "block_height", "tx_index", "tx_hash", "from_input_index", "old_satpoint_tx_hash", "old_satpoint_out_idx", "old_satpoint_offset", "new_satpoint_tx_hash", "new_satpoint_out_idx", "new_satpoint_offset", "new_pkscript", "new_output_value", "sent_as_fee", "transfer_count") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17);

-- name: CreateEventDeploys :batchexec
INSERT INTO "brc20_event_deploys" ("id", "inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "total_supply", "decimals", "limit_per_mint", "is_self_mint") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15);

-- name: CreateEventMints :batchexec
INSERT INTO "brc20_event_mints" ("id", "inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "amount", "parent_id") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13);

-- name: CreateEventInscribeTransfers :batchexec
INSERT INTO "brc20_event_inscribe_transfers" ("id", "inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "output_index", "sats_amount", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14);

-- name: CreateEventTransferTransfers :batchexec
INSERT INTO "brc20_event_transfer_transfers" ("id", "inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "from_pkscript", "from_satpoint", "from_input_index", "to_pkscript", "to_satpoint", "to_output_index", "spent_as_fee", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17);

-- name: CreateBalances :batchexec
INSERT INTO "brc20_balances" ("pkscript", "block_height", "tick", "overall_balance", "available_balance") VALUES ($1, $2, $3, $4, $5);

-- name: DeleteIndexedBlocksSinceHeight :exec
DELETE FROM "brc20_indexed_blocks" WHERE "height" >= $1;

-- name: DeleteProcessorStatsSinceHeight :exec
DELETE FROM "brc20_processor_stats" WHERE "block_height" >= $1;

-- name: DeleteTickEntriesSinceHeight :exec
DELETE FROM "brc20_tick_entries" WHERE "deployed_at_height" >= $1;

-- name: DeleteTickEntryStatesSinceHeight :exec
DELETE FROM "brc20_tick_entry_states" WHERE "block_height" >= $1;

-- name: DeleteEventDeploysSinceHeight :exec
DELETE FROM "brc20_event_deploys" WHERE "block_height" >= $1;

-- name: DeleteEventMintsSinceHeight :exec
DELETE FROM "brc20_event_mints" WHERE "block_height" >= $1;

-- name: DeleteEventInscribeTransfersSinceHeight :exec
DELETE FROM "brc20_event_inscribe_transfers" WHERE "block_height" >= $1;

-- name: DeleteEventTransferTransfersSinceHeight :exec
DELETE FROM "brc20_event_transfer_transfers" WHERE "block_height" >= $1;

-- name: DeleteBalancesSinceHeight :exec
DELETE FROM "brc20_balances" WHERE "block_height" >= $1;

-- name: DeleteInscriptionEntriesSinceHeight :exec
DELETE FROM "brc20_inscription_entries" WHERE "created_at_height" >= $1;

-- name: DeleteInscriptionEntryStatesSinceHeight :exec
DELETE FROM "brc20_inscription_entry_states" WHERE "block_height" >= $1;

-- name: DeleteInscriptionTransfersSinceHeight :exec
DELETE FROM "brc20_inscription_transfers" WHERE "block_height" >= $1;

View File

@@ -0,0 +1,5 @@
-- name: GetLatestIndexerState :one
-- The most recently recorded indexer client state (version/network info).
SELECT * FROM brc20_indexer_states ORDER BY created_at DESC LIMIT 1;

-- name: CreateIndexerState :exec
INSERT INTO brc20_indexer_states (client_version, network, db_version, event_hash_version) VALUES ($1, $2, $3, $4);

View File

@@ -0,0 +1,69 @@
package brc20
import (
"encoding/hex"
"strconv"
"strings"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/samber/lo"
)
// eventHashSeparator — presumably used to join per-event strings when
// computing a block's event hash; confirm at the call site (not visible here).
const eventHashSeparator = "|"
// getEventDeployString renders a deploy-inscribe event in its canonical
// semicolon-separated string form (used as input to the event hash).
func getEventDeployString(event *entity.EventDeploy) string {
	selfMint := "False"
	if event.IsSelfMint {
		selfMint = "True"
	}
	fields := []string{
		"deploy-inscribe",
		event.InscriptionId.String(),
		hex.EncodeToString(event.PkScript),
		event.Tick,
		event.OriginalTick,
		event.TotalSupply.StringFixed(int32(event.Decimals)),
		strconv.Itoa(int(event.Decimals)),
		event.LimitPerMint.StringFixed(int32(event.Decimals)),
		selfMint,
	}
	return strings.Join(fields, ";")
}
// getEventMintString renders a mint-inscribe event in its canonical
// semicolon-separated string form. The parent id field is empty when the
// event has no parent inscription.
func getEventMintString(event *entity.EventMint, decimals uint16) string {
	parentId := ""
	if event.ParentId != nil {
		parentId = event.ParentId.String()
	}
	fields := []string{
		"mint-inscribe",
		event.InscriptionId.String(),
		hex.EncodeToString(event.PkScript),
		event.Tick,
		event.OriginalTick,
		event.Amount.StringFixed(int32(decimals)),
		parentId,
	}
	return strings.Join(fields, ";")
}
// getEventInscribeTransferString renders an inscribe-transfer event in its
// canonical semicolon-separated string form.
func getEventInscribeTransferString(event *entity.EventInscribeTransfer, decimals uint16) string {
	fields := []string{
		"inscribe-transfer",
		event.InscriptionId.String(),
		hex.EncodeToString(event.PkScript),
		event.Tick,
		event.OriginalTick,
		event.Amount.StringFixed(int32(decimals)),
	}
	return strings.Join(fields, ";")
}
// getEventTransferTransferString renders a transfer-transfer event in its
// canonical semicolon-separated string form. The receiver field is left empty
// when the inscription was spent as a fee.
func getEventTransferTransferString(event *entity.EventTransferTransfer, decimals uint16) string {
	toPkScript := ""
	if !event.SpentAsFee {
		toPkScript = hex.EncodeToString(event.ToPkScript)
	}
	fields := []string{
		"transfer-transfer",
		event.InscriptionId.String(),
		hex.EncodeToString(event.FromPkScript),
		toPkScript,
		event.Tick,
		event.OriginalTick,
		event.Amount.StringFixed(int32(decimals)),
	}
	return strings.Join(fields, ";")
}

View File

@@ -0,0 +1,16 @@
package brc20
import "github.com/gaze-network/indexer-network/common"
// selfMintActivationHeights maps each network to the block height at which
// BRC-20 self-mint becomes active; networks absent from the map never
// activate self-mint (see isSelfMintActivated).
var selfMintActivationHeights = map[common.Network]uint64{
	common.NetworkMainnet: 837090,
	common.NetworkTestnet: 837090,
}
// isSelfMintActivated reports whether BRC-20 self-mint is active at the given
// height on the given network. Unknown networks are never activated.
func isSelfMintActivated(height uint64, network common.Network) bool {
	if activationHeight, ok := selfMintActivationHeights[network]; ok {
		return height >= activationHeight
	}
	return false
}

View File

@@ -0,0 +1,21 @@
package brc20
// Operation identifies a BRC-20 operation encoded in an inscription payload.
type Operation string

// Supported BRC-20 operations.
const (
	OperationDeploy   Operation = "deploy"
	OperationMint     Operation = "mint"
	OperationTransfer Operation = "transfer"
)

// IsValid reports whether o is one of the supported BRC-20 operations.
func (o Operation) IsValid() bool {
	return o == OperationDeploy || o == OperationMint || o == OperationTransfer
}

// String returns the operation as its raw string value.
func (o Operation) String() string {
	return string(o)
}

View File

@@ -0,0 +1,173 @@
package brc20
import (
"encoding/json"
"math"
"math/big"
"strconv"
"strings"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/shopspring/decimal"
)
type rawPayload struct {
P string // required
Op string `json:"op"` // required
Tick string `json:"tick"` // required
// for deploy operations
Max string `json:"max"` // required
Lim *string `json:"lim"`
Dec *string `json:"dec"`
SelfMint *string `json:"self_mint"`
// for mint/transfer operations
Amt string `json:"amt"` // required
}
// Payload is a validated, typed BRC-20 payload produced by ParsePayload,
// together with the inscription transfer it was decoded from.
type Payload struct {
	Transfer     *entity.InscriptionTransfer // source transfer carrying the raw JSON content
	P            string
	Op           Operation
	Tick         string // lower-cased tick
	OriginalTick string // original tick before lower-cased
	// for deploy operations
	Max      decimal.Decimal
	Lim      decimal.Decimal
	Dec      uint16
	SelfMint bool
	// for mint/transfer operations
	Amt decimal.Decimal
}
// Validation errors returned by ParsePayload.
var (
	ErrInvalidProtocol   = errors.New("invalid protocol: must be 'brc20'")
	ErrInvalidOperation  = errors.New("invalid operation for brc20: must be one of 'deploy', 'mint', or 'transfer'")
	ErrInvalidTickLength = errors.New("invalid tick length: must be 4 or 5 bytes")
	ErrEmptyTick         = errors.New("empty tick")
	ErrEmptyMax          = errors.New("empty max")
	ErrInvalidMax        = errors.New("invalid max")
	ErrInvalidDec        = errors.New("invalid dec")
	ErrInvalidSelfMint   = errors.New("invalid self_mint")
	ErrInvalidAmt        = errors.New("invalid amt")
	ErrNumberOverflow    = errors.New("number overflow: max value is (2^64-1)")
)
// ParsePayload parses and validates the BRC-20 JSON payload carried by an
// inscription transfer. It returns a typed Payload or one of the Err*
// validation errors. For mint/transfer, the amount is parsed with up to 18
// decimal places here; the tick's actual decimals are checked later by the
// caller (see the NOTE below).
func ParsePayload(transfer *entity.InscriptionTransfer) (*Payload, error) {
	var p rawPayload
	err := json.Unmarshal(transfer.Content, &p)
	if err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal payload as json")
	}
	if p.P != "brc-20" {
		return nil, errors.WithStack(ErrInvalidProtocol)
	}
	if !Operation(p.Op).IsValid() {
		return nil, errors.WithStack(ErrInvalidOperation)
	}
	if p.Tick == "" {
		return nil, errors.WithStack(ErrEmptyTick)
	}
	if len(p.Tick) != 4 && len(p.Tick) != 5 {
		return nil, errors.WithStack(ErrInvalidTickLength)
	}
	parsed := Payload{
		Transfer:     transfer,
		P:            p.P,
		Op:           Operation(p.Op),
		Tick:         strings.ToLower(p.Tick),
		OriginalTick: p.Tick,
	}
	switch parsed.Op {
	case OperationDeploy:
		if p.Max == "" {
			return nil, errors.WithStack(ErrEmptyMax)
		}
		// dec defaults to 18 when omitted or empty.
		var rawDec string
		if p.Dec != nil {
			rawDec = *p.Dec
		}
		if rawDec == "" {
			rawDec = "18"
		}
		// Renamed the result from "ok" to "err": ParseUint returns an error,
		// not a bool, and the old name was misleading.
		dec, err := strconv.ParseUint(rawDec, 10, 16)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse dec")
		}
		if dec > 18 {
			return nil, errors.WithStack(ErrInvalidDec)
		}
		parsed.Dec = uint16(dec)
		max, err := parseNumericString(p.Max, dec)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse max")
		}
		parsed.Max = max
		// lim defaults to max when omitted.
		limit := max
		if p.Lim != nil {
			limit, err = parseNumericString(*p.Lim, dec)
			if err != nil {
				return nil, errors.Wrap(err, "failed to parse lim")
			}
		}
		parsed.Lim = limit
		// 5-bytes ticks are self-mint only
		if len(parsed.OriginalTick) == 5 {
			if p.SelfMint == nil || *p.SelfMint != "true" {
				return nil, errors.WithStack(ErrInvalidSelfMint)
			}
			// infinite mints if tick is self-mint, and max is set to 0
			if parsed.Max.IsZero() {
				parsed.Max = maxNumber
				if parsed.Lim.IsZero() {
					parsed.Lim = maxNumber
				}
			}
		}
		if parsed.Max.IsZero() {
			return nil, errors.WithStack(ErrInvalidMax)
		}
	case OperationMint, OperationTransfer:
		if p.Amt == "" {
			return nil, errors.WithStack(ErrInvalidAmt)
		}
		// NOTE: check tick decimals after parsing payload
		amt, err := parseNumericString(p.Amt, 18)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse amt")
		}
		parsed.Amt = amt
	default:
		return nil, errors.WithStack(ErrInvalidOperation)
	}
	return &parsed, nil
}
// max number for all numeric fields (except dec) is (2^64-1)
var (
	// maxNumber is 2^64-1 as a decimal.Decimal. It is both the overflow bound
	// in parseNumericString and the effective "unlimited" supply/limit used
	// for self-mint deploys whose max is 0.
	maxNumber = decimal.NewFromBigInt(new(big.Int).SetUint64(math.MaxUint64), 0)
)
// parseNumericString parses s as a decimal number, rejecting values with more
// than maxDec decimal places or greater than 2^64-1 (maxNumber).
//
// Fix: the "too many decimal points" message previously printed the raw
// exponent (a negative number) as the observed decimal-place count; it now
// prints the negated exponent so "expected 2 got 5" reads correctly.
func parseNumericString(s string, maxDec uint64) (decimal.Decimal, error) {
	d, err := decimal.NewFromString(s)
	if err != nil {
		return decimal.Decimal{}, errors.Wrap(err, "failed to parse decimal number")
	}
	// decimal stores value*10^exponent, so decimal places = -exponent.
	if -d.Exponent() > int32(maxDec) {
		return decimal.Decimal{}, errors.Errorf("cannot parse decimal number: too many decimal points: expected %d got %d", maxDec, -d.Exponent())
	}
	if d.GreaterThan(maxNumber) {
		return decimal.Decimal{}, errors.WithStack(ErrNumberOverflow)
	}
	return d, nil
}

View File

@@ -0,0 +1,81 @@
package datagateway
import (
"context"
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
)
// BRC20DataGateway combines read and write access to BRC-20 indexer storage.
type BRC20DataGateway interface {
	BRC20ReaderDataGateway
	BRC20WriterDataGateway
	// BeginBRC20Tx returns a new BRC20DataGateway with transaction enabled. All write operations performed in this datagateway must be committed to persist changes.
	BeginBRC20Tx(ctx context.Context) (BRC20DataGatewayWithTx, error)
}
// BRC20DataGatewayWithTx is a BRC20DataGateway whose writes are scoped to a
// database transaction (see Tx for commit/rollback semantics).
type BRC20DataGatewayWithTx interface {
	BRC20DataGateway
	Tx
}
// BRC20ReaderDataGateway provides read-only queries over indexed BRC-20 data.
// Batch getters return maps keyed by the query value; height-scoped getters
// return state as of the given block height.
type BRC20ReaderDataGateway interface {
	// indexer progress / bookkeeping
	GetLatestBlock(ctx context.Context) (types.BlockHeader, error)
	GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error)
	GetProcessorStats(ctx context.Context) (*entity.ProcessorStats, error)
	// inscription lookups
	GetInscriptionTransfersInOutPoints(ctx context.Context, outPoints []wire.OutPoint) (map[ordinals.SatPoint][]*entity.InscriptionTransfer, error)
	GetInscriptionEntriesByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*ordinals.InscriptionEntry, error)
	GetInscriptionNumbersByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]int64, error)
	GetInscriptionParentsByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]ordinals.InscriptionId, error)
	// balances and tick state
	GetBalancesBatchAtHeight(ctx context.Context, blockHeight uint64, queries []GetBalancesBatchAtHeightQuery) (map[string]map[string]*entity.Balance, error)
	GetTickEntriesByTicks(ctx context.Context, ticks []string) (map[string]*entity.TickEntry, error)
	GetEventInscribeTransfersByInscriptionIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*entity.EventInscribeTransfer, error)
	GetLatestEventId(ctx context.Context) (int64, error)
	GetBalancesByTick(ctx context.Context, tick string, blockHeight uint64) ([]*entity.Balance, error)
	GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[string]*entity.Balance, error)
	GetTransferableTransfersByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.EventInscribeTransfer, error)
	GetDeployEventByTick(ctx context.Context, tick string) (*entity.EventDeploy, error)
	GetFirstLastInscriptionNumberByTick(ctx context.Context, tick string) (first, last int64, err error)
	// event history queries
	GetDeployEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventDeploy, error)
	GetMintEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventMint, error)
	GetInscribeTransferEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventInscribeTransfer, error)
	GetTransferTransferEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventTransferTransfer, error)
}
// BRC20WriterDataGateway persists indexed BRC-20 data. The Delete*SinceHeight
// methods remove rows at or above a height and exist to revert state on
// chain reorganizations.
type BRC20WriterDataGateway interface {
	CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error
	CreateProcessorStats(ctx context.Context, stats *entity.ProcessorStats) error
	CreateTickEntries(ctx context.Context, blockHeight uint64, entries []*entity.TickEntry) error
	CreateTickEntryStates(ctx context.Context, blockHeight uint64, entryStates []*entity.TickEntry) error
	CreateInscriptionEntries(ctx context.Context, blockHeight uint64, entries []*ordinals.InscriptionEntry) error
	CreateInscriptionEntryStates(ctx context.Context, blockHeight uint64, entryStates []*ordinals.InscriptionEntry) error
	CreateInscriptionTransfers(ctx context.Context, transfers []*entity.InscriptionTransfer) error
	CreateEventDeploys(ctx context.Context, events []*entity.EventDeploy) error
	CreateEventMints(ctx context.Context, events []*entity.EventMint) error
	CreateEventInscribeTransfers(ctx context.Context, events []*entity.EventInscribeTransfer) error
	CreateEventTransferTransfers(ctx context.Context, events []*entity.EventTransferTransfer) error
	CreateBalances(ctx context.Context, balances []*entity.Balance) error
	// used for revert data
	DeleteIndexedBlocksSinceHeight(ctx context.Context, height uint64) error
	DeleteProcessorStatsSinceHeight(ctx context.Context, height uint64) error
	DeleteTickEntriesSinceHeight(ctx context.Context, height uint64) error
	DeleteTickEntryStatesSinceHeight(ctx context.Context, height uint64) error
	DeleteEventDeploysSinceHeight(ctx context.Context, height uint64) error
	DeleteEventMintsSinceHeight(ctx context.Context, height uint64) error
	DeleteEventInscribeTransfersSinceHeight(ctx context.Context, height uint64) error
	DeleteEventTransferTransfersSinceHeight(ctx context.Context, height uint64) error
	DeleteBalancesSinceHeight(ctx context.Context, height uint64) error
	DeleteInscriptionEntriesSinceHeight(ctx context.Context, height uint64) error
	DeleteInscriptionEntryStatesSinceHeight(ctx context.Context, height uint64) error
	DeleteInscriptionTransfersSinceHeight(ctx context.Context, height uint64) error
}
// GetBalancesBatchAtHeightQuery is one (pkScript, tick) balance lookup in a
// GetBalancesBatchAtHeight batch.
type GetBalancesBatchAtHeightQuery struct {
	PkScriptHex string // hex-encoded pkScript of the balance owner
	Tick        string
	BlockHeight uint64
}

View File

@@ -0,0 +1,12 @@
package datagateway
import (
"context"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
)
// IndexerInfoDataGateway stores and retrieves metadata about the indexer
// process itself (client version, DB/event-hash versions, network).
type IndexerInfoDataGateway interface {
	// GetLatestIndexerState returns the latest recorded indexer state.
	GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error)
	// CreateIndexerState persists a new indexer state record.
	CreateIndexerState(ctx context.Context, state entity.IndexerState) error
}

View File

@@ -0,0 +1,12 @@
package datagateway
import "context"
// Tx is the commit/rollback handle embedded by transactional data gateways.
type Tx interface {
	// Commit commits the DB transaction. All changes made after Begin() will be persisted. Calling Commit() will close the current transaction.
	// If Commit() is called without a prior Begin(), it must be a no-op.
	Commit(ctx context.Context) error
	// Rollback rolls back the DB transaction. All changes made after Begin() will be discarded.
	// Rollback() must be safe to call even if no transaction is active. Hence, a defer Rollback() is safe, even if Commit() was called prior with non-error conditions.
	Rollback(ctx context.Context) error
}

View File

@@ -0,0 +1,11 @@
package entity
import "github.com/shopspring/decimal"
// Balance is the BRC-20 balance of a pkScript for one tick as of BlockHeight.
type Balance struct {
	PkScript         []byte
	Tick             string
	BlockHeight      uint64
	OverallBalance   decimal.Decimal // total balance, including amounts locked in inscribed transfers
	AvailableBalance decimal.Decimal // balance spendable by new operations
}

View File

@@ -0,0 +1,28 @@
package entity
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/shopspring/decimal"
)
// EventDeploy records a BRC-20 "deploy" operation indexed from an
// inscription.
type EventDeploy struct {
	Id                int64
	InscriptionId     ordinals.InscriptionId
	InscriptionNumber int64
	Tick              string // lower-cased tick
	OriginalTick      string // tick as inscribed, before lower-casing
	TxHash            chainhash.Hash
	BlockHeight       uint64
	TxIndex           uint32
	Timestamp         time.Time
	PkScript          []byte
	SatPoint          ordinals.SatPoint
	TotalSupply       decimal.Decimal
	Decimals          uint16
	LimitPerMint      decimal.Decimal
	IsSelfMint        bool
}

View File

@@ -0,0 +1,27 @@
package entity
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/shopspring/decimal"
)
// EventInscribeTransfer records the inscription of a BRC-20 "transfer"
// operation (the first step of a transfer, before the inscription is moved).
type EventInscribeTransfer struct {
	Id                int64
	InscriptionId     ordinals.InscriptionId
	InscriptionNumber int64
	Tick              string // lower-cased tick
	OriginalTick      string // tick as inscribed, before lower-casing
	TxHash            chainhash.Hash
	BlockHeight       uint64
	TxIndex           uint32
	Timestamp         time.Time
	PkScript          []byte
	SatPoint          ordinals.SatPoint
	OutputIndex       uint32
	SatsAmount        uint64
	Amount            decimal.Decimal
}

View File

@@ -0,0 +1,26 @@
package entity
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/shopspring/decimal"
)
// EventMint records a BRC-20 "mint" operation indexed from an inscription.
type EventMint struct {
	Id                int64
	InscriptionId     ordinals.InscriptionId
	InscriptionNumber int64
	Tick              string // lower-cased tick
	OriginalTick      string // tick as inscribed, before lower-casing
	TxHash            chainhash.Hash
	BlockHeight       uint64
	TxIndex           uint32
	Timestamp         time.Time
	PkScript          []byte
	SatPoint          ordinals.SatPoint
	Amount            decimal.Decimal
	ParentId          *ordinals.InscriptionId // nil when the mint inscription has no parent
}

View File

@@ -0,0 +1,30 @@
package entity
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/shopspring/decimal"
)
// EventTransferTransfer records the spend of an inscribed BRC-20 transfer
// (the second step of a transfer, moving the balance between pkScripts).
type EventTransferTransfer struct {
	Id                int64
	InscriptionId     ordinals.InscriptionId
	InscriptionNumber int64
	Tick              string // lower-cased tick
	OriginalTick      string // tick as inscribed, before lower-casing
	TxHash            chainhash.Hash
	BlockHeight       uint64
	TxIndex           uint32
	Timestamp         time.Time
	FromPkScript      []byte
	FromSatPoint      ordinals.SatPoint
	FromInputIndex    uint32
	ToPkScript        []byte
	ToSatPoint        ordinals.SatPoint
	ToOutputIndex     uint32
	SpentAsFee        bool // true when the transfer inscription was spent as transaction fee
	Amount            decimal.Decimal
}

View File

@@ -0,0 +1,31 @@
package entity
import (
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
)
// OriginOld describes an already-existing inscription being moved by a
// transaction (as opposed to OriginNew, a freshly revealed one).
type OriginOld struct {
	Content     []byte
	OldSatPoint ordinals.SatPoint // satpoint the inscription is moving from
	InputIndex  uint32            // input carrying the inscription
}
// OriginNew describes an inscription newly revealed by a transaction,
// together with the curse/validity flags decided at parse time.
type OriginNew struct {
	Inscription    ordinals.Inscription
	Parent         *ordinals.InscriptionId
	Pointer        *uint64
	Fee            uint64
	Cursed         bool
	CursedForBRC20 bool
	Hidden         bool
	Reinscription  bool
	Unbound        bool
}
// Flotsam is an inscription in flight during transaction processing: either a
// pre-existing one being moved (OriginOld) or a new reveal (OriginNew),
// tracked with its sat offset within the transaction.
type Flotsam struct {
	Tx            *types.Transaction
	OriginOld     *OriginOld // OriginOld and OriginNew are mutually exclusive
	OriginNew     *OriginNew // OriginOld and OriginNew are mutually exclusive
	Offset        uint64
	InscriptionId ordinals.InscriptionId
}

View File

@@ -0,0 +1,10 @@
package entity
import "github.com/btcsuite/btcd/chaincfg/chainhash"
// IndexedBlock records a block processed by the indexer along with event
// hashes used to verify indexed data integrity across blocks.
type IndexedBlock struct {
	Height              uint64
	Hash                chainhash.Hash
	EventHash           []byte // hash of this block's events
	CumulativeEventHash []byte // running hash over all events up to this block
}

View File

@@ -0,0 +1,15 @@
package entity
import (
"time"
"github.com/gaze-network/indexer-network/common"
)
// IndexerState is a versioning record of the indexer process, used to detect
// incompatible client/DB/event-hash versions or network mismatches.
type IndexerState struct {
	CreatedAt        time.Time
	ClientVersion    string
	DBVersion        int32
	EventHashVersion int32
	Network          common.Network
}

View File

@@ -0,0 +1,23 @@
package entity
import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
)
// InscriptionTransfer records one movement of an inscription from OldSatPoint
// to NewSatPoint within a transaction.
type InscriptionTransfer struct {
	InscriptionId             ordinals.InscriptionId
	InscriptionNumber         int64
	InscriptionSequenceNumber uint64
	BlockHeight               uint64
	TxIndex                   uint32
	TxHash                    chainhash.Hash
	Content                   []byte // raw inscription content (BRC-20 JSON payload for this module)
	FromInputIndex            uint32
	OldSatPoint               ordinals.SatPoint
	NewSatPoint               ordinals.SatPoint
	NewPkScript               []byte
	NewOutputValue            uint64
	SentAsFee                 bool // true when the inscription was spent as transaction fee
	TransferCount             uint32
}

View File

@@ -0,0 +1,8 @@
package entity
// ProcessorStats holds running counters maintained by the processor per
// block height.
type ProcessorStats struct {
	BlockHeight             uint64
	CursedInscriptionCount  uint64
	BlessedInscriptionCount uint64
	LostSats                uint64
}

View File

@@ -0,0 +1,25 @@
package entity
import (
"time"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/shopspring/decimal"
)
// TickEntry is the state of a deployed BRC-20 tick: deploy parameters plus
// mutable mint progress.
type TickEntry struct {
	Tick                string // lower-cased tick
	OriginalTick        string // tick as inscribed, before lower-casing
	TotalSupply         decimal.Decimal
	Decimals            uint16
	LimitPerMint        decimal.Decimal
	IsSelfMint          bool
	DeployInscriptionId ordinals.InscriptionId
	DeployedAt          time.Time
	DeployedAtHeight    uint64
	MintedAmount        decimal.Decimal
	BurnedAmount        decimal.Decimal
	CompletedAt         time.Time // zero value until the supply is fully minted — TODO confirm
	CompletedAtHeight   uint64
}

View File

@@ -0,0 +1,285 @@
package ordinals
import (
"bytes"
"encoding/binary"
"github.com/btcsuite/btcd/txscript"
"github.com/gaze-network/indexer-network/core/types"
"github.com/samber/lo"
)
// Envelope is one "ord" inscription envelope parsed from a tapscript,
// together with the parsing quirks (pushnum, stutter, malformed fields) that
// downstream logic uses to decide whether the inscription is cursed.
type Envelope struct {
	Inscription           Inscription
	InputIndex            uint32 // Index of input that contains the envelope
	Offset                int    // Number of envelope in the input
	PushNum               bool   // True if envelope contains pushnum opcodes
	Stutter               bool   // True if envelope matches stuttering curse structure
	IncompleteField       bool   // True if payload is incomplete
	DuplicateField        bool   // True if payload contains duplicated field
	UnrecognizedEvenField bool   // True if payload contains unrecognized even field
}
// ParseEnvelopesFromTx extracts every inscription envelope found in the
// tapscripts of tx's inputs. Inputs without a tapscript witness are skipped.
// It always returns a non-nil (possibly empty) slice.
func ParseEnvelopesFromTx(tx *types.Transaction) []*Envelope {
	result := make([]*Envelope, 0)
	for inputIndex, input := range tx.TxIn {
		tokenizer, ok := extractTapScript(input.Witness)
		if !ok {
			continue
		}
		result = append(result, envelopesFromTapScript(tokenizer, inputIndex)...)
	}
	return result
}
// protocolId is the "ord" protocol identifier pushed immediately after
// OP_FALSE OP_IF in an inscription envelope.
var protocolId = []byte("ord")
// envelopesFromTapScript scans a tapscript for inscription envelopes, each
// introduced by OP_FALSE. Tokenizer errors abort the scan, returning whatever
// envelopes were parsed before the error (unparsable trailing bytes do not
// invalidate earlier envelopes).
//
// Fix: the previous revision ended with `if tokenizer.Err() != nil { return
// envelopes }` followed by an identical `return envelopes` — both branches
// returned the same value, so the dead check is removed. Behavior unchanged.
func envelopesFromTapScript(tokenizer txscript.ScriptTokenizer, inputIndex int) []*Envelope {
	envelopes := make([]*Envelope, 0)
	var stuttered bool
	for tokenizer.Next() {
		if tokenizer.Err() != nil {
			break
		}
		if tokenizer.Opcode() == txscript.OP_FALSE {
			// envelopeFromTokenizer receives a copy of the tokenizer, so the
			// outer scan resumes from the OP_FALSE position regardless of how
			// far envelope parsing advanced.
			envelope, stutter := envelopeFromTokenizer(tokenizer, inputIndex, len(envelopes), stuttered)
			if envelope != nil {
				envelopes = append(envelopes, envelope)
			} else {
				stuttered = stutter
			}
		}
	}
	return envelopes
}
// envelopeFromTokenizer attempts to parse one inscription envelope starting
// right after the OP_FALSE the caller just consumed. It returns nil when the
// script at this position is not a valid envelope; the second return value
// reports whether the failed match ended on another OP_FALSE, which the
// caller uses to track "stuttering" scripts.
//
// Fix: the pointer-field validity check previously required every byte beyond
// the 8th to be NON-zero (`value != 0`), which both accepted oversized
// pointers and rejected valid zero-padded ones. Per ord's reference
// implementation, a pointer fits in a uint64 exactly when all bytes after the
// little-endian first 8 are zero; the predicate is now `value == 0`.
func envelopeFromTokenizer(tokenizer txscript.ScriptTokenizer, inputIndex int, offset int, stuttered bool) (*Envelope, bool) {
	tokenizer.Next()
	if tokenizer.Opcode() != txscript.OP_IF {
		return nil, tokenizer.Opcode() == txscript.OP_FALSE
	}
	tokenizer.Next()
	if !bytes.Equal(tokenizer.Data(), protocolId) {
		return nil, tokenizer.Opcode() == txscript.OP_FALSE
	}
	var pushNum bool
	payload := make([][]byte, 0)
	for tokenizer.Next() {
		if tokenizer.Err() != nil {
			return nil, false
		}
		opCode := tokenizer.Opcode()
		if opCode == txscript.OP_ENDIF {
			break
		}
		// Pushnum opcodes are accepted inside envelopes but flagged via
		// pushNum, which curses the inscription.
		// NOTE(review): OP_10..OP_16 map to 0x10..0x16 here, while ord's
		// reference implementation maps them to their decimal values 10..16
		// (0x0a..0x10). The unit tests in this repo assert the same mapping,
		// so it is preserved — confirm the divergence is intended.
		switch opCode {
		case txscript.OP_1NEGATE:
			pushNum = true
			payload = append(payload, []byte{0x81})
		case txscript.OP_1:
			pushNum = true
			payload = append(payload, []byte{0x01})
		case txscript.OP_2:
			pushNum = true
			payload = append(payload, []byte{0x02})
		case txscript.OP_3:
			pushNum = true
			payload = append(payload, []byte{0x03})
		case txscript.OP_4:
			pushNum = true
			payload = append(payload, []byte{0x04})
		case txscript.OP_5:
			pushNum = true
			payload = append(payload, []byte{0x05})
		case txscript.OP_6:
			pushNum = true
			payload = append(payload, []byte{0x06})
		case txscript.OP_7:
			pushNum = true
			payload = append(payload, []byte{0x07})
		case txscript.OP_8:
			pushNum = true
			payload = append(payload, []byte{0x08})
		case txscript.OP_9:
			pushNum = true
			payload = append(payload, []byte{0x09})
		case txscript.OP_10:
			pushNum = true
			payload = append(payload, []byte{0x10})
		case txscript.OP_11:
			pushNum = true
			payload = append(payload, []byte{0x11})
		case txscript.OP_12:
			pushNum = true
			payload = append(payload, []byte{0x12})
		case txscript.OP_13:
			pushNum = true
			payload = append(payload, []byte{0x13})
		case txscript.OP_14:
			pushNum = true
			payload = append(payload, []byte{0x14})
		case txscript.OP_15:
			pushNum = true
			payload = append(payload, []byte{0x15})
		case txscript.OP_16:
			pushNum = true
			payload = append(payload, []byte{0x16})
		case txscript.OP_0:
			// OP_0 is a special case, it is accepted in ord's implementation
			payload = append(payload, []byte{})
		default:
			data := tokenizer.Data()
			if data == nil {
				return nil, false
			}
			payload = append(payload, data)
		}
	}
	// incomplete envelope
	if tokenizer.Done() && tokenizer.Opcode() != txscript.OP_ENDIF {
		return nil, false
	}
	// find body (empty data push in even index payload)
	bodyIndex := -1
	for i, value := range payload {
		if i%2 == 0 && len(value) == 0 {
			bodyIndex = i
			break
		}
	}
	var fieldPayloads [][]byte
	var body []byte
	if bodyIndex != -1 {
		fieldPayloads = payload[:bodyIndex]
		body = lo.Flatten(payload[bodyIndex+1:])
	} else {
		fieldPayloads = payload[:]
	}
	// fields precede the body as (tag, value) pairs; a dangling tag marks the
	// envelope's fields incomplete.
	var incompleteField bool
	fields := make(Fields)
	for _, chunk := range lo.Chunk(fieldPayloads, 2) {
		if len(chunk) != 2 {
			incompleteField = true
			break
		}
		key := chunk[0]
		value := chunk[1]
		// key cannot be empty, as checked by bodyIndex above
		tag := Tag(key[0])
		fields[tag] = append(fields[tag], value)
	}
	var duplicateField bool
	for _, values := range fields {
		if len(values) > 1 {
			duplicateField = true
			break
		}
	}
	rawContentEncoding := fields.Take(TagContentEncoding)
	rawContentType := fields.Take(TagContentType)
	rawDelegate := fields.Take(TagDelegate)
	rawMetadata := fields.Take(TagMetadata)
	rawMetaprotocol := fields.Take(TagMetaprotocol)
	rawParent := fields.Take(TagParent)
	rawPointer := fields.Take(TagPointer)
	// any even tag left after all recognized tags were taken is reserved,
	// which flags the envelope.
	unrecognizedEvenField := lo.SomeBy(lo.Keys(fields), func(key Tag) bool {
		return key%2 == 0
	})
	var delegate, parent *InscriptionId
	inscriptionId, err := NewInscriptionIdFromString(string(rawDelegate))
	if err == nil {
		delegate = &inscriptionId
	}
	inscriptionId, err = NewInscriptionIdFromString(string(rawParent))
	if err == nil {
		parent = &inscriptionId
	}
	var pointer *uint64
	// the pointer fits in a uint64 only when every byte beyond the
	// little-endian first 8 is zero
	if rawPointer != nil && (len(rawPointer) <= 8 || lo.EveryBy(rawPointer[8:], func(value byte) bool {
		return value == 0
	})) {
		// pad zero bytes to 8 bytes
		if len(rawPointer) < 8 {
			rawPointer = append(rawPointer, make([]byte, 8-len(rawPointer))...)
		}
		pointer = lo.ToPtr(binary.LittleEndian.Uint64(rawPointer))
	}
	inscription := Inscription{
		Content:         body,
		ContentEncoding: string(rawContentEncoding),
		ContentType:     string(rawContentType),
		Delegate:        delegate,
		Metadata:        rawMetadata,
		Metaprotocol:    string(rawMetaprotocol),
		Parent:          parent,
		Pointer:         pointer,
	}
	return &Envelope{
		Inscription:           inscription,
		InputIndex:            uint32(inputIndex),
		Offset:                offset,
		PushNum:               pushNum,
		Stutter:               stuttered,
		IncompleteField:       incompleteField,
		DuplicateField:        duplicateField,
		UnrecognizedEvenField: unrecognizedEvenField,
	}, false
}
// Fields maps an envelope tag to the raw data pushes recorded under that tag,
// in payload order.
type Fields map[Tag][][]byte

// Take removes and returns the value stored under tag, or nil when the tag is
// absent. Chunked tags are removed entirely and their pushes concatenated;
// for other tags only the first recorded value is consumed, leaving any
// remaining values in the map.
func (fields Fields) Take(tag Tag) []byte {
	values, ok := fields[tag]
	if !ok {
		return nil
	}
	if tag.IsChunked() {
		delete(fields, tag)
		return lo.Flatten(values)
	}
	first := values[0]
	if len(values) > 1 {
		fields[tag] = values[1:]
	} else {
		delete(fields, tag)
	}
	return first
}
// extractTapScript returns a tokenizer over the tapscript of a script-path
// spend witness (the second-to-last element after stripping the annex), and
// false when the witness is too short to contain one.
func extractTapScript(witness [][]byte) (txscript.ScriptTokenizer, bool) {
	stack := removeAnnexFromWitness(witness)
	if len(stack) < 2 {
		return txscript.ScriptTokenizer{}, false
	}
	tapScript := stack[len(stack)-2]
	return txscript.MakeScriptTokenizer(0, tapScript), true
}
// removeAnnexFromWitness strips the taproot annex (a trailing element whose
// first byte is the annex tag) from a witness stack, if present.
func removeAnnexFromWitness(witness [][]byte) [][]byte {
	n := len(witness)
	if n < 2 {
		return witness
	}
	last := witness[n-1]
	if len(last) == 0 || last[0] != txscript.TaprootAnnexTag {
		return witness
	}
	return witness[:n-1]
}

View File

@@ -0,0 +1,742 @@
package ordinals
import (
"testing"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/core/types"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
)
// TestParseEnvelopesFromTx exercises envelope extraction against cases
// mirroring ord's reference test-suite: tapscript location, field parsing,
// body chunking, curse flags (pushnum, stutter, duplicate/incomplete/
// unrecognized fields), and pointer handling.
func TestParseEnvelopesFromTx(t *testing.T) {
	// testTx parses envelopes from tx and compares them to expected.
	testTx := func(t *testing.T, tx *types.Transaction, expected []*Envelope) {
		t.Helper()
		envelopes := ParseEnvelopesFromTx(tx)
		assert.Equal(t, expected, envelopes)
	}
	// testParseWitness wraps tapScript in a minimal one-input transaction
	// whose witness is [tapScript, <empty control element>].
	testParseWitness := func(t *testing.T, tapScript []byte, expected []*Envelope) {
		t.Helper()
		tx := &types.Transaction{
			Version:  2,
			LockTime: 0,
			TxIn: []*types.TxIn{
				{
					Witness: wire.TxWitness{
						tapScript,
						{},
					},
				},
			},
		}
		testTx(t, tx, expected)
	}
	// testEnvelope builds OP_FALSE OP_IF <payload pushes...> OP_ENDIF and
	// parses it via testParseWitness.
	testEnvelope := func(t *testing.T, payload [][]byte, expected []*Envelope) {
		t.Helper()
		builder := NewPushScriptBuilder().
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF)
		for _, data := range payload {
			builder.AddData(data)
		}
		builder.AddOp(txscript.OP_ENDIF)
		script, err := builder.Script()
		assert.NoError(t, err)
		testParseWitness(
			t,
			script,
			expected,
		)
	}
	t.Run("empty_witness", func(t *testing.T) {
		testTx(t, &types.Transaction{
			Version:  2,
			LockTime: 0,
			TxIn: []*types.TxIn{{
				Witness: wire.TxWitness{},
			}},
		}, []*Envelope{})
	})
	t.Run("ignore_key_path_spends", func(t *testing.T) {
		// single witness element => key-path spend, no tapscript to scan
		testTx(t, &types.Transaction{
			Version:  2,
			LockTime: 0,
			TxIn: []*types.TxIn{{
				Witness: wire.TxWitness{
					utils.Must(NewPushScriptBuilder().
						AddOp(txscript.OP_FALSE).
						AddOp(txscript.OP_IF).
						AddData(protocolId).
						AddOp(txscript.OP_ENDIF).
						Script()),
				},
			}},
		}, []*Envelope{})
	})
	t.Run("ignore_key_path_spends_with_annex", func(t *testing.T) {
		// annex is stripped first, leaving a single element => still key-path
		testTx(t, &types.Transaction{
			Version:  2,
			LockTime: 0,
			TxIn: []*types.TxIn{{
				Witness: wire.TxWitness{
					utils.Must(NewPushScriptBuilder().
						AddOp(txscript.OP_FALSE).
						AddOp(txscript.OP_IF).
						AddData(protocolId).
						AddOp(txscript.OP_ENDIF).
						Script()),
					[]byte{txscript.TaprootAnnexTag},
				},
			}},
		}, []*Envelope{})
	})
	t.Run("parse_from_tapscript", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddOp(txscript.OP_ENDIF).
				Script()),
			[]*Envelope{{}},
		)
	})
	t.Run("ignore_unparsable_scripts", func(t *testing.T) {
		// a trailing garbage byte after a complete envelope must not drop the
		// envelopes parsed before the tokenizer error
		script := utils.Must(NewPushScriptBuilder().
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddData(protocolId).
			AddOp(txscript.OP_ENDIF).
			Script())
		script = append(script, 0x01)
		testParseWitness(
			t,
			script,
			[]*Envelope{
				{},
			},
		)
	})
	t.Run("no_inscription", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				Script()),
			[]*Envelope{},
		)
	})
	t.Run("duplicate_field", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagNop.Bytes(),
				{},
				TagNop.Bytes(),
				{},
			},
			[]*Envelope{
				{
					DuplicateField: true,
				},
			},
		)
	})
	t.Run("with_content_type", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagBody.Bytes(),
				[]byte("ord"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("ord"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("with_content_encoding", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagContentEncoding.Bytes(),
				[]byte("br"),
				TagBody.Bytes(),
				[]byte("ord"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:         []byte("ord"),
						ContentType:     "text/plain;charset=utf-8",
						ContentEncoding: "br",
					},
				},
			},
		)
	})
	t.Run("with_unknown_tag", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagNop.Bytes(),
				[]byte("bar"),
				TagBody.Bytes(),
				[]byte("ord"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("ord"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("no_body", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("no_content_type", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagBody.Bytes(),
				[]byte("foo"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content: []byte("foo"),
					},
				},
			},
		)
	})
	t.Run("valid_body_in_multiple_pushes", func(t *testing.T) {
		// body pushes after TagBody are concatenated
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagBody.Bytes(),
				[]byte("foo"),
				[]byte("bar"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("foobar"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("valid_body_in_zero_pushes", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagBody.Bytes(),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte(""),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("valid_body_in_multiple_empty_pushes", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagBody.Bytes(),
				{},
				{},
				{},
				{},
				{},
				{},
				{},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte(""),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("valid_ignore_trailing", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddData(TagContentType.Bytes()).
				AddData([]byte("text/plain;charset=utf-8")).
				AddData(TagBody.Bytes()).
				AddData([]byte("ord")).
				AddOp(txscript.OP_ENDIF).
				AddOp(txscript.OP_CHECKSIG).
				Script()),
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("ord"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("valid_ignore_preceding", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_CHECKSIG).
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddData(TagContentType.Bytes()).
				AddData([]byte("text/plain;charset=utf-8")).
				AddData(TagBody.Bytes()).
				AddData([]byte("ord")).
				AddOp(txscript.OP_ENDIF).
				Script()),
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("ord"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("multiple_inscriptions_in_a_single_witness", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddData(TagContentType.Bytes()).
				AddData([]byte("text/plain;charset=utf-8")).
				AddData(TagBody.Bytes()).
				AddData([]byte("foo")).
				AddOp(txscript.OP_ENDIF).
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddData(TagContentType.Bytes()).
				AddData([]byte("text/plain;charset=utf-8")).
				AddData(TagBody.Bytes()).
				AddData([]byte("bar")).
				AddOp(txscript.OP_ENDIF).
				Script()),
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("foo"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
				{
					Inscription: Inscription{
						Content:     []byte("bar"),
						ContentType: "text/plain;charset=utf-8",
					},
					Offset: 1,
				},
			},
		)
	})
	t.Run("invalid_utf8_does_not_render_inscription_invalid", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagBody.Bytes(),
				{0b10000000},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte{0b10000000},
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("no_endif", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				Script()),
			[]*Envelope{},
		)
	})
	t.Run("no_op_false", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddOp(txscript.OP_ENDIF).
				Script()),
			[]*Envelope{},
		)
	})
	t.Run("empty_envelope", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{},
			[]*Envelope{},
		)
	})
	t.Run("wrong_protocol_identifier", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				[]byte("foo"),
			},
			[]*Envelope{},
		)
	})
	t.Run("extract_from_second_input", func(t *testing.T) {
		testTx(
			t,
			&types.Transaction{
				Version:  2,
				LockTime: 0,
				TxIn: []*types.TxIn{{}, {
					Witness: wire.TxWitness{
						utils.Must(NewPushScriptBuilder().
							AddOp(txscript.OP_FALSE).
							AddOp(txscript.OP_IF).
							AddData(protocolId).
							AddData(TagContentType.Bytes()).
							AddData([]byte("text/plain;charset=utf-8")).
							AddData(TagBody.Bytes()).
							AddData([]byte("ord")).
							AddOp(txscript.OP_ENDIF).
							Script(),
						),
						{},
					},
				}},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("ord"),
						ContentType: "text/plain;charset=utf-8",
					},
					InputIndex: 1,
				},
			},
		)
	})
	t.Run("inscribe_png", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("image/png"),
				TagBody.Bytes(),
				{0x01, 0x02, 0x03},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte{0x01, 0x02, 0x03},
						ContentType: "image/png",
					},
				},
			},
		)
	})
	t.Run("unknown_odd_fields", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagNop.Bytes(),
				{0x00},
			},
			[]*Envelope{
				{
					Inscription: Inscription{},
				},
			},
		)
	})
	t.Run("unknown_even_fields", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagUnbound.Bytes(),
				{0x00},
			},
			[]*Envelope{
				{
					Inscription:           Inscription{},
					UnrecognizedEvenField: true,
				},
			},
		)
	})
	t.Run("pointer_field_is_recognized", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagPointer.Bytes(),
				{0x01},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Pointer: lo.ToPtr(uint64(1)),
					},
				},
			},
		)
	})
	t.Run("duplicate_pointer_field_makes_inscription_unbound", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagPointer.Bytes(),
				{0x01},
				TagPointer.Bytes(),
				{0x00},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Pointer: lo.ToPtr(uint64(1)),
					},
					DuplicateField:        true,
					UnrecognizedEvenField: true,
				},
			},
		)
	})
	t.Run("incomplete_field", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagNop.Bytes(),
			},
			[]*Envelope{
				{
					Inscription:     Inscription{},
					IncompleteField: true,
				},
			},
		)
	})
	t.Run("metadata_is_parsed_correctly", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagMetadata.Bytes(),
				{},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Metadata: []byte{},
					},
				},
			},
		)
	})
	t.Run("metadata_is_parsed_correctly_from_chunks", func(t *testing.T) {
		// metadata is a chunked tag: values concatenate, but still flagged as
		// duplicate
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagMetadata.Bytes(),
				{0x00},
				TagMetadata.Bytes(),
				{0x01},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Metadata: []byte{0x00, 0x01},
					},
					DuplicateField: true,
				},
			},
		)
	})
	t.Run("pushnum_opcodes_are_parsed_correctly", func(t *testing.T) {
		// NOTE(review): OP_10..OP_16 are expected to decode to 0x10..0x16,
		// matching envelopeFromTokenizer; ord's reference implementation
		// decodes them to decimal 10..16 (0x0a..0x10) — confirm the
		// divergence is intended.
		pushNumOpCodes := map[byte][]byte{
			txscript.OP_1NEGATE: {0x81},
			txscript.OP_1:       {0x01},
			txscript.OP_2:       {0x02},
			txscript.OP_3:       {0x03},
			txscript.OP_4:       {0x04},
			txscript.OP_5:       {0x05},
			txscript.OP_6:       {0x06},
			txscript.OP_7:       {0x07},
			txscript.OP_8:       {0x08},
			txscript.OP_9:       {0x09},
			txscript.OP_10:      {0x10},
			txscript.OP_11:      {0x11},
			txscript.OP_12:      {0x12},
			txscript.OP_13:      {0x13},
			txscript.OP_14:      {0x14},
			txscript.OP_15:      {0x15},
			txscript.OP_16:      {0x16},
		}
		for opCode, value := range pushNumOpCodes {
			script := utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddData(TagBody.Bytes()).
				AddOp(opCode).
				AddOp(txscript.OP_ENDIF).
				Script())
			testParseWitness(
				t,
				script,
				[]*Envelope{
					{
						Inscription: Inscription{
							Content: value,
						},
						PushNum: true,
					},
				},
			)
		}
	})
	t.Run("stuttering", func(t *testing.T) {
		// extra OP_FALSE before the envelope marks it as stuttered
		script := utils.Must(NewPushScriptBuilder().
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddData(protocolId).
			AddOp(txscript.OP_ENDIF).
			Script())
		testParseWitness(
			t,
			script,
			[]*Envelope{
				{
					Inscription: Inscription{},
					Stutter:     true,
				},
			},
		)
		script = utils.Must(NewPushScriptBuilder().
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddData(protocolId).
			AddOp(txscript.OP_ENDIF).
			Script())
		testParseWitness(
			t,
			script,
			[]*Envelope{
				{
					Inscription: Inscription{},
					Stutter:     true,
				},
			},
		)
		// a non-OP_FALSE opcode between the failed match and the envelope
		// breaks the stutter
		script = utils.Must(NewPushScriptBuilder().
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_AND).
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddData(protocolId).
			AddOp(txscript.OP_ENDIF).
			Script())
		testParseWitness(
			t,
			script,
			[]*Envelope{
				{
					Inscription: Inscription{},
					Stutter:     false,
				},
			},
		)
	})
}

View File

@@ -0,0 +1,13 @@
package ordinals
import "github.com/gaze-network/indexer-network/common"
// GetJubileeHeight returns the activation block height of the Jubilee rules
// (ord 0.14 cursed-inscription changes) for the given network.
// It panics for any network other than mainnet or testnet.
func GetJubileeHeight(network common.Network) uint64 {
	if network == common.NetworkMainnet {
		return 824544
	}
	if network == common.NetworkTestnet {
		return 2544192
	}
	panic("unsupported network")
}

View File

@@ -0,0 +1,27 @@
package ordinals
import "time"
// Inscription holds the decoded payload of a single inscription envelope.
type Inscription struct {
	Content         []byte          // raw inscription body bytes
	ContentEncoding string          // value of the content-encoding field, if any
	ContentType     string          // MIME type declared by the content-type field
	Delegate        *InscriptionId  // delegate inscription, if the delegate field is set
	Metadata        []byte          // raw metadata bytes (CBOR in ord; not decoded here)
	Metaprotocol    string          // metaprotocol field (e.g. "brc-20"), if any
	Parent          *InscriptionId  // in 0.14, inscription has only one parent
	Pointer         *uint64         // pointer field: sat offset within the reveal tx outputs
}
// TODO: refactor ordinals.InscriptionEntry to entity.InscriptionEntry
// InscriptionEntry is the indexed record of an inscription: the payload plus
// numbering and provenance assigned by the indexer.
type InscriptionEntry struct {
	Id              InscriptionId // canonical "<txid>i<index>" identity
	Number          int64         // inscription number (negative for cursed, per ord convention — confirm)
	SequenceNumber  uint64        // monotonic sequence across all inscriptions
	Cursed          bool          // cursed under ord rules
	CursedForBRC20  bool          // cursed for BRC-20 accounting purposes specifically
	CreatedAt       time.Time     // timestamp of the block that revealed the inscription
	CreatedAtHeight uint64        // height of the block that revealed the inscription
	Inscription     Inscription   // decoded envelope payload
	TransferCount   uint32        // number of times the inscription has been transferred
}

View File

@@ -0,0 +1,67 @@
package ordinals
import (
"fmt"
"strconv"
"strings"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
)
// InscriptionId identifies an inscription by its reveal transaction hash and
// the inscription's index within that transaction (ord's "<txid>i<index>" form).
type InscriptionId struct {
	TxHash chainhash.Hash
	Index  uint32
}
// String renders the id in ord's canonical "<txid>i<index>" text form.
func (i InscriptionId) String() string {
	return i.TxHash.String() + "i" + strconv.FormatUint(uint64(i.Index), 10)
}
// NewInscriptionId builds an InscriptionId from a reveal transaction hash and
// an inscription index within that transaction.
func NewInscriptionId(txHash chainhash.Hash, index uint32) InscriptionId {
	return InscriptionId{TxHash: txHash, Index: index}
}
// ErrInscriptionIdInvalidSeparator is returned when the input lacks the "i"
// separator between txid and index.
var ErrInscriptionIdInvalidSeparator = fmt.Errorf("invalid inscription id: must contain exactly one separator")

// NewInscriptionIdFromString parses ord's canonical "<txid>i<index>" form.
// The txid part is hex, so the first "i" in the string is the separator.
func NewInscriptionIdFromString(s string) (InscriptionId, error) {
	txHashStr, indexStr, found := strings.Cut(s, "i")
	if !found {
		return InscriptionId{}, errors.WithStack(ErrInscriptionIdInvalidSeparator)
	}
	txHash, err := chainhash.NewHashFromStr(txHashStr)
	if err != nil {
		return InscriptionId{}, errors.Wrap(err, "invalid inscription id: cannot parse txHash")
	}
	index, err := strconv.ParseUint(indexStr, 10, 32)
	if err != nil {
		return InscriptionId{}, errors.Wrap(err, "invalid inscription id: cannot parse index")
	}
	return InscriptionId{TxHash: *txHash, Index: uint32(index)}, nil
}
// MarshalJSON implements json.Marshaler, emitting the id as a quoted JSON string.
func (r InscriptionId) MarshalJSON() ([]byte, error) {
	// The id contains only hex digits, 'i' and decimal digits, so quoting adds
	// no escapes — identical output to wrapping in literal quotes.
	return strconv.AppendQuote(nil, r.String()), nil
}
// UnmarshalJSON implements json.Unmarshaler; the value must be a JSON string
// in "<txid>i<index>" form.
func (r *InscriptionId) UnmarshalJSON(data []byte) error {
	n := len(data)
	// Reject anything that is not a quoted string.
	if n < 2 || data[0] != '"' || data[n-1] != '"' {
		return errors.New("must be string")
	}
	parsed, err := NewInscriptionIdFromString(string(data[1 : n-1]))
	if err != nil {
		return errors.WithStack(err)
	}
	*r = parsed
	return nil
}

View File

@@ -0,0 +1,109 @@
package ordinals
import (
"testing"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/stretchr/testify/assert"
)
// TestNewInscriptionIdFromString covers valid ids (including the max uint32
// index) and the separator / txid / index failure modes.
func TestNewInscriptionIdFromString(t *testing.T) {
	tests := []struct {
		name        string
		input       string
		expected    InscriptionId
		shouldError bool
	}{
		{
			name:  "valid inscription id 1",
			input: "1111111111111111111111111111111111111111111111111111111111111111i0",
			expected: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  0,
			},
		},
		{
			name:  "valid inscription id 2",
			input: "1111111111111111111111111111111111111111111111111111111111111111i1",
			expected: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  1,
			},
		},
		{
			// 4294967295 == math.MaxUint32: largest index that still parses.
			name:  "valid inscription id 3",
			input: "1111111111111111111111111111111111111111111111111111111111111111i4294967295",
			expected: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  4294967295,
			},
		},
		{
			name:        "error no separator",
			input:       "abc",
			shouldError: true,
		},
		{
			// "xyzixyz" fails on the txid part before the index is reached.
			name:        "error invalid index",
			input:       "xyzixyz",
			shouldError: true,
		},
		{
			name:        "error invalid index",
			input:       "abcixyz",
			shouldError: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			actual, err := NewInscriptionIdFromString(tt.input)
			if tt.shouldError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.expected, actual)
			}
		})
	}
}
// TestInscriptionIdString checks the canonical "<txid>i<index>" rendering,
// the inverse of TestNewInscriptionIdFromString's valid cases.
func TestInscriptionIdString(t *testing.T) {
	tests := []struct {
		name     string
		expected string
		input    InscriptionId
	}{
		{
			name:     "valid inscription id 1",
			expected: "1111111111111111111111111111111111111111111111111111111111111111i0",
			input: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  0,
			},
		},
		{
			name:     "valid inscription id 2",
			expected: "1111111111111111111111111111111111111111111111111111111111111111i1",
			input: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  1,
			},
		},
		{
			// Max uint32 index round-trips unchanged.
			name:     "valid inscription id 3",
			expected: "1111111111111111111111111111111111111111111111111111111111111111i4294967295",
			input: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  4294967295,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.expected, tt.input.String())
		})
	}
}

View File

@@ -0,0 +1,68 @@
package ordinals
import (
"fmt"
"strconv"
"strings"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
)
// SatPoint locates a single satoshi: an outpoint (txid:vout) plus the sat's
// byte offset within that output. Textual form is "<txid>:<vout>:<offset>".
type SatPoint struct {
	OutPoint wire.OutPoint
	Offset   uint64
}
// String renders the sat point as "<txid>:<vout>:<offset>".
func (s SatPoint) String() string {
	return s.OutPoint.String() + ":" + strconv.FormatUint(s.Offset, 10)
}
// ErrSatPointInvalidSeparator is returned when the input does not contain the
// two ":" separators of the "<txid>:<vout>:<offset>" form.
var ErrSatPointInvalidSeparator = fmt.Errorf("invalid sat point: must contain exactly two separators")

// NewSatPointFromString parses a sat point in "<txid>:<vout>:<offset>" form.
func NewSatPointFromString(s string) (SatPoint, error) {
	parts := strings.SplitN(s, ":", 3)
	if len(parts) != 3 {
		return SatPoint{}, errors.WithStack(ErrSatPointInvalidSeparator)
	}
	txHash, err := chainhash.NewHashFromStr(parts[0])
	if err != nil {
		// was "invalid inscription id: ..." — copy-paste from the inscription id parser
		return SatPoint{}, errors.Wrap(err, "invalid sat point: cannot parse txHash")
	}
	index, err := strconv.ParseUint(parts[1], 10, 32)
	if err != nil {
		// was "invalid inscription id: ..." — copy-paste from the inscription id parser
		return SatPoint{}, errors.Wrap(err, "invalid sat point: cannot parse index")
	}
	offset, err := strconv.ParseUint(parts[2], 10, 64)
	if err != nil {
		return SatPoint{}, errors.Wrap(err, "invalid sat point: cannot parse offset")
	}
	return SatPoint{
		OutPoint: wire.OutPoint{
			Hash:  *txHash,
			Index: uint32(index),
		},
		Offset: offset,
	}, nil
}
// MarshalJSON implements json.Marshaler, emitting the sat point as a quoted
// JSON string.
func (r SatPoint) MarshalJSON() ([]byte, error) {
	// Only hex digits, ':' and decimal digits occur, so quoting adds no
	// escapes — identical output to wrapping in literal quotes.
	return strconv.AppendQuote(nil, r.String()), nil
}
// UnmarshalJSON implements json.Unmarshaler; the value must be a JSON string
// in "<txid>:<vout>:<offset>" form.
func (r *SatPoint) UnmarshalJSON(data []byte) error {
	n := len(data)
	// Reject anything that is not a quoted string.
	if n < 2 || data[0] != '"' || data[n-1] != '"' {
		return errors.New("must be string")
	}
	parsed, err := NewSatPointFromString(string(data[1 : n-1]))
	if err != nil {
		return errors.WithStack(err)
	}
	*r = parsed
	return nil
}

View File

@@ -0,0 +1,89 @@
package ordinals
import (
"testing"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/stretchr/testify/assert"
)
// TestNewSatPointFromString covers a valid "<txid>:<vout>:<offset>" input and
// the separator / vout / offset failure modes.
func TestNewSatPointFromString(t *testing.T) {
	tests := []struct {
		name        string
		input       string
		expected    SatPoint
		shouldError bool
	}{
		{
			name:  "valid sat point",
			input: "1111111111111111111111111111111111111111111111111111111111111111:1:2",
			expected: SatPoint{
				OutPoint: wire.OutPoint{
					Hash:  *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
					Index: 1,
				},
				Offset: 2,
			},
		},
		{
			name:        "error no separator",
			input:       "abc",
			shouldError: true,
		},
		{
			name:        "error invalid output index",
			input:       "abc:xyz",
			shouldError: true,
		},
		{
			// Two parts only — offset component missing.
			name:        "error no offset",
			input:       "1111111111111111111111111111111111111111111111111111111111111111:1",
			shouldError: true,
		},
		{
			name:        "error invalid offset",
			input:       "1111111111111111111111111111111111111111111111111111111111111111:1:foo",
			shouldError: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			actual, err := NewSatPointFromString(tt.input)
			if tt.shouldError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.expected, actual)
			}
		})
	}
}
// TestSatPointString checks the canonical "<txid>:<vout>:<offset>" rendering,
// the inverse of TestNewSatPointFromString's valid case.
func TestSatPointString(t *testing.T) {
	tests := []struct {
		name     string
		input    SatPoint
		expected string
	}{
		{
			name: "valid sat point",
			input: SatPoint{
				OutPoint: wire.OutPoint{
					Hash:  *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
					Index: 1,
				},
				Offset: 2,
			},
			expected: "1111111111111111111111111111111111111111111111111111111111111111:1:2",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.expected, tt.input.String())
		})
	}
}

View File

@@ -0,0 +1,170 @@
package ordinals
import (
"encoding/binary"
"fmt"
"github.com/btcsuite/btcd/txscript"
)
// PushScriptBuilder is a helper to build scripts that requires data pushes to use OP_PUSHDATA* or OP_DATA_* opcodes only.
// Empty data pushes are still encoded as OP_0.
// Unlike txscript.ScriptBuilder, it never substitutes small-integer opcodes
// for single-byte pushes, which ord's envelope format requires.
type PushScriptBuilder struct {
	script []byte // script bytes accumulated so far
	err    error  // first error encountered; once set, all Add* calls are no-ops
}
// NewPushScriptBuilder returns an empty builder ready for Add* calls.
func NewPushScriptBuilder() *PushScriptBuilder {
	return new(PushScriptBuilder)
}
// canonicalDataSize returns the number of bytes the canonical push encoding
// of data will take: the push opcode plus any length bytes plus the data.
func canonicalDataSize(data []byte) int {
	n := len(data)
	switch {
	case n == 0:
		return 1 // encoded as a single OP_0
	case n < txscript.OP_PUSHDATA1:
		return 1 + n // direct OP_DATA_n opcode
	case n <= 0xff:
		return 2 + n // OP_PUSHDATA1 + 1-byte length
	case n <= 0xffff:
		return 3 + n // OP_PUSHDATA2 + 2-byte length
	default:
		return 5 + n // OP_PUSHDATA4 + 4-byte length
	}
}
// pushDataToBytes encodes data as a canonical push: OP_0 for empty data,
// otherwise an OP_DATA_n / OP_PUSHDATA1/2/4 prefix followed by the data.
func pushDataToBytes(data []byte) []byte {
	n := len(data)
	if n == 0 {
		return []byte{txscript.OP_0}
	}
	var out []byte
	switch {
	case n < txscript.OP_PUSHDATA1:
		// OP_DATA_1 .. OP_DATA_75 encode the length in the opcode itself.
		out = append(out, byte(txscript.OP_DATA_1-1+n))
	case n <= 0xff:
		out = append(out, txscript.OP_PUSHDATA1, byte(n))
	case n <= 0xffff:
		var lenBuf [2]byte
		binary.LittleEndian.PutUint16(lenBuf[:], uint16(n))
		out = append(out, txscript.OP_PUSHDATA2)
		out = append(out, lenBuf[:]...)
	default:
		var lenBuf [4]byte
		binary.LittleEndian.PutUint32(lenBuf[:], uint32(n))
		out = append(out, txscript.OP_PUSHDATA4)
		out = append(out, lenBuf[:]...)
	}
	// Append the actual data.
	return append(out, data...)
}
// AddData pushes the passed data to the end of the script. It automatically
// chooses canonical opcodes depending on the length of the data. A zero length
// buffer will lead to a push of empty data onto the stack (OP_0) and any push
// of data greater than MaxScriptElementSize will not modify the script since
// that is not allowed by the script engine. Also, the script will not be
// modified if pushing the data would cause the script to exceed the maximum
// allowed script engine size.
func (b *PushScriptBuilder) AddData(data []byte) *PushScriptBuilder {
	if b.err != nil {
		return b
	}

	// Reject pushes that would grow the script past the largest allowed
	// canonical script size.
	encodedSize := canonicalDataSize(data)
	if len(b.script)+encodedSize > txscript.MaxScriptSize {
		msg := fmt.Sprintf("adding %d bytes of data would exceed the "+
			"maximum allowed canonical script length of %d",
			encodedSize, txscript.MaxScriptSize)
		b.err = txscript.ErrScriptNotCanonical(msg)
		return b
	}

	// Reject single elements larger than the engine permits.
	if elemLen := len(data); elemLen > txscript.MaxScriptElementSize {
		msg := fmt.Sprintf("adding a data element of %d bytes would "+
			"exceed the maximum allowed script element size of %d",
			elemLen, txscript.MaxScriptElementSize)
		b.err = txscript.ErrScriptNotCanonical(msg)
		return b
	}

	b.script = append(b.script, pushDataToBytes(data)...)
	return b
}
// AddFullData should not typically be used by ordinary users as it does not
// include the checks which prevent data pushes larger than the maximum allowed
// sizes which leads to scripts that can't be executed. This is provided for
// testing purposes such as regression tests where sizes are intentionally made
// larger than allowed.
//
// Use AddData instead.
func (b *PushScriptBuilder) AddFullData(data []byte) *PushScriptBuilder {
	if b.err == nil {
		b.script = append(b.script, pushDataToBytes(data)...)
	}
	return b
}
// AddOp pushes the passed opcode to the end of the script. The script will not
// be modified if pushing the opcode would cause the script to exceed the
// maximum allowed script engine size.
func (b *PushScriptBuilder) AddOp(opcode byte) *PushScriptBuilder {
	if b.err != nil {
		return b
	}

	// One more byte must still fit within the canonical script size limit.
	if len(b.script)+1 > txscript.MaxScriptSize {
		msg := fmt.Sprintf("adding an opcode would exceed the maximum "+
			"allowed canonical script length of %d", txscript.MaxScriptSize)
		b.err = txscript.ErrScriptNotCanonical(msg)
		return b
	}

	b.script = append(b.script, opcode)
	return b
}
// AddOps pushes the passed opcodes to the end of the script. The script will
// not be modified if pushing the opcodes would cause the script to exceed the
// maximum allowed script engine size.
func (b *PushScriptBuilder) AddOps(opcodes []byte) *PushScriptBuilder {
	if b.err != nil {
		return b
	}

	// All opcodes must still fit within the canonical script size limit.
	if len(b.script)+len(opcodes) > txscript.MaxScriptSize {
		msg := fmt.Sprintf("adding opcodes would exceed the maximum "+
			"allowed canonical script length of %d", txscript.MaxScriptSize)
		b.err = txscript.ErrScriptNotCanonical(msg)
		return b
	}

	b.script = append(b.script, opcodes...)
	return b
}
// Script returns the currently built script. When any errors occurred while
// building the script, the script will be returned up the point of the first
// error along with the error.
func (b *PushScriptBuilder) Script() ([]byte, error) {
	script, err := b.script, b.err
	return script, err
}

View File

@@ -0,0 +1,81 @@
package ordinals
// Tag identifies a data field inside an inscription envelope.
// NOTE(review): the original comment said "runestone"/"cenotaph", which is
// runes-protocol terminology; the tags below (content-type, metadata, ...)
// match ord's inscription fields. In ord, unrecognized even tags curse the
// inscription rather than producing a cenotaph — confirm against the indexer.
type Tag uint8

var (
	// TagBody delimits the start of the body; encoded as an empty data push.
	TagBody = Tag(0)
	// TagPointer is the sat offset within the reveal transaction's outputs.
	TagPointer = Tag(2)
	// TagUnbound is unrecognized
	TagUnbound = Tag(66)
	// TagContentType is the MIME type of the body.
	TagContentType = Tag(1)
	// TagParent references a parent inscription.
	TagParent = Tag(3)
	// TagMetadata carries metadata; may span multiple pushes (see IsChunked).
	TagMetadata = Tag(5)
	// TagMetaprotocol names a metaprotocol, e.g. "brc-20".
	TagMetaprotocol = Tag(7)
	// TagContentEncoding is the encoding of the body.
	TagContentEncoding = Tag(9)
	// TagDelegate references a delegate inscription.
	TagDelegate = Tag(11)
	// TagNop is unrecognized
	TagNop = Tag(255)
)
// allTags is the set of recognized field tags. TagBody is deliberately absent
// (it is a delimiter, not a field), as are the unrecognized TagUnbound/TagNop.
var allTags = map[Tag]struct{}{
	TagPointer:         {},
	TagContentType:     {},
	TagParent:          {},
	TagMetadata:        {},
	TagMetaprotocol:    {},
	TagContentEncoding: {},
	TagDelegate:        {},
}
// IsValid reports whether t is one of the recognized field tags.
func (t Tag) IsValid() bool {
	if _, found := allTags[t]; found {
		return true
	}
	return false
}
// chunkedTags lists tags whose value may be split across multiple data pushes
// and must be concatenated when parsed (only metadata in ord 0.14).
var chunkedTags = map[Tag]struct{}{
	TagMetadata: {},
}
// IsChunked reports whether the tag's value may span multiple data pushes.
func (t Tag) IsChunked() bool {
	if _, found := chunkedTags[t]; found {
		return true
	}
	return false
}
// Bytes returns the tag's wire representation: an empty push for TagBody, a
// single byte for every other tag.
func (t Tag) Bytes() []byte {
	if t != TagBody {
		return []byte{byte(t)}
	}
	return []byte{} // body tag is empty data push
}
// ParseTag converts any integer-typed value (or an existing Tag) to a Tag by
// truncating it to uint8. It never returns a non-nil error; an unsupported
// input type is a programmer bug and panics.
// NOTE(review): values outside 0..255 (or negative) wrap silently on the
// Tag(...) conversion — confirm callers never pass out-of-range ids.
func ParseTag(input interface{}) (Tag, error) {
	switch input := input.(type) {
	case Tag:
		return input, nil
	case int:
		return Tag(input), nil
	case int8:
		return Tag(input), nil
	case int16:
		return Tag(input), nil
	case int32:
		return Tag(input), nil
	case int64:
		return Tag(input), nil
	case uint:
		return Tag(input), nil
	case uint8:
		return Tag(input), nil
	case uint16:
		return Tag(input), nil
	case uint32:
		return Tag(input), nil
	case uint64:
		return Tag(input), nil
	default:
		panic("invalid tag input type")
	}
}

View File

@@ -0,0 +1,716 @@
package postgres
import (
"context"
"encoding/hex"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres/gen"
"github.com/jackc/pgx/v5"
"github.com/samber/lo"
)
// Compile-time check that Repository satisfies the BRC20 data gateway contract.
var _ datagateway.BRC20DataGateway = (*Repository)(nil)

// warning: GetLatestBlock currently returns a types.BlockHeader with only Height and Hash fields populated.
// This is because it is known that all usage of this function only requires these fields. In the future, we may want to populate all fields for type safety.
// Returns errs.NotFound when no block has been indexed yet.
func (r *Repository) GetLatestBlock(ctx context.Context) (types.BlockHeader, error) {
	block, err := r.queries.GetLatestIndexedBlock(ctx)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return types.BlockHeader{}, errors.WithStack(errs.NotFound)
		}
		return types.BlockHeader{}, errors.Wrap(err, "error during query")
	}
	// The hash is stored as a hex string; parse it back into a chainhash.Hash.
	hash, err := chainhash.NewHashFromStr(block.Hash)
	if err != nil {
		return types.BlockHeader{}, errors.Wrap(err, "failed to parse block hash")
	}
	return types.BlockHeader{
		Height: int64(block.Height),
		Hash:   *hash,
	}, nil
}
// GetIndexedBlockByHeight implements datagateway.BRC20DataGateway.
// It returns errs.NotFound when no block is indexed at the given height.
func (r *Repository) GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error) {
	row, err := r.queries.GetIndexedBlockByHeight(ctx, int32(height))
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil, errors.WithStack(errs.NotFound)
		}
		return nil, errors.Wrap(err, "error during query")
	}
	block, err := mapIndexedBlockModelToType(row)
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse indexed block model")
	}
	return &block, nil
}
// GetProcessorStats returns the most recent processor statistics row, or
// errs.NotFound when no stats have been recorded yet.
func (r *Repository) GetProcessorStats(ctx context.Context) (*entity.ProcessorStats, error) {
	row, err := r.queries.GetLatestProcessorStats(ctx)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil, errors.WithStack(errs.NotFound)
		}
		return nil, errors.WithStack(err)
	}
	stats := mapProcessorStatsModelToType(row)
	return &stats, nil
}
// GetInscriptionTransfersInOutPoints fetches inscription transfers whose new
// location lies within any of the given outpoints, grouped by new sat point.
func (r *Repository) GetInscriptionTransfersInOutPoints(ctx context.Context, outPoints []wire.OutPoint) (map[ordinals.SatPoint][]*entity.InscriptionTransfer, error) {
	// The generated query takes two parallel arrays (tx hash, output index).
	txHashes := make([]string, 0, len(outPoints))
	txOutIndexes := make([]int32, 0, len(outPoints))
	for _, op := range outPoints {
		txHashes = append(txHashes, op.Hash.String())
		txOutIndexes = append(txOutIndexes, int32(op.Index))
	}
	rows, err := r.queries.GetInscriptionTransfersInOutPoints(ctx, gen.GetInscriptionTransfersInOutPointsParams{
		TxHashArr:   txHashes,
		TxOutIdxArr: txOutIndexes,
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	grouped := make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
	for _, row := range rows {
		transfer, mapErr := mapInscriptionTransferModelToType(row)
		if mapErr != nil {
			return nil, errors.WithStack(mapErr)
		}
		grouped[transfer.NewSatPoint] = append(grouped[transfer.NewSatPoint], &transfer)
	}
	return grouped, nil
}
// GetInscriptionEntriesByIds returns inscription entries keyed by id.
// Ids with no matching row are simply absent from the result.
func (r *Repository) GetInscriptionEntriesByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*ordinals.InscriptionEntry, error) {
	idStrs := lo.Map(ids, func(id ordinals.InscriptionId, _ int) string { return id.String() })
	rows, err := r.queries.GetInscriptionEntriesByIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	entries := make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
	for _, row := range rows {
		entry, mapErr := mapInscriptionEntryModelToType(row)
		if mapErr != nil {
			return nil, errors.Wrap(mapErr, "failed to parse inscription entry model")
		}
		entries[entry.Id] = &entry
	}
	return entries, nil
}
// GetInscriptionNumbersByIds returns inscription numbers keyed by id.
// Ids with no matching row are absent from the result.
func (r *Repository) GetInscriptionNumbersByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]int64, error) {
	idStrs := lo.Map(ids, func(id ordinals.InscriptionId, _ int) string { return id.String() })
	rows, err := r.queries.GetInscriptionNumbersByIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	numbers := make(map[ordinals.InscriptionId]int64)
	for _, row := range rows {
		id, parseErr := ordinals.NewInscriptionIdFromString(row.Id)
		if parseErr != nil {
			return nil, errors.Wrap(parseErr, "failed to parse inscription id")
		}
		numbers[id] = row.Number
	}
	return numbers, nil
}
// GetInscriptionParentsByIds returns the parent id of each inscription that
// has exactly one parent. Inscriptions without a parent — or with multiple
// parents, which should not occur since ord 0.14 supports only one — are
// omitted from the result.
func (r *Repository) GetInscriptionParentsByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]ordinals.InscriptionId, error) {
	idStrs := lo.Map(ids, func(id ordinals.InscriptionId, _ int) string { return id.String() })
	rows, err := r.queries.GetInscriptionParentsByIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	parents := make(map[ordinals.InscriptionId]ordinals.InscriptionId)
	for _, row := range rows {
		// Skip rows without exactly one parent (none, or the >1 sanity case).
		if len(row.Parents) != 1 {
			continue
		}
		childId, parseErr := ordinals.NewInscriptionIdFromString(row.Id)
		if parseErr != nil {
			return nil, errors.Wrap(parseErr, "failed to parse inscription id")
		}
		parentId, parseErr := ordinals.NewInscriptionIdFromString(row.Parents[0])
		if parseErr != nil {
			return nil, errors.Wrap(parseErr, "failed to parse parent id")
		}
		parents[childId] = parentId
	}
	return parents, nil
}
// GetLatestEventId returns the highest event id across the four BRC-20 event
// tables (deploy, mint, inscribe-transfer, transfer-transfer).
// The generated query exposes the ids as interface{}; the original code used
// unchecked .(int64) assertions, which panic when a value is NULL/nil (e.g.
// empty tables) or scanned as a different integer type. Use checked
// assertions and return an error instead of panicking.
func (r *Repository) GetLatestEventId(ctx context.Context) (int64, error) {
	row, err := r.queries.GetLatestEventIds(ctx)
	if err != nil {
		return 0, errors.WithStack(err)
	}
	rawIds := []interface{}{row.EventDeployID, row.EventMintID, row.EventInscribeTransferID, row.EventTransferTransferID}
	ids := make([]int64, 0, len(rawIds))
	for _, raw := range rawIds {
		id, ok := raw.(int64)
		if !ok {
			return 0, errors.Newf("unexpected type %T for latest event id", raw)
		}
		ids = append(ids, id)
	}
	return max(ids[0], ids[1], ids[2], ids[3]), nil
}
// GetBalancesBatchAtHeight fetches balances for (pkScript, tick) pairs as of
// blockHeight, keyed first by pkScript hex and then by tick.
func (r *Repository) GetBalancesBatchAtHeight(ctx context.Context, blockHeight uint64, queries []datagateway.GetBalancesBatchAtHeightQuery) (map[string]map[string]*entity.Balance, error) {
	// Flatten the queries into the parallel arrays the generated query expects.
	pkScripts := make([]string, 0, len(queries))
	ticks := make([]string, 0, len(queries))
	for _, q := range queries {
		pkScripts = append(pkScripts, q.PkScriptHex)
		ticks = append(ticks, q.Tick)
	}
	rows, err := r.queries.GetBalancesBatchAtHeight(ctx, gen.GetBalancesBatchAtHeightParams{
		PkscriptArr: pkScripts,
		TickArr:     ticks,
		BlockHeight: int32(blockHeight),
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	balances := make(map[string]map[string]*entity.Balance)
	for _, row := range rows {
		balance, mapErr := mapBalanceModelToType(row)
		if mapErr != nil {
			return nil, errors.Wrap(mapErr, "failed to parse balance model")
		}
		byTick, ok := balances[row.Pkscript]
		if !ok {
			byTick = make(map[string]*entity.Balance)
			balances[row.Pkscript] = byTick
		}
		byTick[row.Tick] = &balance
	}
	return balances, nil
}
// GetEventInscribeTransfersByInscriptionIds returns inscribe-transfer events
// keyed by inscription id; ids without an event are absent from the result.
func (r *Repository) GetEventInscribeTransfersByInscriptionIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*entity.EventInscribeTransfer, error) {
	idStrs := lo.Map(ids, func(id ordinals.InscriptionId, _ int) string { return id.String() })
	rows, err := r.queries.GetEventInscribeTransfersByInscriptionIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	events := make(map[ordinals.InscriptionId]*entity.EventInscribeTransfer)
	for _, row := range rows {
		event, mapErr := mapEventInscribeTransferModelToType(row)
		if mapErr != nil {
			return nil, errors.Wrap(mapErr, "failed to parse event inscribe transfer model")
		}
		events[event.InscriptionId] = &event
	}
	return events, nil
}
// GetTickEntriesByTicks returns tick entries keyed by tick; unknown ticks are
// absent from the result.
func (r *Repository) GetTickEntriesByTicks(ctx context.Context, ticks []string) (map[string]*entity.TickEntry, error) {
	rows, err := r.queries.GetTickEntriesByTicks(ctx, ticks)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	entries := make(map[string]*entity.TickEntry)
	for _, row := range rows {
		entry, mapErr := mapTickEntryModelToType(row)
		if mapErr != nil {
			return nil, errors.Wrap(mapErr, "failed to parse tick entry model")
		}
		entries[entry.Tick] = &entry
	}
	return entries, nil
}
// GetBalancesByTick lists all holder balances of a tick as of blockHeight.
func (r *Repository) GetBalancesByTick(ctx context.Context, tick string, blockHeight uint64) ([]*entity.Balance, error) {
	rows, err := r.queries.GetBalancesByTick(ctx, gen.GetBalancesByTickParams{
		Tick:        tick,
		BlockHeight: int32(blockHeight),
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	balances := make([]*entity.Balance, 0, len(rows))
	for _, row := range rows {
		// The row type is query-specific; convert to the shared model first.
		balance, mapErr := mapBalanceModelToType(gen.Brc20Balance(row))
		if mapErr != nil {
			return nil, errors.Wrap(mapErr, "failed to parse balance model")
		}
		balances = append(balances, &balance)
	}
	return balances, nil
}
// GetBalancesByPkScript returns the balances held by a pkScript as of
// blockHeight, keyed by tick.
func (r *Repository) GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[string]*entity.Balance, error) {
	rows, err := r.queries.GetBalancesByPkScript(ctx, gen.GetBalancesByPkScriptParams{
		Pkscript:    hex.EncodeToString(pkScript),
		BlockHeight: int32(blockHeight),
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	balances := make(map[string]*entity.Balance)
	for _, row := range rows {
		// The row type is query-specific; convert to the shared model first.
		balance, mapErr := mapBalanceModelToType(gen.Brc20Balance(row))
		if mapErr != nil {
			return nil, errors.Wrap(mapErr, "failed to parse balance model")
		}
		balances[balance.Tick] = &balance
	}
	return balances, nil
}
// GetTransferableTransfersByPkScript lists inscribe-transfer events for a
// pkScript that are still spendable as of blockHeight.
func (r *Repository) GetTransferableTransfersByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.EventInscribeTransfer, error) {
	rows, err := r.queries.GetTransferableTransfersByPkScript(ctx, gen.GetTransferableTransfersByPkScriptParams{
		Pkscript:    hex.EncodeToString(pkScript),
		BlockHeight: int32(blockHeight),
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	events := make([]*entity.EventInscribeTransfer, 0, len(rows))
	for _, row := range rows {
		event, mapErr := mapEventInscribeTransferModelToType(row)
		if mapErr != nil {
			return nil, errors.Wrap(mapErr, "failed to parse event model")
		}
		events = append(events, &event)
	}
	return events, nil
}
// GetDeployEventByTick returns the deploy event of a tick. It returns
// errs.NotFound when the tick has never been deployed — previously the raw
// pgx.ErrNoRows leaked out, inconsistent with the other single-row getters in
// this repository (GetLatestBlock, GetProcessorStats, GetIndexedBlockByHeight).
func (r *Repository) GetDeployEventByTick(ctx context.Context, tick string) (*entity.EventDeploy, error) {
	model, err := r.queries.GetDeployEventByTick(ctx, tick)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil, errors.WithStack(errs.NotFound)
		}
		return nil, errors.WithStack(err)
	}
	ent, err := mapEventDeployModelToType(model)
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse event model")
	}
	return &ent, nil
}
// GetFirstLastInscriptionNumberByTick returns the first and last inscription
// numbers recorded for a tick. It returns errs.NotFound (with -1, -1) when the
// tick has no inscriptions — previously the raw pgx.ErrNoRows leaked out,
// inconsistent with the repository's other single-row getters.
func (r *Repository) GetFirstLastInscriptionNumberByTick(ctx context.Context, tick string) (first, last int64, err error) {
	model, err := r.queries.GetFirstLastInscriptionNumberByTick(ctx, tick)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return -1, -1, errors.WithStack(errs.NotFound)
		}
		return -1, -1, errors.WithStack(err)
	}
	return model.FirstInscriptionNumber, model.LastInscriptionNumber, nil
}
// GetDeployEvents lists deploy events up to height. A nil pkScript or empty
// tick disables the corresponding filter.
func (r *Repository) GetDeployEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventDeploy, error) {
	rows, err := r.queries.GetDeployEvents(ctx, gen.GetDeployEventsParams{
		FilterPkScript: pkScript != nil,
		PkScript:       hex.EncodeToString(pkScript),
		FilterTicker:   tick != "",
		Ticker:         tick,
		BlockHeight:    int32(height),
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	events := make([]*entity.EventDeploy, 0, len(rows))
	for _, row := range rows {
		event, mapErr := mapEventDeployModelToType(row)
		if mapErr != nil {
			return nil, errors.Wrap(mapErr, "failed to parse event model")
		}
		events = append(events, &event)
	}
	return events, nil
}
// GetMintEvents lists mint events up to height. A nil pkScript or empty tick
// disables the corresponding filter.
func (r *Repository) GetMintEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventMint, error) {
	rows, err := r.queries.GetMintEvents(ctx, gen.GetMintEventsParams{
		FilterPkScript: pkScript != nil,
		PkScript:       hex.EncodeToString(pkScript),
		FilterTicker:   tick != "",
		Ticker:         tick,
		BlockHeight:    int32(height),
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	events := make([]*entity.EventMint, 0, len(rows))
	for _, row := range rows {
		event, mapErr := mapEventMintModelToType(row)
		if mapErr != nil {
			return nil, errors.Wrap(mapErr, "failed to parse event model")
		}
		events = append(events, &event)
	}
	return events, nil
}
// GetInscribeTransferEvents lists inscribe-transfer events up to height.
// A nil pkScript or empty tick disables the corresponding filter.
func (r *Repository) GetInscribeTransferEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventInscribeTransfer, error) {
	rows, err := r.queries.GetInscribeTransferEvents(ctx, gen.GetInscribeTransferEventsParams{
		FilterPkScript: pkScript != nil,
		PkScript:       hex.EncodeToString(pkScript),
		FilterTicker:   tick != "",
		Ticker:         tick,
		BlockHeight:    int32(height),
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	events := make([]*entity.EventInscribeTransfer, 0, len(rows))
	for _, row := range rows {
		event, mapErr := mapEventInscribeTransferModelToType(row)
		if mapErr != nil {
			return nil, errors.Wrap(mapErr, "failed to parse event model")
		}
		events = append(events, &event)
	}
	return events, nil
}
// GetTransferTransferEvents lists transfer-transfer events up to height.
// A nil pkScript or empty tick disables the corresponding filter.
func (r *Repository) GetTransferTransferEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventTransferTransfer, error) {
	rows, err := r.queries.GetTransferTransferEvents(ctx, gen.GetTransferTransferEventsParams{
		FilterPkScript: pkScript != nil,
		PkScript:       hex.EncodeToString(pkScript),
		FilterTicker:   tick != "",
		Ticker:         tick,
		BlockHeight:    int32(height),
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	events := make([]*entity.EventTransferTransfer, 0, len(rows))
	for _, row := range rows {
		event, mapErr := mapEventTransferTransferModelToType(row)
		if mapErr != nil {
			return nil, errors.Wrap(mapErr, "failed to parse event model")
		}
		events = append(events, &event)
	}
	return events, nil
}
// CreateIndexedBlock persists one indexed-block record.
func (r *Repository) CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error {
	if err := r.queries.CreateIndexedBlock(ctx, mapIndexedBlockTypeToParams(*block)); err != nil {
		return errors.WithStack(err)
	}
	return nil
}
// CreateProcessorStats persists one processor-statistics record.
func (r *Repository) CreateProcessorStats(ctx context.Context, stats *entity.ProcessorStats) error {
	if err := r.queries.CreateProcessorStats(ctx, mapProcessorStatsTypeToParams(*stats)); err != nil {
		return errors.WithStack(err)
	}
	return nil
}
// CreateTickEntries batch-inserts tick entries created at blockHeight.
// All per-row errors from the batch are joined into a single returned error.
func (r *Repository) CreateTickEntries(ctx context.Context, blockHeight uint64, entries []*entity.TickEntry) error {
	params := make([]gen.CreateTickEntriesParams, 0, len(entries))
	for _, entry := range entries {
		p, _, err := mapTickEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map tick entry to create params")
		}
		params = append(params, p)
	}
	batch := r.queries.CreateTickEntries(ctx, params)
	var batchErrs []error
	batch.Exec(func(_ int, execErr error) {
		if execErr != nil {
			batchErrs = append(batchErrs, execErr)
		}
	})
	if len(batchErrs) == 0 {
		return nil
	}
	return errors.Wrap(errors.Join(batchErrs...), "error during exec")
}
// CreateTickEntryStates batch-inserts the per-block state rows of tick entries
// at blockHeight. All per-row errors are joined into a single returned error.
func (r *Repository) CreateTickEntryStates(ctx context.Context, blockHeight uint64, entryStates []*entity.TickEntry) error {
	params := make([]gen.CreateTickEntryStatesParams, 0, len(entryStates))
	for _, entry := range entryStates {
		_, p, err := mapTickEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map tick entry to create params")
		}
		params = append(params, p)
	}
	batch := r.queries.CreateTickEntryStates(ctx, params)
	var batchErrs []error
	batch.Exec(func(_ int, execErr error) {
		if execErr != nil {
			batchErrs = append(batchErrs, execErr)
		}
	})
	if len(batchErrs) == 0 {
		return nil
	}
	return errors.Wrap(errors.Join(batchErrs...), "error during exec")
}
// CreateInscriptionEntries batch-inserts inscription entries created at
// blockHeight. All per-row errors are joined into a single returned error.
func (r *Repository) CreateInscriptionEntries(ctx context.Context, blockHeight uint64, entries []*ordinals.InscriptionEntry) error {
	params := make([]gen.CreateInscriptionEntriesParams, 0, len(entries))
	for _, entry := range entries {
		p, _, err := mapInscriptionEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map inscription entry to create params")
		}
		params = append(params, p)
	}
	batch := r.queries.CreateInscriptionEntries(ctx, params)
	var batchErrs []error
	batch.Exec(func(_ int, execErr error) {
		if execErr != nil {
			batchErrs = append(batchErrs, execErr)
		}
	})
	if len(batchErrs) == 0 {
		return nil
	}
	return errors.Wrap(errors.Join(batchErrs...), "error during exec")
}
// CreateInscriptionEntryStates batch-inserts the per-block state rows of
// inscription entries at blockHeight. All per-row errors are joined into a
// single returned error.
func (r *Repository) CreateInscriptionEntryStates(ctx context.Context, blockHeight uint64, entryStates []*ordinals.InscriptionEntry) error {
	params := make([]gen.CreateInscriptionEntryStatesParams, 0, len(entryStates))
	for _, entry := range entryStates {
		_, p, err := mapInscriptionEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map inscription entry to create params")
		}
		params = append(params, p)
	}
	batch := r.queries.CreateInscriptionEntryStates(ctx, params)
	var batchErrs []error
	batch.Exec(func(_ int, execErr error) {
		if execErr != nil {
			batchErrs = append(batchErrs, execErr)
		}
	})
	if len(batchErrs) == 0 {
		return nil
	}
	return errors.Wrap(errors.Join(batchErrs...), "error during exec")
}
// CreateInscriptionTransfers batch-inserts inscription transfer rows. The
// mapper is infallible, so only batch execution errors can be returned
// (joined into a single error).
func (r *Repository) CreateInscriptionTransfers(ctx context.Context, transfers []*entity.InscriptionTransfer) error {
	params := make([]gen.CreateInscriptionTransfersParams, 0, len(transfers))
	for _, t := range transfers {
		params = append(params, mapInscriptionTransferTypeToParams(*t))
	}
	batchResults := r.queries.CreateInscriptionTransfers(ctx, params)
	var errs []error
	batchResults.Exec(func(_ int, err error) {
		if err != nil {
			errs = append(errs, err)
		}
	})
	if len(errs) == 0 {
		return nil
	}
	return errors.Wrap(errors.Join(errs...), "error during exec")
}
// CreateEventDeploys batch-inserts BRC-20 deploy events. A mapping failure
// aborts before anything is sent; execution errors are joined into one.
func (r *Repository) CreateEventDeploys(ctx context.Context, events []*entity.EventDeploy) error {
	deployParams := make([]gen.CreateEventDeploysParams, 0, len(events))
	for _, ev := range events {
		p, err := mapEventDeployTypeToParams(*ev)
		if err != nil {
			return errors.Wrap(err, "cannot map event deploy to create params")
		}
		deployParams = append(deployParams, p)
	}
	batchResults := r.queries.CreateEventDeploys(ctx, deployParams)
	var errs []error
	batchResults.Exec(func(_ int, err error) {
		if err != nil {
			errs = append(errs, err)
		}
	})
	if len(errs) == 0 {
		return nil
	}
	return errors.Wrap(errors.Join(errs...), "error during exec")
}
// CreateEventMints batch-inserts BRC-20 mint events. A mapping failure
// aborts before anything is sent; execution errors are joined into one.
func (r *Repository) CreateEventMints(ctx context.Context, events []*entity.EventMint) error {
	mintParams := make([]gen.CreateEventMintsParams, 0, len(events))
	for _, ev := range events {
		p, err := mapEventMintTypeToParams(*ev)
		if err != nil {
			return errors.Wrap(err, "cannot map event mint to create params")
		}
		mintParams = append(mintParams, p)
	}
	batchResults := r.queries.CreateEventMints(ctx, mintParams)
	var errs []error
	batchResults.Exec(func(_ int, err error) {
		if err != nil {
			errs = append(errs, err)
		}
	})
	if len(errs) == 0 {
		return nil
	}
	return errors.Wrap(errors.Join(errs...), "error during exec")
}
// CreateEventInscribeTransfers batch-inserts BRC-20 inscribe-transfer
// events. A mapping failure aborts before the batch is sent; per-statement
// execution errors are collected and joined into a single returned error.
func (r *Repository) CreateEventInscribeTransfers(ctx context.Context, events []*entity.EventInscribeTransfer) error {
	params := make([]gen.CreateEventInscribeTransfersParams, 0, len(events))
	for _, event := range events {
		param, err := mapEventInscribeTransferTypeToParams(*event)
		if err != nil {
			// Was "cannot map event transfer ..." — a copy-paste from the
			// transfer-transfer method; made specific so logs disambiguate.
			return errors.Wrap(err, "cannot map event inscribe transfer to create params")
		}
		params = append(params, param)
	}
	results := r.queries.CreateEventInscribeTransfers(ctx, params)
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}
// CreateEventTransferTransfers batch-inserts BRC-20 transfer-transfer
// events (the spend of a previously inscribed transfer). A mapping failure
// aborts before the batch is sent; execution errors are joined into one.
func (r *Repository) CreateEventTransferTransfers(ctx context.Context, events []*entity.EventTransferTransfer) error {
	params := make([]gen.CreateEventTransferTransfersParams, 0, len(events))
	for _, event := range events {
		param, err := mapEventTransferTransferTypeToParams(*event)
		if err != nil {
			// Message made specific ("transfer transfer") so it cannot be
			// confused with the inscribe-transfer mapping error.
			return errors.Wrap(err, "cannot map event transfer transfer to create params")
		}
		params = append(params, param)
	}
	results := r.queries.CreateEventTransferTransfers(ctx, params)
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}
// CreateBalances batch-inserts BRC-20 balance snapshot rows. The mapper is
// infallible, so only batch execution errors can be returned (joined).
func (r *Repository) CreateBalances(ctx context.Context, balances []*entity.Balance) error {
	params := make([]gen.CreateBalancesParams, 0, len(balances))
	for _, b := range balances {
		params = append(params, mapBalanceTypeToParams(*b))
	}
	batchResults := r.queries.CreateBalances(ctx, params)
	var errs []error
	batchResults.Exec(func(_ int, err error) {
		if err != nil {
			errs = append(errs, err)
		}
	})
	if len(errs) == 0 {
		return nil
	}
	return errors.Wrap(errors.Join(errs...), "error during exec")
}
// DeleteIndexedBlocksSinceHeight runs the generated delete query for indexed
// blocks at the given height onward (per the "Since" naming — presumably
// block_height >= height; confirm against the SQL). Used for reorg rollback.
// NOTE(review): height is narrowed to int32 to match the column type; fine
// for realistic Bitcoin block heights.
// errors.Wrap returns nil when err is nil, so the happy path returns nil.
func (r *Repository) DeleteIndexedBlocksSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteIndexedBlocksSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

// DeleteProcessorStatsSinceHeight runs the generated delete query for
// processor stats from the given height onward.
func (r *Repository) DeleteProcessorStatsSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteProcessorStatsSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

// DeleteTickEntriesSinceHeight runs the generated delete query for tick
// entries from the given height onward.
func (r *Repository) DeleteTickEntriesSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteTickEntriesSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

// DeleteTickEntryStatesSinceHeight runs the generated delete query for tick
// entry states from the given height onward.
func (r *Repository) DeleteTickEntryStatesSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteTickEntryStatesSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

// DeleteEventDeploysSinceHeight runs the generated delete query for deploy
// events from the given height onward.
func (r *Repository) DeleteEventDeploysSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteEventDeploysSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

// DeleteEventMintsSinceHeight runs the generated delete query for mint
// events from the given height onward.
func (r *Repository) DeleteEventMintsSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteEventMintsSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

// DeleteEventInscribeTransfersSinceHeight runs the generated delete query
// for inscribe-transfer events from the given height onward.
func (r *Repository) DeleteEventInscribeTransfersSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteEventInscribeTransfersSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

// DeleteEventTransferTransfersSinceHeight runs the generated delete query
// for transfer-transfer events from the given height onward.
func (r *Repository) DeleteEventTransferTransfersSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteEventTransferTransfersSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

// DeleteBalancesSinceHeight runs the generated delete query for balance
// snapshots from the given height onward.
func (r *Repository) DeleteBalancesSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteBalancesSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

// DeleteInscriptionEntriesSinceHeight runs the generated delete query for
// inscription entries from the given height onward.
func (r *Repository) DeleteInscriptionEntriesSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteInscriptionEntriesSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

// DeleteInscriptionEntryStatesSinceHeight runs the generated delete query
// for inscription entry states from the given height onward.
func (r *Repository) DeleteInscriptionEntryStatesSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteInscriptionEntryStatesSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

// DeleteInscriptionTransfersSinceHeight runs the generated delete query for
// inscription transfers from the given height onward.
func (r *Repository) DeleteInscriptionTransfersSinceHeight(ctx context.Context, height uint64) error {
	err := r.queries.DeleteInscriptionTransfersSinceHeight(ctx, int32(height))
	return errors.Wrap(err, "error during exec")
}

View File

@@ -0,0 +1,696 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: batch.go
package gen
import (
"context"
"errors"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"
)
var (
	// ErrBatchAlreadyClosed is passed to the Exec callback for every
	// remaining queued statement once Close has been called on a batch.
	ErrBatchAlreadyClosed = errors.New("batch already closed")
)
const createBalances = `-- name: CreateBalances :batchexec
INSERT INTO "brc20_balances" ("pkscript", "block_height", "tick", "overall_balance", "available_balance") VALUES ($1, $2, $3, $4, $5)
`

// CreateBalancesBatchResults tracks an in-flight pgx batch for CreateBalances.
type CreateBalancesBatchResults struct {
	br     pgx.BatchResults // underlying pgx batch handle
	tot    int              // number of queued statements
	closed bool             // set once Close has been called
}

// CreateBalancesParams is one row to insert into brc20_balances.
type CreateBalancesParams struct {
	Pkscript         string
	BlockHeight      int32
	Tick             string
	OverallBalance   pgtype.Numeric
	AvailableBalance pgtype.Numeric
}

// CreateBalances queues one INSERT per element of arg into a single pgx
// batch and sends it; call Exec on the result to drain statement errors.
func (q *Queries) CreateBalances(ctx context.Context, arg []CreateBalancesParams) *CreateBalancesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Pkscript,
			a.BlockHeight,
			a.Tick,
			a.OverallBalance,
			a.AvailableBalance,
		}
		batch.Queue(createBalances, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateBalancesBatchResults{br, len(arg), false}
}

// Exec drains every queued statement, invoking f (if non-nil) with the
// statement index and its error; the batch is closed when Exec returns.
func (b *CreateBalancesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying results.
func (b *CreateBalancesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createEventDeploys = `-- name: CreateEventDeploys :batchexec
INSERT INTO "brc20_event_deploys" ("id", "inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "total_supply", "decimals", "limit_per_mint", "is_self_mint") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
`

// CreateEventDeploysBatchResults tracks an in-flight batch for CreateEventDeploys.
type CreateEventDeploysBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

// CreateEventDeploysParams is one row to insert into brc20_event_deploys.
type CreateEventDeploysParams struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	TotalSupply       pgtype.Numeric
	Decimals          int16
	LimitPerMint      pgtype.Numeric
	IsSelfMint        bool
}

// CreateEventDeploys queues one INSERT per element of arg and sends the batch.
func (q *Queries) CreateEventDeploys(ctx context.Context, arg []CreateEventDeploysParams) *CreateEventDeploysBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Id,
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.Pkscript,
			a.Satpoint,
			a.TotalSupply,
			a.Decimals,
			a.LimitPerMint,
			a.IsSelfMint,
		}
		batch.Queue(createEventDeploys, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventDeploysBatchResults{br, len(arg), false}
}

// Exec drains every queued statement, passing each index/error pair to f.
func (b *CreateEventDeploysBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying results.
func (b *CreateEventDeploysBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createEventInscribeTransfers = `-- name: CreateEventInscribeTransfers :batchexec
INSERT INTO "brc20_event_inscribe_transfers" ("id", "inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "output_index", "sats_amount", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
`

// CreateEventInscribeTransfersBatchResults tracks an in-flight batch.
type CreateEventInscribeTransfersBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

// CreateEventInscribeTransfersParams is one brc20_event_inscribe_transfers row.
type CreateEventInscribeTransfersParams struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	OutputIndex       int32
	SatsAmount        int64
	Amount            pgtype.Numeric
}

// CreateEventInscribeTransfers queues one INSERT per element and sends the batch.
func (q *Queries) CreateEventInscribeTransfers(ctx context.Context, arg []CreateEventInscribeTransfersParams) *CreateEventInscribeTransfersBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Id,
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.Pkscript,
			a.Satpoint,
			a.OutputIndex,
			a.SatsAmount,
			a.Amount,
		}
		batch.Queue(createEventInscribeTransfers, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventInscribeTransfersBatchResults{br, len(arg), false}
}

// Exec drains every queued statement, passing each index/error pair to f.
func (b *CreateEventInscribeTransfersBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying results.
func (b *CreateEventInscribeTransfersBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createEventMints = `-- name: CreateEventMints :batchexec
INSERT INTO "brc20_event_mints" ("id", "inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "amount", "parent_id") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
`

// CreateEventMintsBatchResults tracks an in-flight batch for CreateEventMints.
type CreateEventMintsBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

// CreateEventMintsParams is one row to insert into brc20_event_mints.
type CreateEventMintsParams struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	Amount            pgtype.Numeric
	ParentID          pgtype.Text // nullable parent inscription id
}

// CreateEventMints queues one INSERT per element of arg and sends the batch.
func (q *Queries) CreateEventMints(ctx context.Context, arg []CreateEventMintsParams) *CreateEventMintsBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Id,
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.Pkscript,
			a.Satpoint,
			a.Amount,
			a.ParentID,
		}
		batch.Queue(createEventMints, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventMintsBatchResults{br, len(arg), false}
}

// Exec drains every queued statement, passing each index/error pair to f.
func (b *CreateEventMintsBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying results.
func (b *CreateEventMintsBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createEventTransferTransfers = `-- name: CreateEventTransferTransfers :batchexec
INSERT INTO "brc20_event_transfer_transfers" ("id", "inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "from_pkscript", "from_satpoint", "from_input_index", "to_pkscript", "to_satpoint", "to_output_index", "spent_as_fee", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)
`

// CreateEventTransferTransfersBatchResults tracks an in-flight batch.
type CreateEventTransferTransfersBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

// CreateEventTransferTransfersParams is one brc20_event_transfer_transfers row.
type CreateEventTransferTransfersParams struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	FromPkscript      string
	FromSatpoint      string
	FromInputIndex    int32
	ToPkscript        string
	ToSatpoint        string
	ToOutputIndex     int32
	SpentAsFee        bool
	Amount            pgtype.Numeric
}

// CreateEventTransferTransfers queues one INSERT per element and sends the batch.
func (q *Queries) CreateEventTransferTransfers(ctx context.Context, arg []CreateEventTransferTransfersParams) *CreateEventTransferTransfersBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Id,
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.FromPkscript,
			a.FromSatpoint,
			a.FromInputIndex,
			a.ToPkscript,
			a.ToSatpoint,
			a.ToOutputIndex,
			a.SpentAsFee,
			a.Amount,
		}
		batch.Queue(createEventTransferTransfers, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventTransferTransfersBatchResults{br, len(arg), false}
}

// Exec drains every queued statement, passing each index/error pair to f.
func (b *CreateEventTransferTransfersBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying results.
func (b *CreateEventTransferTransfersBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createInscriptionEntries = `-- name: CreateInscriptionEntries :batchexec
INSERT INTO "brc20_inscription_entries" ("id", "number", "sequence_number", "delegate", "metadata", "metaprotocol", "parents", "pointer", "content", "content_encoding", "content_type", "cursed", "cursed_for_brc20", "created_at", "created_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
`

// CreateInscriptionEntriesBatchResults tracks an in-flight batch.
type CreateInscriptionEntriesBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

// CreateInscriptionEntriesParams is one brc20_inscription_entries row.
type CreateInscriptionEntriesParams struct {
	Id              string
	Number          int64
	SequenceNumber  int64
	Delegate        pgtype.Text
	Metadata        []byte
	Metaprotocol    pgtype.Text
	Parents         []string
	Pointer         pgtype.Int8
	Content         []byte
	ContentEncoding pgtype.Text
	ContentType     pgtype.Text
	Cursed          bool
	CursedForBrc20  bool
	CreatedAt       pgtype.Timestamp
	CreatedAtHeight int32
}

// CreateInscriptionEntries queues one INSERT per element and sends the batch.
func (q *Queries) CreateInscriptionEntries(ctx context.Context, arg []CreateInscriptionEntriesParams) *CreateInscriptionEntriesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Id,
			a.Number,
			a.SequenceNumber,
			a.Delegate,
			a.Metadata,
			a.Metaprotocol,
			a.Parents,
			a.Pointer,
			a.Content,
			a.ContentEncoding,
			a.ContentType,
			a.Cursed,
			a.CursedForBrc20,
			a.CreatedAt,
			a.CreatedAtHeight,
		}
		batch.Queue(createInscriptionEntries, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateInscriptionEntriesBatchResults{br, len(arg), false}
}

// Exec drains every queued statement, passing each index/error pair to f.
func (b *CreateInscriptionEntriesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying results.
func (b *CreateInscriptionEntriesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createInscriptionEntryStates = `-- name: CreateInscriptionEntryStates :batchexec
INSERT INTO "brc20_inscription_entry_states" ("id", "block_height", "transfer_count") VALUES ($1, $2, $3)
`

// CreateInscriptionEntryStatesBatchResults tracks an in-flight batch.
type CreateInscriptionEntryStatesBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

// CreateInscriptionEntryStatesParams is one brc20_inscription_entry_states row.
type CreateInscriptionEntryStatesParams struct {
	Id            string
	BlockHeight   int32
	TransferCount int32
}

// CreateInscriptionEntryStates queues one INSERT per element and sends the batch.
func (q *Queries) CreateInscriptionEntryStates(ctx context.Context, arg []CreateInscriptionEntryStatesParams) *CreateInscriptionEntryStatesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Id,
			a.BlockHeight,
			a.TransferCount,
		}
		batch.Queue(createInscriptionEntryStates, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateInscriptionEntryStatesBatchResults{br, len(arg), false}
}

// Exec drains every queued statement, passing each index/error pair to f.
func (b *CreateInscriptionEntryStatesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying results.
func (b *CreateInscriptionEntryStatesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createInscriptionTransfers = `-- name: CreateInscriptionTransfers :batchexec
INSERT INTO "brc20_inscription_transfers" ("inscription_id", "inscription_number", "inscription_sequence_number", "block_height", "tx_index", "tx_hash", "from_input_index", "old_satpoint_tx_hash", "old_satpoint_out_idx", "old_satpoint_offset", "new_satpoint_tx_hash", "new_satpoint_out_idx", "new_satpoint_offset", "new_pkscript", "new_output_value", "sent_as_fee", "transfer_count") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)
`

// CreateInscriptionTransfersBatchResults tracks an in-flight batch.
type CreateInscriptionTransfersBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

// CreateInscriptionTransfersParams is one brc20_inscription_transfers row;
// the old/new satpoint columns are nullable.
type CreateInscriptionTransfersParams struct {
	InscriptionID             string
	InscriptionNumber         int64
	InscriptionSequenceNumber int64
	BlockHeight               int32
	TxIndex                   int32
	TxHash                    string
	FromInputIndex            int32
	OldSatpointTxHash         pgtype.Text
	OldSatpointOutIdx         pgtype.Int4
	OldSatpointOffset         pgtype.Int8
	NewSatpointTxHash         pgtype.Text
	NewSatpointOutIdx         pgtype.Int4
	NewSatpointOffset         pgtype.Int8
	NewPkscript               string
	NewOutputValue            int64
	SentAsFee                 bool
	TransferCount             int32
}

// CreateInscriptionTransfers queues one INSERT per element and sends the batch.
func (q *Queries) CreateInscriptionTransfers(ctx context.Context, arg []CreateInscriptionTransfersParams) *CreateInscriptionTransfersBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.InscriptionID,
			a.InscriptionNumber,
			a.InscriptionSequenceNumber,
			a.BlockHeight,
			a.TxIndex,
			a.TxHash,
			a.FromInputIndex,
			a.OldSatpointTxHash,
			a.OldSatpointOutIdx,
			a.OldSatpointOffset,
			a.NewSatpointTxHash,
			a.NewSatpointOutIdx,
			a.NewSatpointOffset,
			a.NewPkscript,
			a.NewOutputValue,
			a.SentAsFee,
			a.TransferCount,
		}
		batch.Queue(createInscriptionTransfers, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateInscriptionTransfersBatchResults{br, len(arg), false}
}

// Exec drains every queued statement, passing each index/error pair to f.
func (b *CreateInscriptionTransfersBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying results.
func (b *CreateInscriptionTransfersBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createTickEntries = `-- name: CreateTickEntries :batchexec
INSERT INTO "brc20_tick_entries" ("tick", "original_tick", "total_supply", "decimals", "limit_per_mint", "is_self_mint", "deploy_inscription_id", "deployed_at", "deployed_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
`

// CreateTickEntriesBatchResults tracks an in-flight batch for CreateTickEntries.
type CreateTickEntriesBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

// CreateTickEntriesParams is one row to insert into brc20_tick_entries.
type CreateTickEntriesParams struct {
	Tick                string
	OriginalTick        string
	TotalSupply         pgtype.Numeric
	Decimals            int16
	LimitPerMint        pgtype.Numeric
	IsSelfMint          bool
	DeployInscriptionID string
	DeployedAt          pgtype.Timestamp
	DeployedAtHeight    int32
}

// CreateTickEntries queues one INSERT per element of arg and sends the batch.
func (q *Queries) CreateTickEntries(ctx context.Context, arg []CreateTickEntriesParams) *CreateTickEntriesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Tick,
			a.OriginalTick,
			a.TotalSupply,
			a.Decimals,
			a.LimitPerMint,
			a.IsSelfMint,
			a.DeployInscriptionID,
			a.DeployedAt,
			a.DeployedAtHeight,
		}
		batch.Queue(createTickEntries, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateTickEntriesBatchResults{br, len(arg), false}
}

// Exec drains every queued statement, passing each index/error pair to f.
func (b *CreateTickEntriesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying results.
func (b *CreateTickEntriesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createTickEntryStates = `-- name: CreateTickEntryStates :batchexec
INSERT INTO "brc20_tick_entry_states" ("tick", "block_height", "minted_amount", "burned_amount", "completed_at", "completed_at_height") VALUES ($1, $2, $3, $4, $5, $6)
`

// CreateTickEntryStatesBatchResults tracks an in-flight batch.
type CreateTickEntryStatesBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

// CreateTickEntryStatesParams is one brc20_tick_entry_states row; the
// completed_* columns are nullable.
type CreateTickEntryStatesParams struct {
	Tick              string
	BlockHeight       int32
	MintedAmount      pgtype.Numeric
	BurnedAmount      pgtype.Numeric
	CompletedAt       pgtype.Timestamp
	CompletedAtHeight pgtype.Int4
}

// CreateTickEntryStates queues one INSERT per element and sends the batch.
func (q *Queries) CreateTickEntryStates(ctx context.Context, arg []CreateTickEntryStatesParams) *CreateTickEntryStatesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Tick,
			a.BlockHeight,
			a.MintedAmount,
			a.BurnedAmount,
			a.CompletedAt,
			a.CompletedAtHeight,
		}
		batch.Queue(createTickEntryStates, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateTickEntryStatesBatchResults{br, len(arg), false}
}

// Exec drains every queued statement, passing each index/error pair to f.
func (b *CreateTickEntryStatesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying results.
func (b *CreateTickEntryStatesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,33 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
)
// DBTX is the minimal database surface Queries needs; any type providing
// these four pgx methods (connections, pools, transactions) satisfies it.
type DBTX interface {
	Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
	Query(context.Context, string, ...interface{}) (pgx.Rows, error)
	QueryRow(context.Context, string, ...interface{}) pgx.Row
	SendBatch(context.Context, *pgx.Batch) pgx.BatchResults
}

// New wraps db in a Queries value.
func New(db DBTX) *Queries {
	return &Queries{db: db}
}

// Queries executes the generated SQL against its DBTX.
type Queries struct {
	db DBTX
}

// WithTx returns a Queries bound to the given transaction.
func (q *Queries) WithTx(tx pgx.Tx) *Queries {
	return &Queries{
		db: tx,
	}
}

View File

@@ -0,0 +1,49 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: info.sql
package gen
import (
"context"
)
const createIndexerState = `-- name: CreateIndexerState :exec
INSERT INTO brc20_indexer_states (client_version, network, db_version, event_hash_version) VALUES ($1, $2, $3, $4)
`

// CreateIndexerStateParams carries the column values for CreateIndexerState.
type CreateIndexerStateParams struct {
	ClientVersion    string
	Network          string
	DbVersion        int32
	EventHashVersion int32
}

// CreateIndexerState inserts a new brc20_indexer_states row.
func (q *Queries) CreateIndexerState(ctx context.Context, arg CreateIndexerStateParams) error {
	_, err := q.db.Exec(ctx, createIndexerState,
		arg.ClientVersion,
		arg.Network,
		arg.DbVersion,
		arg.EventHashVersion,
	)
	return err
}
const getLatestIndexerState = `-- name: GetLatestIndexerState :one
SELECT id, client_version, network, db_version, event_hash_version, created_at FROM brc20_indexer_states ORDER BY created_at DESC LIMIT 1
`

// GetLatestIndexerState returns the newest indexer-state row by created_at;
// the scan error is pgx.ErrNoRows when the table is empty.
func (q *Queries) GetLatestIndexerState(ctx context.Context) (Brc20IndexerState, error) {
	row := q.db.QueryRow(ctx, getLatestIndexerState)
	var i Brc20IndexerState
	err := row.Scan(
		&i.Id,
		&i.ClientVersion,
		&i.Network,
		&i.DbVersion,
		&i.EventHashVersion,
		&i.CreatedAt,
	)
	return i, err
}

View File

@@ -0,0 +1,176 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"github.com/jackc/pgx/v5/pgtype"
)
// Brc20Balance mirrors one row of brc20_balances.
type Brc20Balance struct {
	Pkscript         string
	BlockHeight      int32
	Tick             string
	OverallBalance   pgtype.Numeric
	AvailableBalance pgtype.Numeric
}

// Brc20EventDeploy mirrors one row of brc20_event_deploys.
type Brc20EventDeploy struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	TotalSupply       pgtype.Numeric
	Decimals          int16
	LimitPerMint      pgtype.Numeric
	IsSelfMint        bool
}

// Brc20EventInscribeTransfer mirrors one row of brc20_event_inscribe_transfers.
type Brc20EventInscribeTransfer struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	OutputIndex       int32
	SatsAmount        int64
	Amount            pgtype.Numeric
}

// Brc20EventMint mirrors one row of brc20_event_mints.
type Brc20EventMint struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	Amount            pgtype.Numeric
	ParentID          pgtype.Text
}

// Brc20EventTransferTransfer mirrors one row of brc20_event_transfer_transfers.
type Brc20EventTransferTransfer struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	FromPkscript      string
	FromSatpoint      string
	FromInputIndex    int32
	ToPkscript        string
	ToSatpoint        string
	ToOutputIndex     int32
	SpentAsFee        bool
	Amount            pgtype.Numeric
}

// Brc20IndexedBlock mirrors one row of brc20_indexed_blocks.
type Brc20IndexedBlock struct {
	Height              int32
	Hash                string
	EventHash           string
	CumulativeEventHash string
}

// Brc20IndexerState mirrors one row of brc20_indexer_states.
type Brc20IndexerState struct {
	Id               int64
	ClientVersion    string
	Network          string
	DbVersion        int32
	EventHashVersion int32
	CreatedAt        pgtype.Timestamptz
}

// Brc20InscriptionEntry mirrors one row of brc20_inscription_entries.
type Brc20InscriptionEntry struct {
	Id              string
	Number          int64
	SequenceNumber  int64
	Delegate        pgtype.Text
	Metadata        []byte
	Metaprotocol    pgtype.Text
	Parents         []string
	Pointer         pgtype.Int8
	Content         []byte
	ContentEncoding pgtype.Text
	ContentType     pgtype.Text
	Cursed          bool
	CursedForBrc20  bool
	CreatedAt       pgtype.Timestamp
	CreatedAtHeight int32
}

// Brc20InscriptionEntryState mirrors one row of brc20_inscription_entry_states.
type Brc20InscriptionEntryState struct {
	Id            string
	BlockHeight   int32
	TransferCount int32
}

// Brc20InscriptionTransfer mirrors one row of brc20_inscription_transfers.
type Brc20InscriptionTransfer struct {
	InscriptionID             string
	InscriptionNumber         int64
	InscriptionSequenceNumber int64
	BlockHeight               int32
	TxIndex                   int32
	TxHash                    string
	FromInputIndex            int32
	OldSatpointTxHash         pgtype.Text
	OldSatpointOutIdx         pgtype.Int4
	OldSatpointOffset         pgtype.Int8
	NewSatpointTxHash         pgtype.Text
	NewSatpointOutIdx         pgtype.Int4
	NewSatpointOffset         pgtype.Int8
	NewPkscript               string
	NewOutputValue            int64
	SentAsFee                 bool
	TransferCount             int32
}

// Brc20ProcessorStat mirrors one row of brc20_processor_stats.
type Brc20ProcessorStat struct {
	BlockHeight             int32
	CursedInscriptionCount  int32
	BlessedInscriptionCount int32
	LostSats                int64
}

// Brc20TickEntry mirrors one row of brc20_tick_entries.
type Brc20TickEntry struct {
	Tick                string
	OriginalTick        string
	TotalSupply         pgtype.Numeric
	Decimals            int16
	LimitPerMint        pgtype.Numeric
	IsSelfMint          bool
	DeployInscriptionID string
	DeployedAt          pgtype.Timestamp
	DeployedAtHeight    int32
}

// Brc20TickEntryState mirrors one row of brc20_tick_entry_states.
type Brc20TickEntryState struct {
	Tick              string
	BlockHeight       int32
	MintedAmount      pgtype.Numeric
	BurnedAmount      pgtype.Numeric
	CompletedAt       pgtype.Timestamp
	CompletedAtHeight pgtype.Int4
}

View File

@@ -0,0 +1,33 @@
package postgres
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/jackc/pgx/v5"
)
var _ datagateway.IndexerInfoDataGateway = (*Repository)(nil)

// GetLatestIndexerState returns the most recently created indexer state,
// mapping an empty table (pgx.ErrNoRows) to errs.NotFound.
func (r *Repository) GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error) {
	model, err := r.queries.GetLatestIndexerState(ctx)
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return entity.IndexerState{}, errors.WithStack(errs.NotFound)
	case err != nil:
		return entity.IndexerState{}, errors.Wrap(err, "error during query")
	}
	return mapIndexerStatesModelToType(model), nil
}
// CreateIndexerState persists a new indexer state row.
// errors.Wrap returns nil when the exec error is nil.
func (r *Repository) CreateIndexerState(ctx context.Context, state entity.IndexerState) error {
	err := r.queries.CreateIndexerState(ctx, mapIndexerStatesTypeToParams(state))
	return errors.Wrap(err, "error during exec")
}

View File

@@ -0,0 +1,622 @@
package postgres
import (
"encoding/hex"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres/gen"
"github.com/jackc/pgx/v5/pgtype"
"github.com/samber/lo"
"github.com/shopspring/decimal"
)
// decimalFromNumeric converts a pgtype.Numeric into decimal.NullDecimal.
// NULL, NaN and +/-Infinity inputs all map to the invalid (null) decimal.
func decimalFromNumeric(src pgtype.Numeric) decimal.NullDecimal {
	unusable := !src.Valid || src.NaN || src.InfinityModifier != pgtype.Finite
	if unusable {
		return decimal.NullDecimal{}
	}
	return decimal.NewNullDecimal(decimal.NewFromBigInt(src.Int, src.Exp))
}
// numericFromDecimal converts a decimal.Decimal into a valid, finite
// pgtype.Numeric (NaN and InfinityModifier stay at their zero values,
// i.e. not-NaN and pgtype.Finite).
func numericFromDecimal(src decimal.Decimal) pgtype.Numeric {
	var dst pgtype.Numeric
	dst.Int = src.Coefficient()
	dst.Exp = src.Exponent()
	dst.Valid = true
	return dst
}
// numericFromNullDecimal converts a nullable decimal into pgtype.Numeric;
// an invalid input yields the zero Numeric (SQL NULL).
func numericFromNullDecimal(src decimal.NullDecimal) pgtype.Numeric {
	if src.Valid {
		return numericFromDecimal(src.Decimal)
	}
	return pgtype.Numeric{}
}
// mapIndexerStatesModelToType converts a stored indexer-state row into its
// entity form; a NULL created_at maps to the zero time.
func mapIndexerStatesModelToType(src gen.Brc20IndexerState) entity.IndexerState {
	state := entity.IndexerState{
		ClientVersion:    src.ClientVersion,
		Network:          common.Network(src.Network),
		DBVersion:        src.DbVersion,
		EventHashVersion: src.EventHashVersion,
	}
	if src.CreatedAt.Valid {
		state.CreatedAt = src.CreatedAt.Time
	}
	return state
}
// mapIndexerStatesTypeToParams converts an entity indexer state into the
// generated insert params (CreatedAt is assigned by the database).
func mapIndexerStatesTypeToParams(src entity.IndexerState) gen.CreateIndexerStateParams {
	var params gen.CreateIndexerStateParams
	params.ClientVersion = src.ClientVersion
	params.Network = string(src.Network)
	params.DbVersion = src.DBVersion
	params.EventHashVersion = src.EventHashVersion
	return params
}
// mapIndexedBlockModelToType decodes a stored indexed-block row (hex-encoded
// hashes) into its entity form, failing on any malformed hash string.
func mapIndexedBlockModelToType(src gen.Brc20IndexedBlock) (entity.IndexedBlock, error) {
	blockHash, err := chainhash.NewHashFromStr(src.Hash)
	if err != nil {
		return entity.IndexedBlock{}, errors.Wrap(err, "invalid block hash")
	}
	eventHashBytes, err := hex.DecodeString(src.EventHash)
	if err != nil {
		return entity.IndexedBlock{}, errors.Wrap(err, "invalid event hash")
	}
	cumulativeBytes, err := hex.DecodeString(src.CumulativeEventHash)
	if err != nil {
		return entity.IndexedBlock{}, errors.Wrap(err, "invalid cumulative event hash")
	}
	result := entity.IndexedBlock{
		Height:              uint64(src.Height),
		Hash:                *blockHash,
		EventHash:           eventHashBytes,
		CumulativeEventHash: cumulativeBytes,
	}
	return result, nil
}
// mapIndexedBlockTypeToParams encodes an entity indexed block into insert
// params, hex-encoding both event hashes.
func mapIndexedBlockTypeToParams(src entity.IndexedBlock) gen.CreateIndexedBlockParams {
	var params gen.CreateIndexedBlockParams
	params.Height = int32(src.Height)
	params.Hash = src.Hash.String()
	params.EventHash = hex.EncodeToString(src.EventHash)
	params.CumulativeEventHash = hex.EncodeToString(src.CumulativeEventHash)
	return params
}
// mapProcessorStatsModelToType widens a stored processor-stats row into its
// unsigned entity form.
func mapProcessorStatsModelToType(src gen.Brc20ProcessorStat) entity.ProcessorStats {
	var stats entity.ProcessorStats
	stats.BlockHeight = uint64(src.BlockHeight)
	stats.CursedInscriptionCount = uint64(src.CursedInscriptionCount)
	stats.BlessedInscriptionCount = uint64(src.BlessedInscriptionCount)
	stats.LostSats = uint64(src.LostSats)
	return stats
}
// mapProcessorStatsTypeToParams converts domain processor stats into sqlc
// insert parameters.
func mapProcessorStatsTypeToParams(src entity.ProcessorStats) gen.CreateProcessorStatsParams {
	var params gen.CreateProcessorStatsParams
	params.BlockHeight = int32(src.BlockHeight)
	params.CursedInscriptionCount = int32(src.CursedInscriptionCount)
	params.BlessedInscriptionCount = int32(src.BlessedInscriptionCount)
	params.LostSats = int64(src.LostSats)
	return params
}
// mapTickEntryModelToType converts a tick-entry row (joined with its latest
// state) into the domain entity.TickEntry. NULL completed_at /
// completed_at_height columns map to their zero values.
func mapTickEntryModelToType(src gen.GetTickEntriesByTicksRow) (entity.TickEntry, error) {
	deployId, err := ordinals.NewInscriptionIdFromString(src.DeployInscriptionID)
	if err != nil {
		return entity.TickEntry{}, errors.Wrap(err, "invalid deployInscriptionId")
	}
	var completedAt time.Time
	if src.CompletedAt.Valid {
		completedAt = src.CompletedAt.Time
	}
	var completedAtHeight uint64
	if src.CompletedAtHeight.Valid {
		completedAtHeight = uint64(src.CompletedAtHeight.Int32)
	}
	entry := entity.TickEntry{
		Tick:                src.Tick,
		OriginalTick:        src.OriginalTick,
		TotalSupply:         decimalFromNumeric(src.TotalSupply).Decimal,
		Decimals:            uint16(src.Decimals),
		LimitPerMint:        decimalFromNumeric(src.LimitPerMint).Decimal,
		IsSelfMint:          src.IsSelfMint,
		DeployInscriptionId: deployId,
		DeployedAt:          src.DeployedAt.Time,
		DeployedAtHeight:    uint64(src.DeployedAtHeight),
		MintedAmount:        decimalFromNumeric(src.MintedAmount).Decimal,
		BurnedAmount:        decimalFromNumeric(src.BurnedAmount).Decimal,
		CompletedAt:         completedAt,
		CompletedAtHeight:   completedAtHeight,
	}
	return entry, nil
}
// mapTickEntryTypeToParams splits a domain entity.TickEntry into its immutable
// insert parameters and its per-block mutable state parameters.
// Zero CompletedAt / CompletedAtHeight values are stored as NULL.
func mapTickEntryTypeToParams(src entity.TickEntry, blockHeight uint64) (gen.CreateTickEntriesParams, gen.CreateTickEntryStatesParams, error) {
	var completedAt pgtype.Timestamp
	if !src.CompletedAt.IsZero() {
		completedAt = pgtype.Timestamp{Time: src.CompletedAt, Valid: true}
	}
	var completedAtHeight pgtype.Int4
	if src.CompletedAtHeight != 0 {
		completedAtHeight = pgtype.Int4{Int32: int32(src.CompletedAtHeight), Valid: true}
	}
	entryParams := gen.CreateTickEntriesParams{
		Tick:                src.Tick,
		OriginalTick:        src.OriginalTick,
		TotalSupply:         numericFromDecimal(src.TotalSupply),
		Decimals:            int16(src.Decimals),
		LimitPerMint:        numericFromDecimal(src.LimitPerMint),
		IsSelfMint:          src.IsSelfMint,
		DeployInscriptionID: src.DeployInscriptionId.String(),
		DeployedAt:          pgtype.Timestamp{Time: src.DeployedAt, Valid: true},
		DeployedAtHeight:    int32(src.DeployedAtHeight),
	}
	stateParams := gen.CreateTickEntryStatesParams{
		Tick:              src.Tick,
		BlockHeight:       int32(blockHeight),
		CompletedAt:       completedAt,
		CompletedAtHeight: completedAtHeight,
		MintedAmount:      numericFromDecimal(src.MintedAmount),
		BurnedAmount:      numericFromDecimal(src.BurnedAmount),
	}
	return entryParams, stateParams, nil
}
// mapInscriptionEntryModelToType converts an inscription-entry row (joined
// with its latest state) into the domain ordinals.InscriptionEntry.
// NULLable columns fall back to their zero values.
func mapInscriptionEntryModelToType(src gen.GetInscriptionEntriesByIdsRow) (ordinals.InscriptionEntry, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.Id)
	if err != nil {
		return ordinals.InscriptionEntry{}, errors.Wrap(err, "invalid inscription id")
	}
	var delegate, parent *ordinals.InscriptionId
	if src.Delegate.Valid {
		value, err := ordinals.NewInscriptionIdFromString(src.Delegate.String)
		if err != nil {
			return ordinals.InscriptionEntry{}, errors.Wrap(err, "invalid delegate id")
		}
		delegate = &value
	}
	// ord 0.14.0 supports only one parent
	if len(src.Parents) > 0 {
		value, err := ordinals.NewInscriptionIdFromString(src.Parents[0])
		if err != nil {
			return ordinals.InscriptionEntry{}, errors.Wrap(err, "invalid parent id")
		}
		parent = &value
	}
	var contentEncoding, contentType, metaprotocol string
	if src.ContentEncoding.Valid {
		contentEncoding = src.ContentEncoding.String
	}
	if src.ContentType.Valid {
		contentType = src.ContentType.String
	}
	if src.Metaprotocol.Valid {
		metaprotocol = src.Metaprotocol.String
	}
	var pointer *uint64
	if src.Pointer.Valid {
		pointer = lo.ToPtr(uint64(src.Pointer.Int64))
	}
	var createdAt time.Time
	if src.CreatedAt.Valid {
		createdAt = src.CreatedAt.Time
	}
	var transferCount uint32
	if src.TransferCount.Valid {
		transferCount = uint32(src.TransferCount.Int32)
	}
	return ordinals.InscriptionEntry{
		Id:              inscriptionId,
		Number:          src.Number,
		SequenceNumber:  uint64(src.SequenceNumber),
		Cursed:          src.Cursed,
		CursedForBRC20:  src.CursedForBrc20,
		CreatedAt:       createdAt,
		CreatedAtHeight: uint64(src.CreatedAtHeight),
		Inscription: ordinals.Inscription{
			Content:         src.Content,
			ContentEncoding: contentEncoding,
			ContentType:     contentType,
			Delegate:        delegate,
			Metadata:        src.Metadata,
			Metaprotocol:    metaprotocol,
			Parent:          parent,
			Pointer:         pointer,
		},
		TransferCount: transferCount,
	}, nil
}
// mapInscriptionEntryTypeToParams splits an ordinals.InscriptionEntry into its
// immutable insert parameters and its per-block mutable state parameters.
// Empty-string / nil optional fields are stored as NULL.
func mapInscriptionEntryTypeToParams(src ordinals.InscriptionEntry, blockHeight uint64) (gen.CreateInscriptionEntriesParams, gen.CreateInscriptionEntryStatesParams, error) {
	// toText maps "" to a NULL text column.
	toText := func(s string) pgtype.Text {
		if s == "" {
			return pgtype.Text{}
		}
		return pgtype.Text{String: s, Valid: true}
	}
	var delegate pgtype.Text
	if src.Inscription.Delegate != nil {
		delegate = pgtype.Text{String: src.Inscription.Delegate.String(), Valid: true}
	}
	var parents []string
	if src.Inscription.Parent != nil {
		parents = []string{src.Inscription.Parent.String()}
	}
	var pointer pgtype.Int8
	if src.Inscription.Pointer != nil {
		pointer = pgtype.Int8{Int64: int64(*src.Inscription.Pointer), Valid: true}
	}
	var createdAt pgtype.Timestamp
	if !src.CreatedAt.IsZero() {
		createdAt = pgtype.Timestamp{Time: src.CreatedAt, Valid: true}
	}
	entryParams := gen.CreateInscriptionEntriesParams{
		Id:              src.Id.String(),
		Number:          src.Number,
		SequenceNumber:  int64(src.SequenceNumber),
		Delegate:        delegate,
		Metadata:        src.Inscription.Metadata,
		Metaprotocol:    toText(src.Inscription.Metaprotocol),
		Parents:         parents,
		Pointer:         pointer,
		Content:         src.Inscription.Content,
		ContentEncoding: toText(src.Inscription.ContentEncoding),
		ContentType:     toText(src.Inscription.ContentType),
		Cursed:          src.Cursed,
		CursedForBrc20:  src.CursedForBRC20,
		CreatedAt:       createdAt,
		CreatedAtHeight: int32(src.CreatedAtHeight),
	}
	stateParams := gen.CreateInscriptionEntryStatesParams{
		Id:            src.Id.String(),
		BlockHeight:   int32(blockHeight),
		TransferCount: int32(src.TransferCount),
	}
	return entryParams, stateParams, nil
}
// mapInscriptionTransferModelToType converts an inscription-transfer row into
// the domain entity.InscriptionTransfer.
//
// Old/new satpoints are each stored as three NULLable columns (tx hash, output
// index, offset). A NULL tx hash means "no satpoint" and maps to the zero
// ordinals.SatPoint; a present hash with a NULL companion column is treated as
// a corrupt row and returns an error.
func mapInscriptionTransferModelToType(src gen.GetInscriptionTransfersInOutPointsRow) (entity.InscriptionTransfer, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid inscription id")
	}
	txHash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid tx hash")
	}
	var oldSatPoint, newSatPoint ordinals.SatPoint
	if src.OldSatpointTxHash.Valid {
		if !src.OldSatpointOutIdx.Valid || !src.OldSatpointOffset.Valid {
			return entity.InscriptionTransfer{}, errors.New("old satpoint out idx and offset must exist if hash exists")
		}
		// NOTE: this txHash deliberately shadows the transfer txHash above;
		// it is scoped to this if-block only.
		txHash, err := chainhash.NewHashFromStr(src.OldSatpointTxHash.String)
		if err != nil {
			return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid old satpoint tx hash")
		}
		oldSatPoint = ordinals.SatPoint{
			OutPoint: wire.OutPoint{
				Hash:  *txHash,
				Index: uint32(src.OldSatpointOutIdx.Int32),
			},
			Offset: uint64(src.OldSatpointOffset.Int64),
		}
	}
	if src.NewSatpointTxHash.Valid {
		if !src.NewSatpointOutIdx.Valid || !src.NewSatpointOffset.Valid {
			return entity.InscriptionTransfer{}, errors.New("new satpoint out idx and offset must exist if hash exists")
		}
		// Shadowed again, scoped to this if-block only.
		txHash, err := chainhash.NewHashFromStr(src.NewSatpointTxHash.String)
		if err != nil {
			return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid new satpoint tx hash")
		}
		newSatPoint = ordinals.SatPoint{
			OutPoint: wire.OutPoint{
				Hash:  *txHash,
				Index: uint32(src.NewSatpointOutIdx.Int32),
			},
			Offset: uint64(src.NewSatpointOffset.Int64),
		}
	}
	// pkscripts are stored hex-encoded.
	newPkScript, err := hex.DecodeString(src.NewPkscript)
	if err != nil {
		return entity.InscriptionTransfer{}, errors.Wrap(err, "failed to parse pkscript")
	}
	return entity.InscriptionTransfer{
		InscriptionId:             inscriptionId,
		InscriptionNumber:         src.InscriptionNumber,
		InscriptionSequenceNumber: uint64(src.InscriptionSequenceNumber),
		BlockHeight:               uint64(src.BlockHeight),
		TxIndex:                   uint32(src.TxIndex),
		TxHash:                    *txHash,
		FromInputIndex:            uint32(src.FromInputIndex),
		Content:                   src.Content,
		OldSatPoint:               oldSatPoint,
		NewSatPoint:               newSatPoint,
		NewPkScript:               newPkScript,
		NewOutputValue:            uint64(src.NewOutputValue),
		SentAsFee:                 src.SentAsFee,
		TransferCount:             uint32(src.TransferCount),
	}, nil
}
// mapInscriptionTransferTypeToParams converts a domain inscription transfer
// into sqlc insert parameters. A zero-valued old/new SatPoint is stored as
// NULL across all three of its columns.
func mapInscriptionTransferTypeToParams(src entity.InscriptionTransfer) gen.CreateInscriptionTransfersParams {
	params := gen.CreateInscriptionTransfersParams{
		InscriptionID:             src.InscriptionId.String(),
		InscriptionNumber:         src.InscriptionNumber,
		InscriptionSequenceNumber: int64(src.InscriptionSequenceNumber),
		BlockHeight:               int32(src.BlockHeight),
		TxIndex:                   int32(src.TxIndex),
		TxHash:                    src.TxHash.String(),
		FromInputIndex:            int32(src.FromInputIndex),
		NewPkscript:               hex.EncodeToString(src.NewPkScript),
		NewOutputValue:            int64(src.NewOutputValue),
		SentAsFee:                 src.SentAsFee,
		TransferCount:             int32(src.TransferCount),
	}
	if src.OldSatPoint != (ordinals.SatPoint{}) {
		params.OldSatpointTxHash = pgtype.Text{String: src.OldSatPoint.OutPoint.Hash.String(), Valid: true}
		params.OldSatpointOutIdx = pgtype.Int4{Int32: int32(src.OldSatPoint.OutPoint.Index), Valid: true}
		params.OldSatpointOffset = pgtype.Int8{Int64: int64(src.OldSatPoint.Offset), Valid: true}
	}
	if src.NewSatPoint != (ordinals.SatPoint{}) {
		params.NewSatpointTxHash = pgtype.Text{String: src.NewSatPoint.OutPoint.Hash.String(), Valid: true}
		params.NewSatpointOutIdx = pgtype.Int4{Int32: int32(src.NewSatPoint.OutPoint.Index), Valid: true}
		params.NewSatpointOffset = pgtype.Int8{Int64: int64(src.NewSatPoint.Offset), Valid: true}
	}
	return params
}
// mapEventDeployModelToType converts a brc20_event_deploys row into the domain
// entity.EventDeploy.
func mapEventDeployModelToType(src gen.Brc20EventDeploy) (entity.EventDeploy, error) {
	id, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "invalid inscription id")
	}
	hash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "invalid tx hash")
	}
	script, err := hex.DecodeString(src.Pkscript)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "failed to parse pkscript")
	}
	sp, err := ordinals.NewSatPointFromString(src.Satpoint)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "cannot parse satpoint")
	}
	event := entity.EventDeploy{
		Id:                src.Id,
		InscriptionId:     id,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *hash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		PkScript:          script,
		SatPoint:          sp,
		TotalSupply:       decimalFromNumeric(src.TotalSupply).Decimal,
		Decimals:          uint16(src.Decimals),
		LimitPerMint:      decimalFromNumeric(src.LimitPerMint).Decimal,
		IsSelfMint:        src.IsSelfMint,
	}
	return event, nil
}
// mapEventDeployTypeToParams converts a domain entity.EventDeploy into sqlc
// insert parameters. A zero Timestamp is stored as NULL.
func mapEventDeployTypeToParams(src entity.EventDeploy) (gen.CreateEventDeploysParams, error) {
	ts := pgtype.Timestamp{Time: src.Timestamp, Valid: !src.Timestamp.IsZero()}
	params := gen.CreateEventDeploysParams{
		Id:                src.Id,
		InscriptionID:     src.InscriptionId.String(),
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            src.TxHash.String(),
		BlockHeight:       int32(src.BlockHeight),
		TxIndex:           int32(src.TxIndex),
		Timestamp:         ts,
		Pkscript:          hex.EncodeToString(src.PkScript),
		Satpoint:          src.SatPoint.String(),
		TotalSupply:       numericFromDecimal(src.TotalSupply),
		Decimals:          int16(src.Decimals),
		LimitPerMint:      numericFromDecimal(src.LimitPerMint),
		IsSelfMint:        src.IsSelfMint,
	}
	return params, nil
}
// mapEventMintModelToType converts a brc20_event_mints row into the domain
// entity.EventMint. A NULL parent_id yields a nil ParentId.
func mapEventMintModelToType(src gen.Brc20EventMint) (entity.EventMint, error) {
	id, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "invalid inscription id")
	}
	hash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "invalid tx hash")
	}
	script, err := hex.DecodeString(src.Pkscript)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "failed to parse pkscript")
	}
	sp, err := ordinals.NewSatPointFromString(src.Satpoint)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "cannot parse satpoint")
	}
	var parent *ordinals.InscriptionId
	if src.ParentID.Valid {
		value, err := ordinals.NewInscriptionIdFromString(src.ParentID.String)
		if err != nil {
			return entity.EventMint{}, errors.Wrap(err, "invalid parent id")
		}
		parent = &value
	}
	event := entity.EventMint{
		Id:                src.Id,
		InscriptionId:     id,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *hash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		PkScript:          script,
		SatPoint:          sp,
		Amount:            decimalFromNumeric(src.Amount).Decimal,
		ParentId:          parent,
	}
	return event, nil
}
// mapEventMintTypeToParams converts a domain entity.EventMint into sqlc insert
// parameters. A zero Timestamp and a nil ParentId are stored as NULL.
func mapEventMintTypeToParams(src entity.EventMint) (gen.CreateEventMintsParams, error) {
	ts := pgtype.Timestamp{Time: src.Timestamp, Valid: !src.Timestamp.IsZero()}
	var parent pgtype.Text
	if src.ParentId != nil {
		parent = pgtype.Text{String: src.ParentId.String(), Valid: true}
	}
	params := gen.CreateEventMintsParams{
		Id:                src.Id,
		InscriptionID:     src.InscriptionId.String(),
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            src.TxHash.String(),
		BlockHeight:       int32(src.BlockHeight),
		TxIndex:           int32(src.TxIndex),
		Timestamp:         ts,
		Pkscript:          hex.EncodeToString(src.PkScript),
		Satpoint:          src.SatPoint.String(),
		Amount:            numericFromDecimal(src.Amount),
		ParentID:          parent,
	}
	return params, nil
}
// mapEventInscribeTransferModelToType converts a brc20_event_inscribe_transfers
// row into the domain entity.EventInscribeTransfer.
func mapEventInscribeTransferModelToType(src gen.Brc20EventInscribeTransfer) (entity.EventInscribeTransfer, error) {
	id, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse inscription id")
	}
	hash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse hash")
	}
	script, err := hex.DecodeString(src.Pkscript)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse pkScript")
	}
	sp, err := ordinals.NewSatPointFromString(src.Satpoint)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse satPoint")
	}
	event := entity.EventInscribeTransfer{
		Id:                src.Id,
		InscriptionId:     id,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *hash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		PkScript:          script,
		SatPoint:          sp,
		OutputIndex:       uint32(src.OutputIndex),
		SatsAmount:        uint64(src.SatsAmount),
		Amount:            decimalFromNumeric(src.Amount).Decimal,
	}
	return event, nil
}
// mapEventInscribeTransferTypeToParams converts a domain inscribe-transfer
// event into sqlc insert parameters. A zero Timestamp is stored as NULL.
func mapEventInscribeTransferTypeToParams(src entity.EventInscribeTransfer) (gen.CreateEventInscribeTransfersParams, error) {
	ts := pgtype.Timestamp{Time: src.Timestamp, Valid: !src.Timestamp.IsZero()}
	params := gen.CreateEventInscribeTransfersParams{
		Id:                src.Id,
		InscriptionID:     src.InscriptionId.String(),
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            src.TxHash.String(),
		BlockHeight:       int32(src.BlockHeight),
		TxIndex:           int32(src.TxIndex),
		Timestamp:         ts,
		Pkscript:          hex.EncodeToString(src.PkScript),
		Satpoint:          src.SatPoint.String(),
		OutputIndex:       int32(src.OutputIndex),
		SatsAmount:        int64(src.SatsAmount),
		Amount:            numericFromDecimal(src.Amount),
	}
	return params, nil
}
// mapEventTransferTransferModelToType converts a brc20_event_transfer_transfers
// row into the domain entity.EventTransferTransfer, decoding both the sender
// (from) and receiver (to) pkscripts and satpoints.
func mapEventTransferTransferModelToType(src gen.Brc20EventTransferTransfer) (entity.EventTransferTransfer, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse inscription id")
	}
	txHash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse hash")
	}
	// pkscripts are stored hex-encoded; satpoints as their string form.
	fromPkScript, err := hex.DecodeString(src.FromPkscript)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse fromPkScript")
	}
	fromSatPoint, err := ordinals.NewSatPointFromString(src.FromSatpoint)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse fromSatPoint")
	}
	toPkScript, err := hex.DecodeString(src.ToPkscript)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse toPkScript")
	}
	toSatPoint, err := ordinals.NewSatPointFromString(src.ToSatpoint)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse toSatPoint")
	}
	return entity.EventTransferTransfer{
		Id:                src.Id,
		InscriptionId:     inscriptionId,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *txHash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		FromPkScript:      fromPkScript,
		FromSatPoint:      fromSatPoint,
		FromInputIndex:    uint32(src.FromInputIndex),
		ToPkScript:        toPkScript,
		ToSatPoint:        toSatPoint,
		ToOutputIndex:     uint32(src.ToOutputIndex),
		SpentAsFee:        src.SpentAsFee,
		Amount:            decimalFromNumeric(src.Amount).Decimal,
	}, nil
}
// mapEventTransferTransferTypeToParams converts a domain transfer-transfer
// event into sqlc insert parameters. A zero Timestamp is stored as NULL.
func mapEventTransferTransferTypeToParams(src entity.EventTransferTransfer) (gen.CreateEventTransferTransfersParams, error) {
	ts := pgtype.Timestamp{Time: src.Timestamp, Valid: !src.Timestamp.IsZero()}
	params := gen.CreateEventTransferTransfersParams{
		Id:                src.Id,
		InscriptionID:     src.InscriptionId.String(),
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            src.TxHash.String(),
		BlockHeight:       int32(src.BlockHeight),
		TxIndex:           int32(src.TxIndex),
		Timestamp:         ts,
		FromPkscript:      hex.EncodeToString(src.FromPkScript),
		FromSatpoint:      src.FromSatPoint.String(),
		FromInputIndex:    int32(src.FromInputIndex),
		ToPkscript:        hex.EncodeToString(src.ToPkScript),
		ToSatpoint:        src.ToSatPoint.String(),
		ToOutputIndex:     int32(src.ToOutputIndex),
		SpentAsFee:        src.SpentAsFee,
		Amount:            numericFromDecimal(src.Amount),
	}
	return params, nil
}
// mapBalanceModelToType converts a brc20_balances row into the domain
// entity.Balance.
func mapBalanceModelToType(src gen.Brc20Balance) (entity.Balance, error) {
	script, err := hex.DecodeString(src.Pkscript)
	if err != nil {
		return entity.Balance{}, errors.Wrap(err, "failed to parse pkscript")
	}
	balance := entity.Balance{
		PkScript:         script,
		Tick:             src.Tick,
		BlockHeight:      uint64(src.BlockHeight),
		OverallBalance:   decimalFromNumeric(src.OverallBalance).Decimal,
		AvailableBalance: decimalFromNumeric(src.AvailableBalance).Decimal,
	}
	return balance, nil
}
// mapBalanceTypeToParams converts a domain entity.Balance into sqlc insert
// parameters; the pkscript is stored hex-encoded.
func mapBalanceTypeToParams(src entity.Balance) gen.CreateBalancesParams {
	var params gen.CreateBalancesParams
	params.Pkscript = hex.EncodeToString(src.PkScript)
	params.Tick = src.Tick
	params.BlockHeight = int32(src.BlockHeight)
	params.OverallBalance = numericFromDecimal(src.OverallBalance)
	params.AvailableBalance = numericFromDecimal(src.AvailableBalance)
	return params
}

View File

@@ -0,0 +1,20 @@
package postgres
import (
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres/gen"
"github.com/jackc/pgx/v5"
)
// Repository implements the BRC-20 data gateways on top of PostgreSQL using
// sqlc-generated queries. When tx is non-nil, queries is bound to that
// transaction (see begin in tx.go).
type Repository struct {
	db      postgres.DB  // underlying database handle
	queries *gen.Queries // sqlc-generated query layer
	tx      pgx.Tx       // current transaction; nil when not in a transaction
}
// NewRepository constructs a Repository backed by the given database handle,
// with no transaction open.
func NewRepository(db postgres.DB) *Repository {
	repo := &Repository{db: db, queries: gen.New(db)}
	return repo
}

View File

@@ -0,0 +1,62 @@
package postgres
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/jackc/pgx/v5"
)
// ErrTxAlreadyExists is returned by begin when the repository is already bound
// to an open transaction; callers must Commit() or Rollback() first.
//
// NOTE: message lowercased and unpunctuated per Go error-string convention
// (errors are often wrapped and composed into larger messages).
var ErrTxAlreadyExists = errors.New("transaction already exists: call Commit() or Rollback() first")

// begin starts a new database transaction and returns a derived Repository
// whose queries run inside it. The receiver itself is left untouched, so the
// original Repository can keep serving non-transactional queries.
func (r *Repository) begin(ctx context.Context) (*Repository, error) {
	if r.tx != nil {
		return nil, errors.WithStack(ErrTxAlreadyExists)
	}
	tx, err := r.db.Begin(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to begin transaction")
	}
	return &Repository{
		db:      r.db,
		queries: r.queries.WithTx(tx),
		tx:      tx,
	}, nil
}
// BeginBRC20Tx starts a database transaction and returns a transaction-scoped
// BRC-20 data gateway. The caller must Commit or Rollback it.
func (r *Repository) BeginBRC20Tx(ctx context.Context) (datagateway.BRC20DataGatewayWithTx, error) {
	txRepo, err := r.begin(ctx)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return txRepo, nil
}
// Commit commits the current transaction, if any, and detaches it from the
// repository. Calling Commit with no open transaction is a no-op.
func (r *Repository) Commit(ctx context.Context) error {
	if r.tx == nil {
		return nil
	}
	if err := r.tx.Commit(ctx); err != nil {
		return errors.Wrap(err, "failed to commit transaction")
	}
	r.tx = nil
	return nil
}
// Rollback rolls back the current transaction, if any, and detaches it from
// the repository. A pgx.ErrTxClosed error is tolerated (the transaction was
// already committed or rolled back), making Rollback safe to defer after
// Commit. Calling Rollback with no open transaction is a no-op.
func (r *Repository) Rollback(ctx context.Context) error {
	if r.tx == nil {
		return nil
	}
	rollbackErr := r.tx.Rollback(ctx)
	switch {
	case rollbackErr == nil:
		logger.DebugContext(ctx, "rolled back transaction")
	case !errors.Is(rollbackErr, pgx.ErrTxClosed):
		return errors.Wrap(rollbackErr, "failed to rollback transaction")
	}
	r.tx = nil
	return nil
}

View File

@@ -0,0 +1,24 @@
package usecase
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
)
// GetBalancesByPkScript returns the balances held by pkScript as of
// blockHeight, keyed by tick.
func (u *Usecase) GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[string]*entity.Balance, error) {
	result, err := u.dg.GetBalancesByPkScript(ctx, pkScript, blockHeight)
	if err != nil {
		return nil, errors.Wrap(err, "error during GetBalancesByPkScript")
	}
	return result, nil
}
// GetBalancesByTick returns all balances for the given tick as of blockHeight.
func (u *Usecase) GetBalancesByTick(ctx context.Context, tick string, blockHeight uint64) ([]*entity.Balance, error) {
	result, err := u.dg.GetBalancesByTick(ctx, tick, blockHeight)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get balance by tick")
	}
	return result, nil
}

View File

@@ -0,0 +1,33 @@
package usecase
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
)
// GetTickEntryByTickBatch returns the latest tick entries for the given ticks,
// keyed by tick.
func (u *Usecase) GetTickEntryByTickBatch(ctx context.Context, ticks []string) (map[string]*entity.TickEntry, error) {
	result, err := u.dg.GetTickEntriesByTicks(ctx, ticks)
	if err != nil {
		return nil, errors.Wrap(err, "error during GetTickEntriesByTicks")
	}
	return result, nil
}
// GetTickEntryByTickAndHeight returns the tick entry for a single tick as of
// blockHeight, or a wrapped errs.NotFound when absent.
//
// NOTE(review): this delegates to GetTickEntryByTickAndHeightBatch, which is
// currently an unimplemented stub returning (nil, nil) — so this method always
// reports "entry not found" until the batch lookup is implemented.
func (u *Usecase) GetTickEntryByTickAndHeight(ctx context.Context, tick string, blockHeight uint64) (*entity.TickEntry, error) {
	entries, err := u.GetTickEntryByTickAndHeightBatch(ctx, []string{tick}, blockHeight)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	entry, ok := entries[tick]
	if !ok {
		return nil, errors.Wrap(errs.NotFound, "entry not found")
	}
	return entry, nil
}
// GetTickEntryByTickAndHeightBatch is intended to return the tick entries for
// the given ticks as of blockHeight, keyed by tick.
//
// TODO: not implemented — currently returns (nil, nil), so callers such as
// GetTickEntryByTickAndHeight always see an empty result set.
func (u *Usecase) GetTickEntryByTickAndHeightBatch(ctx context.Context, ticks []string, blockHeight uint64) (map[string]*entity.TickEntry, error) {
	return nil, nil
}

View File

@@ -0,0 +1,15 @@
package usecase
import (
"context"
"github.com/cockroachdb/errors"
)
// GetFirstLastInscriptionNumberByTick returns the first and last inscription
// numbers recorded for the given tick. Both values are -1 on error.
func (u *Usecase) GetFirstLastInscriptionNumberByTick(ctx context.Context, tick string) (int64, int64, error) {
	first, last, err := u.dg.GetFirstLastInscriptionNumberByTick(ctx, tick)
	if err != nil {
		return -1, -1, errors.Wrap(err, "error during GetFirstLastInscriptionNumberByTick")
	}
	return first, last, nil
}

View File

@@ -0,0 +1,16 @@
package usecase
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
)
// GetLatestBlock returns the header of the most recently indexed block.
func (u *Usecase) GetLatestBlock(ctx context.Context) (types.BlockHeader, error) {
	header, err := u.dg.GetLatestBlock(ctx)
	if err != nil {
		return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block")
	}
	return header, nil
}

View File

@@ -0,0 +1,16 @@
package usecase
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
)
// GetDeployEventByTick returns the deploy event for the given tick.
func (u *Usecase) GetDeployEventByTick(ctx context.Context, tick string) (*entity.EventDeploy, error) {
	event, err := u.dg.GetDeployEventByTick(ctx, tick)
	if err != nil {
		return nil, errors.Wrap(err, "error during GetDeployEventByTick")
	}
	return event, nil
}

View File

@@ -0,0 +1,40 @@
package usecase
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
)
// GetDeployEvents returns deploy events filtered by pkScript, tick, and
// block height.
func (u *Usecase) GetDeployEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventDeploy, error) {
	events, err := u.dg.GetDeployEvents(ctx, pkScript, tick, height)
	if err != nil {
		return nil, errors.Wrap(err, "error during GetDeployEvents")
	}
	return events, nil
}
// GetMintEvents returns mint events filtered by pkScript, tick, and
// block height.
func (u *Usecase) GetMintEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventMint, error) {
	events, err := u.dg.GetMintEvents(ctx, pkScript, tick, height)
	if err != nil {
		return nil, errors.Wrap(err, "error during GetMintEvents")
	}
	return events, nil
}
// GetInscribeTransferEvents returns inscribe-transfer events filtered by
// pkScript, tick, and block height.
func (u *Usecase) GetInscribeTransferEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventInscribeTransfer, error) {
	events, err := u.dg.GetInscribeTransferEvents(ctx, pkScript, tick, height)
	if err != nil {
		return nil, errors.Wrap(err, "error during GetInscribeTransferEvents")
	}
	return events, nil
}
// GetTransferTransferEvents returns transfer-transfer events filtered by
// pkScript, tick, and block height.
func (u *Usecase) GetTransferTransferEvents(ctx context.Context, pkScript []byte, tick string, height uint64) ([]*entity.EventTransferTransfer, error) {
	result, err := u.dg.GetTransferTransferEvents(ctx, pkScript, tick, height)
	if err != nil {
		// fixed: error context previously said "GetTransferTransfersEvents" (typo)
		return nil, errors.Wrap(err, "error during GetTransferTransferEvents")
	}
	return result, nil
}

View File

@@ -0,0 +1,16 @@
package usecase
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
)
// GetTransferableTransfersByPkScript returns the inscribe-transfer events
// still transferable by pkScript as of blockHeight.
func (u *Usecase) GetTransferableTransfersByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.EventInscribeTransfer, error) {
	events, err := u.dg.GetTransferableTransfersByPkScript(ctx, pkScript, blockHeight)
	if err != nil {
		return nil, errors.Wrap(err, "error during GetTransferableTransfersByPkScript")
	}
	return events, nil
}

View File

@@ -0,0 +1,18 @@
package usecase
import (
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/pkg/btcclient"
)
// Usecase exposes read operations over indexed BRC-20 data, backed by a data
// gateway and a Bitcoin client.
type Usecase struct {
	dg            datagateway.BRC20DataGateway // persistence layer for BRC-20 data
	bitcoinClient btcclient.Contract           // Bitcoin node client
}

// New constructs a Usecase with the given data gateway and Bitcoin client.
func New(dg datagateway.BRC20DataGateway, bitcoinClient btcclient.Contract) *Usecase {
	return &Usecase{
		dg:            dg,
		bitcoinClient: bitcoinClient,
	}
}

238
modules/brc20/processor.go Normal file
View File

@@ -0,0 +1,238 @@
package brc20
import (
"context"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/gaze-network/indexer-network/pkg/btcclient"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/lru"
)
// Make sure to implement the Bitcoin Processor interface
var _ indexer.Processor[*types.Block] = (*Processor)(nil)

// Processor indexes BRC-20 activity from Bitcoin blocks. Per-block results are
// accumulated in the "flush buffers" fields before being persisted.
type Processor struct {
	brc20Dg       datagateway.BRC20DataGateway       // persistence for BRC-20/inscription data
	indexerInfoDg datagateway.IndexerInfoDataGateway // persistence for indexer metadata
	btcClient     btcclient.Contract                 // Bitcoin node client
	network       common.Network                     // network being indexed (validated in VerifyStates)
	cleanupFuncs  []func(context.Context) error      // teardown hooks

	// block states
	flotsamsSentAsFee []*entity.Flotsam
	blockReward       uint64

	// processor stats (loaded from persisted stats by VerifyStates)
	cursedInscriptionCount  uint64
	blessedInscriptionCount uint64
	lostSats                uint64

	// cache
	outPointValueCache *lru.Cache[wire.OutPoint, uint64] // outpoint -> output value

	// flush buffers - inscription states
	newInscriptionTransfers   []*entity.InscriptionTransfer
	newInscriptionEntries     map[ordinals.InscriptionId]*ordinals.InscriptionEntry
	newInscriptionEntryStates map[ordinals.InscriptionId]*ordinals.InscriptionEntry

	// flush buffers - brc20 states
	newTickEntries            map[string]*entity.TickEntry
	newTickEntryStates        map[string]*entity.TickEntry
	newEventDeploys           []*entity.EventDeploy
	newEventMints             []*entity.EventMint
	newEventInscribeTransfers []*entity.EventInscribeTransfer
	newEventTransferTransfers []*entity.EventTransferTransfer
	newBalances               map[string]map[string]*entity.Balance // pkscript -> tick -> balance

	eventHashString string
}
// TODO: move this to config
const outPointValueCacheSize = 100000

// NewProcessor constructs a Processor with empty block state and flush
// buffers. The stats counters (cursed/blessed inscription counts, lost sats)
// are zero here and are loaded from persisted stats by VerifyStates.
func NewProcessor(brc20Dg datagateway.BRC20DataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, btcClient btcclient.Contract, network common.Network, cleanupFuncs []func(context.Context) error) (*Processor, error) {
	outPointValueCache, err := lru.New[wire.OutPoint, uint64](outPointValueCacheSize)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create outPointValueCache")
	}
	return &Processor{
		brc20Dg:                   brc20Dg,
		indexerInfoDg:             indexerInfoDg,
		btcClient:                 btcClient,
		network:                   network,
		cleanupFuncs:              cleanupFuncs,
		flotsamsSentAsFee:         make([]*entity.Flotsam, 0),
		blockReward:               0,
		cursedInscriptionCount:    0, // to be initialized by p.VerifyStates()
		blessedInscriptionCount:   0, // to be initialized by p.VerifyStates()
		lostSats:                  0, // to be initialized by p.VerifyStates()
		outPointValueCache:        outPointValueCache,
		newInscriptionTransfers:   make([]*entity.InscriptionTransfer, 0),
		newInscriptionEntries:     make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry),
		newInscriptionEntryStates: make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry),
		newTickEntries:            make(map[string]*entity.TickEntry),
		newTickEntryStates:        make(map[string]*entity.TickEntry),
		newEventDeploys:           make([]*entity.EventDeploy, 0),
		newEventMints:             make([]*entity.EventMint, 0),
		newEventInscribeTransfers: make([]*entity.EventInscribeTransfer, 0),
		newEventTransferTransfers: make([]*entity.EventTransferTransfer, 0),
		newBalances:               make(map[string]map[string]*entity.Balance),
	}, nil
}
// VerifyStates implements indexer.Processor. It checks that the stored indexer
// state (db version, event hash version, network) matches this binary's
// configuration — creating the state row on first run — and then loads the
// latest persisted processor stats into the in-memory counters.
func (p *Processor) VerifyStates(ctx context.Context) error {
	indexerState, err := p.indexerInfoDg.GetLatestIndexerState(ctx)
	if err != nil && !errors.Is(err, errs.NotFound) {
		return errors.Wrap(err, "failed to get latest indexer state")
	}
	// if not found, create indexer state
	if errors.Is(err, errs.NotFound) {
		if err := p.indexerInfoDg.CreateIndexerState(ctx, entity.IndexerState{
			ClientVersion:    ClientVersion,
			DBVersion:        DBVersion,
			EventHashVersion: EventHashVersion,
			Network:          p.network,
		}); err != nil {
			return errors.Wrap(err, "failed to set indexer state")
		}
	} else {
		if indexerState.DBVersion != DBVersion {
			return errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, DBVersion)
		}
		if indexerState.EventHashVersion != EventHashVersion {
			// fixed: the original format string had one %d verb but two
			// arguments (go vet printf: %!(EXTRA ...)), and referred to
			// "rune's db" inside the brc20 module.
			return errors.Wrapf(errs.ConflictSetting, "event hash version mismatch: current version is %d, expected version is %d. Please reset the brc20 db first", indexerState.EventHashVersion, EventHashVersion)
		}
		if indexerState.Network != p.network {
			// common.Network is a string-based type (see string(src.Network)
			// in the mappers), so %s is the correct verb, not %d.
			return errors.Wrapf(errs.ConflictSetting, "network mismatch: latest indexed network is %s, configured network is %s. If you want to change the network, please reset the database", indexerState.Network, p.network)
		}
	}
	stats, err := p.brc20Dg.GetProcessorStats(ctx)
	if err != nil {
		if !errors.Is(err, errs.NotFound) {
			// fixed: was wrapped as "failed to count cursed inscriptions",
			// which does not describe this call.
			return errors.Wrap(err, "failed to get processor stats")
		}
		// no stats yet: start counters from the module's starting block.
		stats = &entity.ProcessorStats{
			BlockHeight:             uint64(startingBlockHeader[p.network].Height),
			CursedInscriptionCount:  0,
			BlessedInscriptionCount: 0,
			LostSats:                0,
		}
	}
	p.cursedInscriptionCount = stats.CursedInscriptionCount
	p.blessedInscriptionCount = stats.BlessedInscriptionCount
	p.lostSats = stats.LostSats
	return nil
}
// CurrentBlock implements indexer.Processor. It returns the most recently
// indexed block header, or the module's starting block header when nothing
// has been indexed yet.
func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
	header, err := p.brc20Dg.GetLatestBlock(ctx)
	if err == nil {
		return header, nil
	}
	if errors.Is(err, errs.NotFound) {
		return startingBlockHeader[p.network], nil
	}
	return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block")
}
// GetIndexedBlock implements indexer.Processor. It returns the header of the
// indexed block at the given height.
func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
	indexed, err := p.brc20Dg.GetIndexedBlockByHeight(ctx, height)
	if err != nil {
		return types.BlockHeader{}, errors.Wrap(err, "failed to get indexed block")
	}
	header := types.BlockHeader{
		Height: int64(indexed.Height),
		Hash:   indexed.Hash,
	}
	return header, nil
}
// Name implements indexer.Processor. It identifies this module as "brc20".
func (p *Processor) Name() string {
	const moduleName = "brc20"
	return moduleName
}
// RevertData implements indexer.Processor.
//
// It deletes, inside a single database transaction, every piece of BRC-20
// state produced at or after block height `from`: indexed blocks, processor
// stats, tick entries/states, all event tables, balances, and inscription
// entries/states/transfers. If any step fails the deferred rollback undoes
// the partial work (rollback failures are only logged as warnings).
func (p *Processor) RevertData(ctx context.Context, from int64) error {
	tx, err := p.brc20Dg.BeginBRC20Tx(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to begin transaction")
	}
	// Rollback after a successful Commit is a no-op failure; log it either way.
	defer func() {
		if rbErr := tx.Rollback(ctx); rbErr != nil {
			logger.WarnContext(ctx, "failed to rollback transaction",
				slogx.Error(rbErr),
				slogx.String("event", "rollback_brc20_insertion"),
			)
		}
	}()

	height := uint64(from)
	// Table-driven deletion keeps each step paired with its error context;
	// the order matches the original statement order.
	steps := []struct {
		run func(context.Context, uint64) error
		msg string
	}{
		{tx.DeleteIndexedBlocksSinceHeight, "failed to delete indexed blocks"},
		{tx.DeleteProcessorStatsSinceHeight, "failed to delete processor stats"},
		{tx.DeleteTickEntriesSinceHeight, "failed to delete ticks"},
		{tx.DeleteTickEntryStatesSinceHeight, "failed to delete tick states"},
		{tx.DeleteEventDeploysSinceHeight, "failed to delete deploy events"},
		{tx.DeleteEventMintsSinceHeight, "failed to delete mint events"},
		{tx.DeleteEventInscribeTransfersSinceHeight, "failed to delete inscribe transfer events"},
		{tx.DeleteEventTransferTransfersSinceHeight, "failed to delete transfer transfer events"},
		{tx.DeleteBalancesSinceHeight, "failed to delete balances"},
		{tx.DeleteInscriptionEntriesSinceHeight, "failed to delete inscription entries"},
		{tx.DeleteInscriptionEntryStatesSinceHeight, "failed to delete inscription entry states"},
		{tx.DeleteInscriptionTransfersSinceHeight, "failed to delete inscription transfers"},
	}
	for _, step := range steps {
		if err := step.run(ctx, height); err != nil {
			return errors.Wrap(err, step.msg)
		}
	}
	if err := tx.Commit(ctx); err != nil {
		return errors.Wrap(err, "failed to commit transaction")
	}
	return nil
}
// Shutdown runs every registered cleanup function, collecting failures so a
// broken cleanup does not prevent the remaining ones from running, and
// returns the joined errors (nil when all cleanups succeed).
func (p *Processor) Shutdown(ctx context.Context) error {
	// Named cleanupErrs (not "errs") to avoid shadowing the imported errs
	// package used elsewhere in this file.
	var cleanupErrs []error
	for _, cleanup := range p.cleanupFuncs {
		if err := cleanup(ctx); err != nil {
			cleanupErrs = append(cleanupErrs, err)
		}
	}
	return errors.WithStack(errors.Join(cleanupErrs...))
}

View File

@@ -0,0 +1,448 @@
package brc20
import (
"bytes"
"context"
"encoding/hex"
"strings"
"time"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/brc20"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/samber/lo"
"github.com/shopspring/decimal"
)
// processBRC20States applies BRC-20 protocol rules to a block's inscription
// transfers and stages the resulting state diff (new tick entries, deploy/
// mint/transfer events, balances, and the block's event-hash string) on the
// Processor for a later flush.
//
// It first parses the transfers into BRC-20 payloads (silently skipping
// empty or unparseable content), then batch-preloads everything the event
// handlers need (tick entries, inscription numbers/parents, prior balances,
// prior inscribe-transfer events), and finally dispatches each payload to
// the matching handler in transfer order.
func (p *Processor) processBRC20States(ctx context.Context, transfers []*entity.InscriptionTransfer, blockHeader types.BlockHeader) error {
	payloads := make([]*brc20.Payload, 0)
	ticks := make(map[string]struct{})
	for _, transfer := range transfers {
		if transfer.Content == nil {
			// skip empty content
			continue
		}
		payload, err := brc20.ParsePayload(transfer)
		if err != nil {
			// skip invalid payloads
			continue
		}
		payloads = append(payloads, payload)
		ticks[payload.Tick] = struct{}{}
	}
	if len(payloads) == 0 {
		// skip if no valid payloads
		return nil
	}
	// TODO: concurrently fetch from db to optimize speed
	tickEntries, err := p.brc20Dg.GetTickEntriesByTicks(ctx, lo.Keys(ticks))
	if err != nil {
		return errors.Wrap(err, "failed to get inscription entries by ids")
	}
	// preload required data to reduce individual data fetching during process
	inscriptionIds := make([]ordinals.InscriptionId, 0)
	inscriptionIdsToFetchParent := make([]ordinals.InscriptionId, 0)
	inscriptionIdsToFetchEventInscribeTransfer := make([]ordinals.InscriptionId, 0)
	balancesToFetch := make([]datagateway.GetBalancesBatchAtHeightQuery, 0) // pkscript -> tick -> struct{}
	for _, payload := range payloads {
		inscriptionIds = append(inscriptionIds, payload.Transfer.InscriptionId)
		if payload.Op == brc20.OperationMint {
			// preload parent id to validate mint events with self mint.
			// guard against nil: a mint for a tick that was never deployed has
			// no entry in tickEntries (map values are nilable pointers).
			if entry := tickEntries[payload.Tick]; entry != nil && entry.IsSelfMint {
				inscriptionIdsToFetchParent = append(inscriptionIdsToFetchParent, payload.Transfer.InscriptionId)
			}
		}
		if payload.Op == brc20.OperationTransfer {
			if payload.Transfer.OldSatPoint == (ordinals.SatPoint{}) {
				// preload balance to validate inscribe transfer event
				balancesToFetch = append(balancesToFetch, datagateway.GetBalancesBatchAtHeightQuery{
					PkScriptHex: hex.EncodeToString(payload.Transfer.NewPkScript),
					Tick:        payload.Tick,
				})
			} else {
				// preload inscribe-transfer events to validate transfer-transfer event
				inscriptionIdsToFetchEventInscribeTransfer = append(inscriptionIdsToFetchEventInscribeTransfer, payload.Transfer.InscriptionId)
			}
		}
	}
	inscriptionIdsToNumber, err := p.getInscriptionNumbersByIds(ctx, lo.Uniq(inscriptionIds))
	if err != nil {
		return errors.Wrap(err, "failed to get inscription numbers by ids")
	}
	inscriptionIdsToParent, err := p.getInscriptionParentsByIds(ctx, lo.Uniq(inscriptionIdsToFetchParent))
	if err != nil {
		return errors.Wrap(err, "failed to get inscription parents by ids")
	}
	latestEventId, err := p.brc20Dg.GetLatestEventId(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to get latest event id")
	}
	// pkscript -> tick -> balance
	balances, err := p.brc20Dg.GetBalancesBatchAtHeight(ctx, uint64(blockHeader.Height-1), balancesToFetch)
	if err != nil {
		return errors.Wrap(err, "failed to get balances batch at height")
	}
	eventInscribeTransfers, err := p.brc20Dg.GetEventInscribeTransfersByInscriptionIds(ctx, lo.Uniq(inscriptionIdsToFetchEventInscribeTransfer))
	if err != nil {
		return errors.Wrap(err, "failed to get event inscribe transfers by inscription ids")
	}
	newTickEntries := make(map[string]*entity.TickEntry)
	newTickEntryStates := make(map[string]*entity.TickEntry)
	newEventDeploys := make([]*entity.EventDeploy, 0)
	newEventMints := make([]*entity.EventMint, 0)
	newEventInscribeTransfers := make([]*entity.EventInscribeTransfer, 0)
	newEventTransferTransfers := make([]*entity.EventTransferTransfer, 0)
	newBalances := make(map[string]map[string]*entity.Balance)
	var eventHashBuilder strings.Builder

	// handleEventDeploy registers a new tick if the deploy inscription is
	// fresh (first transfer) and the tick does not already exist.
	handleEventDeploy := func(payload *brc20.Payload, tickEntry *entity.TickEntry) {
		if payload.Transfer.TransferCount > 1 {
			// skip used deploy inscriptions
			return
		}
		if tickEntry != nil {
			// skip deploy inscriptions for duplicate ticks
			return
		}
		newEntry := &entity.TickEntry{
			Tick:                payload.Tick,
			OriginalTick:        payload.OriginalTick,
			TotalSupply:         payload.Max,
			Decimals:            payload.Dec,
			LimitPerMint:        payload.Lim,
			IsSelfMint:          payload.SelfMint,
			DeployInscriptionId: payload.Transfer.InscriptionId,
			DeployedAt:          blockHeader.Timestamp,
			DeployedAtHeight:    payload.Transfer.BlockHeight,
			MintedAmount:        decimal.Zero,
			BurnedAmount:        decimal.Zero,
			CompletedAt:         time.Time{},
			CompletedAtHeight:   0,
		}
		newTickEntries[payload.Tick] = newEntry
		newTickEntryStates[payload.Tick] = newEntry
		// update entries for other operations in same block
		tickEntries[payload.Tick] = newEntry
		event := &entity.EventDeploy{
			Id:                latestEventId + 1,
			InscriptionId:     payload.Transfer.InscriptionId,
			InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
			Tick:              payload.Tick,
			OriginalTick:      payload.OriginalTick,
			TxHash:            payload.Transfer.TxHash,
			BlockHeight:       payload.Transfer.BlockHeight,
			TxIndex:           payload.Transfer.TxIndex,
			Timestamp:         blockHeader.Timestamp,
			PkScript:          payload.Transfer.NewPkScript,
			SatPoint:          payload.Transfer.NewSatPoint,
			TotalSupply:       payload.Max,
			Decimals:          payload.Dec,
			LimitPerMint:      payload.Lim,
			IsSelfMint:        payload.SelfMint,
		}
		newEventDeploys = append(newEventDeploys, event)
		latestEventId++
		eventHashBuilder.WriteString(getEventDeployString(event) + eventHashSeparator)
	}

	// handleEventMint validates a mint against the tick's supply, decimals,
	// per-mint limit and (for self-mint ticks) the parent inscription, then
	// credits the minted amount to the tick entry.
	handleEventMint := func(payload *brc20.Payload, tickEntry *entity.TickEntry) {
		if payload.Transfer.TransferCount > 1 {
			// skip mint inscriptions that are already used
			return
		}
		if tickEntry == nil {
			// skip mint inscriptions for non-existent ticks
			return
		}
		if -payload.Amt.Exponent() > int32(tickEntry.Decimals) {
			// skip mint inscriptions with decimals greater than allowed
			return
		}
		if tickEntry.MintedAmount.GreaterThanOrEqual(tickEntry.TotalSupply) {
			// skip mint inscriptions for ticks with completed mints
			return
		}
		if payload.Amt.GreaterThan(tickEntry.LimitPerMint) {
			// skip mint inscriptions with amount greater than limit per mint
			return
		}
		// clamp to remaining supply so the final mint cannot overshoot
		mintableAmount := tickEntry.TotalSupply.Sub(tickEntry.MintedAmount)
		if payload.Amt.GreaterThan(mintableAmount) {
			payload.Amt = mintableAmount
		}
		var parentId *ordinals.InscriptionId
		if tickEntry.IsSelfMint {
			parentIdValue, ok := inscriptionIdsToParent[payload.Transfer.InscriptionId]
			if !ok {
				// skip mint inscriptions for self mint ticks without parent inscription
				return
			}
			if parentIdValue != tickEntry.DeployInscriptionId {
				// skip mint inscriptions for self mint ticks with invalid parent inscription
				return
			}
			parentId = &parentIdValue
		}
		tickEntry.MintedAmount = tickEntry.MintedAmount.Add(payload.Amt)
		// mark as completed if this mint completes the total supply
		if tickEntry.MintedAmount.GreaterThanOrEqual(tickEntry.TotalSupply) {
			tickEntry.CompletedAt = blockHeader.Timestamp
			tickEntry.CompletedAtHeight = payload.Transfer.BlockHeight
		}
		newTickEntryStates[payload.Tick] = tickEntry
		event := &entity.EventMint{
			Id:                latestEventId + 1,
			InscriptionId:     payload.Transfer.InscriptionId,
			InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
			Tick:              payload.Tick,
			OriginalTick:      payload.OriginalTick,
			TxHash:            payload.Transfer.TxHash,
			BlockHeight:       payload.Transfer.BlockHeight,
			TxIndex:           payload.Transfer.TxIndex,
			Timestamp:         blockHeader.Timestamp,
			PkScript:          payload.Transfer.NewPkScript,
			SatPoint:          payload.Transfer.NewSatPoint,
			Amount:            payload.Amt,
			ParentId:          parentId,
		}
		newEventMints = append(newEventMints, event)
		latestEventId++
		eventHashBuilder.WriteString(getEventMintString(event, tickEntry.Decimals) + eventHashSeparator)
	}

	// handleEventInscribeTransfer moves the transferred amount from the
	// holder's available balance into the pending transfer inscription.
	handleEventInscribeTransfer := func(payload *brc20.Payload, tickEntry *entity.TickEntry) {
		// inscribe transfer event
		pkScriptHex := hex.EncodeToString(payload.Transfer.NewPkScript)
		balance, ok := balances[pkScriptHex][payload.Tick]
		if !ok {
			balance = &entity.Balance{
				PkScript:         payload.Transfer.NewPkScript,
				Tick:             payload.Tick,
				BlockHeight:      uint64(blockHeader.Height - 1),
				OverallBalance:   decimal.Zero, // defaults balance to zero if not found
				AvailableBalance: decimal.Zero,
			}
		}
		if payload.Amt.GreaterThan(balance.AvailableBalance) {
			// skip inscribe transfer event if amount exceeds available balance
			return
		}
		// update balance state
		balance.BlockHeight = uint64(blockHeader.Height)
		balance.AvailableBalance = balance.AvailableBalance.Sub(payload.Amt)
		if _, ok := balances[pkScriptHex]; !ok {
			balances[pkScriptHex] = make(map[string]*entity.Balance)
		}
		balances[pkScriptHex][payload.Tick] = balance
		if _, ok := newBalances[pkScriptHex]; !ok {
			newBalances[pkScriptHex] = make(map[string]*entity.Balance)
		}
		// stage the updated balance for persistence (previously an empty
		// entity.Balance{} was staged here, losing the deducted amounts)
		newBalances[pkScriptHex][payload.Tick] = balance
		event := &entity.EventInscribeTransfer{
			Id:                latestEventId + 1,
			InscriptionId:     payload.Transfer.InscriptionId,
			InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
			Tick:              payload.Tick,
			OriginalTick:      payload.OriginalTick,
			TxHash:            payload.Transfer.TxHash,
			BlockHeight:       payload.Transfer.BlockHeight,
			TxIndex:           payload.Transfer.TxIndex,
			Timestamp:         blockHeader.Timestamp,
			PkScript:          payload.Transfer.NewPkScript,
			SatPoint:          payload.Transfer.NewSatPoint,
			OutputIndex:       payload.Transfer.NewSatPoint.OutPoint.Index,
			SatsAmount:        payload.Transfer.NewOutputValue,
			Amount:            payload.Amt,
		}
		latestEventId++
		eventInscribeTransfers[payload.Transfer.InscriptionId] = event
		newEventInscribeTransfers = append(newEventInscribeTransfers, event)
		eventHashBuilder.WriteString(getEventInscribeTransferString(event, tickEntry.Decimals) + eventHashSeparator)
	}

	// handleEventTransferTransferAsFee handles a transfer inscription spent
	// as fee: the pending amount is returned to the sender's available
	// balance instead of moving to a receiver.
	handleEventTransferTransferAsFee := func(payload *brc20.Payload, tickEntry *entity.TickEntry, inscribeTransfer *entity.EventInscribeTransfer) {
		// return balance to sender
		fromPkScriptHex := hex.EncodeToString(inscribeTransfer.PkScript)
		fromBalance, ok := balances[fromPkScriptHex][payload.Tick]
		if !ok {
			fromBalance = &entity.Balance{
				PkScript:         inscribeTransfer.PkScript,
				Tick:             payload.Tick,
				BlockHeight:      uint64(blockHeader.Height),
				OverallBalance:   decimal.Zero, // defaults balance to zero if not found
				AvailableBalance: decimal.Zero,
			}
		}
		fromBalance.BlockHeight = uint64(blockHeader.Height)
		fromBalance.AvailableBalance = fromBalance.AvailableBalance.Add(payload.Amt)
		if _, ok := balances[fromPkScriptHex]; !ok {
			balances[fromPkScriptHex] = make(map[string]*entity.Balance)
		}
		balances[fromPkScriptHex][payload.Tick] = fromBalance
		if _, ok := newBalances[fromPkScriptHex]; !ok {
			newBalances[fromPkScriptHex] = make(map[string]*entity.Balance)
		}
		newBalances[fromPkScriptHex][payload.Tick] = fromBalance
		event := &entity.EventTransferTransfer{
			Id:                latestEventId + 1,
			InscriptionId:     payload.Transfer.InscriptionId,
			InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
			Tick:              payload.Tick,
			OriginalTick:      payload.OriginalTick,
			TxHash:            payload.Transfer.TxHash,
			BlockHeight:       payload.Transfer.BlockHeight,
			TxIndex:           payload.Transfer.TxIndex,
			Timestamp:         blockHeader.Timestamp,
			FromPkScript:      inscribeTransfer.PkScript,
			FromSatPoint:      inscribeTransfer.SatPoint,
			FromInputIndex:    payload.Transfer.FromInputIndex,
			ToPkScript:        payload.Transfer.NewPkScript,
			ToSatPoint:        payload.Transfer.NewSatPoint,
			ToOutputIndex:     payload.Transfer.NewSatPoint.OutPoint.Index,
			SpentAsFee:        true,
			Amount:            payload.Amt,
		}
		newEventTransferTransfers = append(newEventTransferTransfers, event)
		// advance the id counter like every other handler so event ids stay
		// unique (previously missing, causing duplicate event ids)
		latestEventId++
		eventHashBuilder.WriteString(getEventTransferTransferString(event, tickEntry.Decimals) + eventHashSeparator)
	}

	// handleEventTransferTransferNormal completes a pending transfer:
	// the amount leaves the sender's overall balance and is credited to the
	// receiver (or burned when sent to an OP_RETURN output).
	handleEventTransferTransferNormal := func(payload *brc20.Payload, tickEntry *entity.TickEntry, inscribeTransfer *entity.EventInscribeTransfer) {
		// subtract balance from sender
		fromPkScriptHex := hex.EncodeToString(inscribeTransfer.PkScript)
		fromBalance, ok := balances[fromPkScriptHex][payload.Tick]
		if !ok {
			// skip transfer transfer event if from balance does not exist
			return
		}
		fromBalance.BlockHeight = uint64(blockHeader.Height)
		fromBalance.OverallBalance = fromBalance.OverallBalance.Sub(payload.Amt)
		if _, ok := balances[fromPkScriptHex]; !ok {
			balances[fromPkScriptHex] = make(map[string]*entity.Balance)
		}
		balances[fromPkScriptHex][payload.Tick] = fromBalance
		if _, ok := newBalances[fromPkScriptHex]; !ok {
			newBalances[fromPkScriptHex] = make(map[string]*entity.Balance)
		}
		newBalances[fromPkScriptHex][payload.Tick] = fromBalance
		// add balance to receiver
		if bytes.Equal(payload.Transfer.NewPkScript, []byte{0x6a}) {
			// burn if sent to OP_RETURN
			tickEntry.BurnedAmount = tickEntry.BurnedAmount.Add(payload.Amt)
			tickEntries[payload.Tick] = tickEntry
			newTickEntryStates[payload.Tick] = tickEntry
		} else {
			toPkScriptHex := hex.EncodeToString(payload.Transfer.NewPkScript)
			toBalance, ok := balances[toPkScriptHex][payload.Tick]
			if !ok {
				toBalance = &entity.Balance{
					PkScript:         payload.Transfer.NewPkScript,
					Tick:             payload.Tick,
					BlockHeight:      uint64(blockHeader.Height),
					OverallBalance:   decimal.Zero, // defaults balance to zero if not found
					AvailableBalance: decimal.Zero,
				}
			}
			toBalance.BlockHeight = uint64(blockHeader.Height)
			toBalance.OverallBalance = toBalance.OverallBalance.Add(payload.Amt)
			toBalance.AvailableBalance = toBalance.AvailableBalance.Add(payload.Amt)
			if _, ok := balances[toPkScriptHex]; !ok {
				balances[toPkScriptHex] = make(map[string]*entity.Balance)
			}
			balances[toPkScriptHex][payload.Tick] = toBalance
			if _, ok := newBalances[toPkScriptHex]; !ok {
				newBalances[toPkScriptHex] = make(map[string]*entity.Balance)
			}
			newBalances[toPkScriptHex][payload.Tick] = toBalance
		}
		event := &entity.EventTransferTransfer{
			Id:                latestEventId + 1,
			InscriptionId:     payload.Transfer.InscriptionId,
			InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
			Tick:              payload.Tick,
			OriginalTick:      payload.OriginalTick,
			TxHash:            payload.Transfer.TxHash,
			BlockHeight:       payload.Transfer.BlockHeight,
			TxIndex:           payload.Transfer.TxIndex,
			Timestamp:         blockHeader.Timestamp,
			FromPkScript:      inscribeTransfer.PkScript,
			FromSatPoint:      inscribeTransfer.SatPoint,
			FromInputIndex:    payload.Transfer.FromInputIndex,
			ToPkScript:        payload.Transfer.NewPkScript,
			ToSatPoint:        payload.Transfer.NewSatPoint,
			ToOutputIndex:     payload.Transfer.NewSatPoint.OutPoint.Index,
			SpentAsFee:        false,
			Amount:            payload.Amt,
		}
		newEventTransferTransfers = append(newEventTransferTransfers, event)
		// advance the id counter like every other handler so event ids stay
		// unique (previously missing, causing duplicate event ids)
		latestEventId++
		eventHashBuilder.WriteString(getEventTransferTransferString(event, tickEntry.Decimals) + eventHashSeparator)
	}

	for _, payload := range payloads {
		tickEntry := tickEntries[payload.Tick]
		if payload.Transfer.SentAsFee && payload.Transfer.OldSatPoint == (ordinals.SatPoint{}) {
			// skip inscriptions inscribed as fee
			continue
		}
		switch payload.Op {
		case brc20.OperationDeploy:
			handleEventDeploy(payload, tickEntry)
		case brc20.OperationMint:
			handleEventMint(payload, tickEntry)
		case brc20.OperationTransfer:
			if payload.Transfer.TransferCount > 2 {
				// skip used transfer inscriptions
				continue
			}
			if tickEntry == nil {
				// skip transfer inscriptions for non-existent ticks
				continue
			}
			if -payload.Amt.Exponent() > int32(tickEntry.Decimals) {
				// skip transfer inscriptions with decimals greater than allowed
				continue
			}
			if payload.Transfer.OldSatPoint == (ordinals.SatPoint{}) {
				handleEventInscribeTransfer(payload, tickEntry)
			} else {
				// transfer transfer event
				inscribeTransfer, ok := eventInscribeTransfers[payload.Transfer.InscriptionId]
				if !ok {
					// skip transfer transfer event if prior inscribe transfer event does not exist
					continue
				}
				if payload.Transfer.SentAsFee {
					handleEventTransferTransferAsFee(payload, tickEntry, inscribeTransfer)
				} else {
					handleEventTransferTransferNormal(payload, tickEntry, inscribeTransfer)
				}
			}
		}
	}
	p.newTickEntries = newTickEntries
	p.newTickEntryStates = newTickEntryStates
	p.newEventDeploys = newEventDeploys
	p.newEventMints = newEventMints
	p.newEventInscribeTransfers = newEventInscribeTransfers
	p.newEventTransferTransfers = newEventTransferTransfers
	p.newBalances = newBalances
	p.eventHashString = eventHashBuilder.String()
	return nil
}

View File

@@ -0,0 +1,625 @@
package brc20
import (
"context"
"encoding/json"
"slices"
"sync"
"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/samber/lo"
"golang.org/x/sync/errgroup"
)
// processInscriptionTx processes a single transaction for inscription
// creation and movement. It parses inscription envelopes from the tx,
// tracks existing inscriptions entering via the tx inputs, assigns curse
// status and new satpoints, and updates each inscription's location.
// Inscriptions whose offset exceeds the tx's output value are spent as fee
// and deferred to the block's coinbase tx via p.flotsamsSentAsFee.
//
// transfersInOutPoints and outpointValues are block-scoped caches that this
// function both reads and extends as it walks the block's transactions.
func (p *Processor) processInscriptionTx(ctx context.Context, tx *types.Transaction, blockHeader types.BlockHeader, transfersInOutPoints map[wire.OutPoint]map[ordinals.SatPoint][]*entity.InscriptionTransfer, outpointValues map[wire.OutPoint]uint64) error {
	ctx = logger.WithContext(ctx, slogx.String("tx_hash", tx.TxHash.String()))
	envelopes := ordinals.ParseEnvelopesFromTx(tx)
	inputOutPoints := lo.Map(tx.TxIn, func(txIn *types.TxIn, _ int) wire.OutPoint {
		return wire.OutPoint{
			Hash:  txIn.PreviousOutTxHash,
			Index: txIn.PreviousOutIndex,
		}
	})
	// cache outpoint values for future blocks
	for outIndex, txOut := range tx.TxOut {
		outPoint := wire.OutPoint{
			Hash:  tx.TxHash,
			Index: uint32(outIndex),
		}
		p.outPointValueCache.Add(outPoint, uint64(txOut.Value))
		outpointValues[outPoint] = uint64(txOut.Value)
	}
	outPointsWithTransfers := lo.Keys(transfersInOutPoints)
	txContainsTransfers := len(lo.Intersect(inputOutPoints, outPointsWithTransfers)) > 0
	isCoinbase := tx.TxIn[0].PreviousOutTxHash.IsEqual(&chainhash.Hash{})
	if len(envelopes) == 0 && !txContainsTransfers && !isCoinbase {
		// no inscription activity, skip
		return nil
	}
	// Ensure outpoint values exists for all inputs. Some tx inputs may not be prefetched if it contains inscriptions transfers from other txs in the same block.
	if err := p.ensureOutPointValues(ctx, outpointValues, inputOutPoints); err != nil {
		return errors.Wrap(err, "failed to ensure outpoint values")
	}
	floatingInscriptions := make([]*entity.Flotsam, 0)
	totalInputValue := uint64(0)
	totalOutputValue := lo.SumBy(tx.TxOut, func(txOut *types.TxOut) uint64 { return uint64(txOut.Value) })
	inscribeOffsets := make(map[uint64]*struct {
		inscriptionId ordinals.InscriptionId
		count         int
	})
	idCounter := uint32(0)
	for i, input := range tx.TxIn {
		// skip coinbase inputs since there can't be an inscription in coinbase
		if input.PreviousOutTxHash.IsEqual(&chainhash.Hash{}) {
			totalInputValue += p.getBlockSubsidy(uint64(tx.BlockHeight))
			continue
		}
		inputOutPoint := wire.OutPoint{
			Hash:  input.PreviousOutTxHash,
			Index: input.PreviousOutIndex,
		}
		inputValue, ok := outpointValues[inputOutPoint]
		if !ok {
			return errors.Wrapf(errs.NotFound, "outpoint value not found for %s", inputOutPoint.String())
		}
		// existing inscriptions entering this tx become floating inscriptions
		// positioned at (input offset so far + their satpoint offset)
		transfersInOutPoint := transfersInOutPoints[inputOutPoint]
		for satPoint, transfers := range transfersInOutPoint {
			offset := totalInputValue + satPoint.Offset
			for _, transfer := range transfers {
				floatingInscriptions = append(floatingInscriptions, &entity.Flotsam{
					Offset:        offset,
					InscriptionId: transfer.InscriptionId,
					Tx:            tx,
					OriginOld: &entity.OriginOld{
						OldSatPoint: satPoint,
						Content:     transfer.Content,
						InputIndex:  uint32(i),
					},
				})
				if _, ok := inscribeOffsets[offset]; !ok {
					inscribeOffsets[offset] = &struct {
						inscriptionId ordinals.InscriptionId
						count         int
					}{transfer.InscriptionId, 0}
				}
				inscribeOffsets[offset].count++
			}
		}
		// offset on output to inscribe new inscriptions from this input
		offset := totalInputValue
		totalInputValue += inputValue
		envelopesInInput := lo.Filter(envelopes, func(envelope *ordinals.Envelope, _ int) bool {
			return envelope.InputIndex == uint32(i)
		})
		for _, envelope := range envelopesInInput {
			inscriptionId := ordinals.InscriptionId{
				TxHash: tx.TxHash,
				Index:  idCounter,
			}
			var cursed, cursedForBRC20 bool
			if envelope.UnrecognizedEvenField || // unrecognized even field
				envelope.DuplicateField || // duplicate field
				envelope.IncompleteField || // incomplete field
				envelope.InputIndex != 0 || // not first input
				envelope.Offset != 0 || // not first envelope in input
				envelope.Inscription.Pointer != nil || // contains pointer
				envelope.PushNum || // contains pushnum opcodes
				envelope.Stutter { // contains stuttering curse structure
				cursed = true
				cursedForBRC20 = true
			}
			if initial, ok := inscribeOffsets[offset]; !cursed && ok {
				if initial.count > 1 {
					cursed = true // reinscription
					cursedForBRC20 = true
				} else {
					initialInscriptionEntry, err := p.getInscriptionEntryById(ctx, initial.inscriptionId)
					if err != nil {
						return errors.Wrapf(err, "failed to get inscription entry id %s", initial.inscriptionId)
					}
					if !initialInscriptionEntry.Cursed {
						cursed = true // reinscription curse if initial inscription is not cursed
					}
					if !initialInscriptionEntry.CursedForBRC20 {
						cursedForBRC20 = true
					}
				}
			}
			// inscriptions are no longer cursed after jubilee, but BRC20 still considers them as cursed
			if cursed && uint64(tx.BlockHeight) >= ordinals.GetJubileeHeight(p.network) {
				cursed = false
			}
			unbound := inputValue == 0 || envelope.UnrecognizedEvenField
			if envelope.Inscription.Pointer != nil && *envelope.Inscription.Pointer < totalOutputValue {
				offset = *envelope.Inscription.Pointer
			}
			floatingInscriptions = append(floatingInscriptions, &entity.Flotsam{
				Offset:        offset,
				InscriptionId: inscriptionId,
				Tx:            tx,
				OriginNew: &entity.OriginNew{
					Reinscription:  inscribeOffsets[offset] != nil,
					Cursed:         cursed,
					CursedForBRC20: cursedForBRC20,
					Fee:            0,
					Hidden:         false, // we don't care about this field for brc20
					Parent:         envelope.Inscription.Parent,
					Pointer:        envelope.Inscription.Pointer,
					Unbound:        unbound,
					Inscription:    envelope.Inscription,
				},
			})
			if _, ok := inscribeOffsets[offset]; !ok {
				inscribeOffsets[offset] = &struct {
					inscriptionId ordinals.InscriptionId
					count         int
				}{inscriptionId, 0}
			}
			inscribeOffsets[offset].count++
			idCounter++
		}
	}
	// parents must exist in floatingInscriptions to be valid
	potentialParents := make(map[ordinals.InscriptionId]struct{})
	for _, flotsam := range floatingInscriptions {
		potentialParents[flotsam.InscriptionId] = struct{}{}
	}
	for _, flotsam := range floatingInscriptions {
		if flotsam.OriginNew != nil && flotsam.OriginNew.Parent != nil {
			if _, ok := potentialParents[*flotsam.OriginNew.Parent]; !ok {
				// parent not found, ignore parent
				flotsam.OriginNew.Parent = nil
			}
		}
	}
	// calculate fee for each new inscription (idCounter > 0 whenever a
	// flotsam with OriginNew exists, so the division is safe)
	for _, flotsam := range floatingInscriptions {
		if flotsam.OriginNew != nil {
			flotsam.OriginNew.Fee = (totalInputValue - totalOutputValue) / uint64(idCounter)
		}
	}
	// if tx is coinbase, add inscriptions sent as fee to outputs of this tx
	ownInscriptionCount := len(floatingInscriptions)
	if isCoinbase {
		floatingInscriptions = append(floatingInscriptions, p.flotsamsSentAsFee...)
	}
	// sort floatingInscriptions by offset
	slices.SortFunc(floatingInscriptions, func(i, j *entity.Flotsam) int {
		return int(i.Offset) - int(j.Offset)
	})
	outputValue := uint64(0)
	curIncrIdx := 0
	// newLocations := make(map[ordinals.SatPoint][]*Flotsam)
	type location struct {
		satPoint  ordinals.SatPoint
		flotsam   *entity.Flotsam
		sentAsFee bool
	}
	newLocations := make([]*location, 0)
	outputToSumValue := make([]uint64, 0, len(tx.TxOut))
	for outIndex, txOut := range tx.TxOut {
		end := outputValue + uint64(txOut.Value)
		// process all inscriptions that are supposed to be inscribed in this output
		for curIncrIdx < len(floatingInscriptions) && floatingInscriptions[curIncrIdx].Offset < end {
			newSatPoint := ordinals.SatPoint{
				OutPoint: wire.OutPoint{
					Hash:  tx.TxHash,
					Index: uint32(outIndex),
				},
				Offset: floatingInscriptions[curIncrIdx].Offset - outputValue,
			}
			// newLocations[newSatPoint] = append(newLocations[newSatPoint], floatingInscriptions[curIncrIdx])
			newLocations = append(newLocations, &location{
				satPoint:  newSatPoint,
				flotsam:   floatingInscriptions[curIncrIdx],
				sentAsFee: isCoinbase && curIncrIdx >= ownInscriptionCount, // if curIncrIdx >= ownInscriptionCount, then current inscription came from p.flotsamsSentAsFee
			})
			curIncrIdx++
		}
		outputValue = end
		outputToSumValue = append(outputToSumValue, outputValue)
	}
	for _, loc := range newLocations {
		satPoint := loc.satPoint
		flotsam := loc.flotsam
		sentAsFee := loc.sentAsFee
		// TODO: not sure if we still need to handle pointer here, it's already handled above.
		if flotsam.OriginNew != nil && flotsam.OriginNew.Pointer != nil {
			pointer := *flotsam.OriginNew.Pointer
			for outIndex, outputValue := range outputToSumValue {
				start := uint64(0)
				if outIndex > 0 {
					start = outputToSumValue[outIndex-1]
				}
				end := outputValue
				if start <= pointer && pointer < end {
					satPoint.Offset = pointer - start
					break
				}
			}
		}
		if err := p.updateInscriptionLocation(ctx, satPoint, flotsam, sentAsFee, tx, blockHeader, transfersInOutPoints); err != nil {
			return errors.Wrap(err, "failed to update inscription location")
		}
	}
	// handle leftover flotsams (flotsams with offset over total output value)
	if isCoinbase {
		// if there are leftover inscriptions in coinbase, they are lost permanently
		for _, flotsam := range floatingInscriptions[curIncrIdx:] {
			newSatPoint := ordinals.SatPoint{
				OutPoint: wire.OutPoint{},
				Offset:   p.lostSats + flotsam.Offset - totalOutputValue,
			}
			if err := p.updateInscriptionLocation(ctx, newSatPoint, flotsam, false, tx, blockHeader, transfersInOutPoints); err != nil {
				return errors.Wrap(err, "failed to update inscription location")
			}
		}
		p.lostSats += p.blockReward - totalOutputValue
	} else {
		// if there are leftover inscriptions in non-coinbase tx, they are stored in p.flotsamsSentAsFee for processing in this block's coinbase tx
		for _, flotsam := range floatingInscriptions[curIncrIdx:] {
			flotsam.Offset = p.blockReward + flotsam.Offset - totalOutputValue
			p.flotsamsSentAsFee = append(p.flotsamsSentAsFee, flotsam)
		}
		// accumulate this tx's fee into the block reward so later fee-spent
		// flotsams (and the coinbase lost-sats calculation) are offset
		// against the full reward collected so far; plain assignment here
		// would discard fees from earlier txs in the same block
		p.blockReward += totalInputValue - totalOutputValue
	}
	return nil
}
// updateInscriptionLocation records a flotsam's new location, either as a
// transfer of an existing inscription (OriginOld) or as the creation of a
// new one (OriginNew). In both cases it appends an InscriptionTransfer to
// p.newInscriptionTransfers, updates the entry state maps on the Processor,
// and extends the block-scoped transfersInOutPoints cache so later txs in
// the same block can see the move.
//
// NOTE(review): assumes exactly one of flotsam.OriginOld / flotsam.OriginNew
// is non-nil — it panics otherwise.
func (p *Processor) updateInscriptionLocation(ctx context.Context, newSatPoint ordinals.SatPoint, flotsam *entity.Flotsam, sentAsFee bool, tx *types.Transaction, blockHeader types.BlockHeader, transfersInOutPoints map[wire.OutPoint]map[ordinals.SatPoint][]*entity.InscriptionTransfer) error {
	// output the inscription now resides in (index taken from the new satpoint)
	txOut := tx.TxOut[newSatPoint.OutPoint.Index]
	if flotsam.OriginOld != nil {
		// Existing inscription moved: bump its transfer count and record the hop.
		entry, err := p.getInscriptionEntryById(ctx, flotsam.InscriptionId)
		if err != nil {
			return errors.Wrapf(err, "failed to get inscription entry id %s", flotsam.InscriptionId)
		}
		entry.TransferCount++
		transfer := &entity.InscriptionTransfer{
			InscriptionId:             flotsam.InscriptionId,
			InscriptionNumber:         entry.Number,
			InscriptionSequenceNumber: entry.SequenceNumber,
			BlockHeight:               uint64(flotsam.Tx.BlockHeight), // use flotsam's tx to track tx that initiated the transfer
			TxIndex:                   flotsam.Tx.Index,               // use flotsam's tx to track tx that initiated the transfer
			TxHash:                    flotsam.Tx.TxHash,
			Content:                   flotsam.OriginOld.Content,
			FromInputIndex:            flotsam.OriginOld.InputIndex,
			OldSatPoint:               flotsam.OriginOld.OldSatPoint,
			NewSatPoint:               newSatPoint,
			NewPkScript:               txOut.PkScript,
			NewOutputValue:            uint64(txOut.Value),
			SentAsFee:                 sentAsFee,
			TransferCount:             entry.TransferCount,
		}
		// track transfers even if transfer count exceeds 2 (because we need to check for reinscriptions)
		p.newInscriptionTransfers = append(p.newInscriptionTransfers, transfer)
		p.newInscriptionEntryStates[entry.Id] = entry
		// add new transfer to transfersInOutPoints cache
		if _, ok := transfersInOutPoints[newSatPoint.OutPoint]; !ok {
			transfersInOutPoints[newSatPoint.OutPoint] = make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
		}
		transfersInOutPoints[newSatPoint.OutPoint][newSatPoint] = append(transfersInOutPoints[newSatPoint.OutPoint][newSatPoint], transfer)
		return nil
	}
	if flotsam.OriginNew != nil {
		// Newly inscribed: assign sequence/inscription numbers from the
		// processor's running counters (cursed numbers are negative,
		// starting at -1; blessed numbers start at 0).
		origin := flotsam.OriginNew
		var inscriptionNumber int64
		sequenceNumber := p.cursedInscriptionCount + p.blessedInscriptionCount
		if origin.Cursed {
			inscriptionNumber = -int64(p.cursedInscriptionCount + 1)
			p.cursedInscriptionCount++
		} else {
			inscriptionNumber = int64(p.blessedInscriptionCount)
			p.blessedInscriptionCount++
		}
		// if not valid brc20 inscription, delete content to save space
		if !isBRC20Inscription(origin.Inscription) {
			origin.Inscription.Content = nil
			origin.Inscription.ContentType = ""
			origin.Inscription.ContentEncoding = ""
		}
		transfer := &entity.InscriptionTransfer{
			InscriptionId:             flotsam.InscriptionId,
			InscriptionNumber:         inscriptionNumber,
			InscriptionSequenceNumber: sequenceNumber,
			BlockHeight:               uint64(flotsam.Tx.BlockHeight), // use flotsam's tx to track tx that initiated the transfer
			TxIndex:                   flotsam.Tx.Index,               // use flotsam's tx to track tx that initiated the transfer
			TxHash:                    flotsam.Tx.TxHash,
			Content:                   origin.Inscription.Content,
			FromInputIndex:            0, // unused
			OldSatPoint:               ordinals.SatPoint{},
			NewSatPoint:               newSatPoint,
			NewPkScript:               txOut.PkScript,
			NewOutputValue:            uint64(txOut.Value),
			SentAsFee:                 sentAsFee,
			TransferCount:             1, // count inscription as first transfer
		}
		entry := &ordinals.InscriptionEntry{
			Id:              flotsam.InscriptionId,
			Number:          inscriptionNumber,
			SequenceNumber:  sequenceNumber,
			Cursed:          origin.Cursed,
			CursedForBRC20:  origin.CursedForBRC20,
			CreatedAt:       blockHeader.Timestamp,
			CreatedAtHeight: uint64(blockHeader.Height),
			Inscription:     origin.Inscription,
			TransferCount:   1, // count inscription as first transfer
		}
		p.newInscriptionTransfers = append(p.newInscriptionTransfers, transfer)
		p.newInscriptionEntries[entry.Id] = entry
		p.newInscriptionEntryStates[entry.Id] = entry
		// add new transfer to transfersInOutPoints cache
		if _, ok := transfersInOutPoints[newSatPoint.OutPoint]; !ok {
			transfersInOutPoints[newSatPoint.OutPoint] = make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
		}
		transfersInOutPoints[newSatPoint.OutPoint][newSatPoint] = append(transfersInOutPoints[newSatPoint.OutPoint][newSatPoint], transfer)
		return nil
	}
	// invariant violated: flotsam carries neither an old nor a new origin
	panic("unreachable")
}
// ensureOutPointValues guarantees that outPointValues holds an entry for every
// outpoint in outPoints. Missing entries are fetched via getOutPointValues and
// merged into the map in place.
func (p *Processor) ensureOutPointValues(ctx context.Context, outPointValues map[wire.OutPoint]uint64, outPoints []wire.OutPoint) error {
	missing := make([]wire.OutPoint, 0)
	for _, op := range outPoints {
		if _, found := outPointValues[op]; !found {
			missing = append(missing, op)
		}
	}
	// nothing to do when every outpoint is already known
	if len(missing) == 0 {
		return nil
	}
	fetched, err := p.getOutPointValues(ctx, missing)
	if err != nil {
		return errors.Wrap(err, "failed to get outpoint values")
	}
	for op, value := range fetched {
		outPointValues[op] = value
	}
	return nil
}
// brc20Inscription is the minimal JSON shape used to detect a BRC-20 payload:
// only the protocol discriminator field "p" is inspected.
type brc20Inscription struct {
	P string `json:"p"`
}
// isBRC20Inscription reports whether the inscription payload looks like a
// BRC-20 operation: an accepted content type whose JSON body carries
// `"p": "brc-20"`.
//
// NOTE(review): media-type parameters (e.g. "text/plain;charset=utf-8") are
// now stripped before comparison. Real-world BRC-20 inscriptions very commonly
// carry a charset parameter, and the previous exact string match rejected
// them — confirm upstream does not already normalize ContentType.
func isBRC20Inscription(inscription ordinals.Inscription) bool {
	if inscription.Content == nil {
		return false
	}
	// strip media-type parameters: "text/plain;charset=utf-8" -> "text/plain"
	contentType := inscription.ContentType
	for i := 0; i < len(contentType); i++ {
		if contentType[i] == ';' {
			contentType = contentType[:i]
			break
		}
	}
	if contentType != "application/json" && contentType != "text/plain" {
		return false
	}
	// attempt to parse content as json; non-JSON content is not BRC-20
	var parsed brc20Inscription
	if err := json.Unmarshal(inscription.Content, &parsed); err != nil {
		return false
	}
	return parsed.P == "brc-20"
}
// getOutPointValues returns the value (in satoshis) of each requested
// outpoint. Cached values are served from outPointValueCache; the remaining
// outpoints' parent transactions are fetched concurrently from the Bitcoin
// client (one fetch per distinct tx hash) and their outputs are added to the
// cache. Coinbase inputs (zero hash) are skipped and absent from the result.
//
// NOTE(review): a value of 0 is indistinguishable from a cache miss (both in
// MGet results and in the final result check), so zero-value outputs are
// re-fetched on every call — confirm this is acceptable.
func (p *Processor) getOutPointValues(ctx context.Context, outPoints []wire.OutPoint) (map[wire.OutPoint]uint64, error) {
	// try to get from cache if exists
	cacheValues := p.outPointValueCache.MGet(outPoints)
	result := make(map[wire.OutPoint]uint64)
	outPointsToFetch := make([]wire.OutPoint, 0)
	for i, outPoint := range outPoints {
		if outPoint.Hash == (chainhash.Hash{}) {
			// skip coinbase input
			continue
		}
		if cacheValues[i] != 0 {
			result[outPoint] = cacheValues[i]
		} else {
			outPointsToFetch = append(outPointsToFetch, outPoint)
		}
	}
	// fetch each distinct parent transaction exactly once, concurrently
	eg, ectx := errgroup.WithContext(ctx)
	txHashes := make(map[chainhash.Hash]struct{})
	for _, outPoint := range outPointsToFetch {
		txHashes[outPoint.Hash] = struct{}{}
	}
	txOutsByHash := make(map[chainhash.Hash][]*types.TxOut)
	var mutex sync.Mutex // guards txOutsByHash writes across fetch goroutines
	for txHash := range txHashes {
		txHash := txHash // capture loop variable for the goroutine (pre-Go 1.22 semantics)
		eg.Go(func() error {
			txOuts, err := p.btcClient.GetTransactionOutputs(ectx, txHash)
			if err != nil {
				return errors.Wrapf(err, "failed to get transaction outputs for hash %s", txHash)
			}
			// update cache
			mutex.Lock()
			defer mutex.Unlock()
			txOutsByHash[txHash] = txOuts
			for i, txOut := range txOuts {
				p.outPointValueCache.Add(wire.OutPoint{Hash: txHash, Index: uint32(i)}, uint64(txOut.Value))
			}
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, errors.WithStack(err)
	}
	// fill in the values that were not served from the cache
	for i := range outPoints {
		if outPoints[i].Hash == (chainhash.Hash{}) {
			// skip coinbase input
			continue
		}
		if result[outPoints[i]] == 0 {
			// NOTE(review): indexes the fetched outputs without a bounds check;
			// an out-of-range outPoint.Index would panic — confirm inputs are
			// always validated upstream.
			result[outPoints[i]] = uint64(txOutsByHash[outPoints[i].Hash][outPoints[i].Index].Value)
		}
	}
	return result, nil
}
// getInscriptionTransfersInOutPoints returns all known inscription transfers
// landing in the given outpoints, grouped by outpoint and then by satpoint.
// Transfers buffered in memory (not yet flushed) take precedence; outpoints
// with no buffered transfers fall back to persistent storage. Each satpoint's
// transfer list is sorted by inscription sequence number.
func (p *Processor) getInscriptionTransfersInOutPoints(ctx context.Context, outPoints []wire.OutPoint) (map[wire.OutPoint]map[ordinals.SatPoint][]*entity.InscriptionTransfer, error) {
	outPoints = lo.Uniq(outPoints)
	// Index the flush buffer by outpoint once, instead of rescanning the whole
	// buffer for every requested outpoint (previously O(outpoints*buffer)).
	buffered := make(map[wire.OutPoint][]*entity.InscriptionTransfer)
	for _, transfer := range p.newInscriptionTransfers {
		op := transfer.NewSatPoint.OutPoint
		buffered[op] = append(buffered[op], transfer)
	}
	result := make(map[wire.OutPoint]map[ordinals.SatPoint][]*entity.InscriptionTransfer)
	outPointsToFetch := make([]wire.OutPoint, 0)
	for _, outPoint := range outPoints {
		bufferedTransfers, found := buffered[outPoint]
		if !found {
			// not in the flush buffer, fall back to storage below
			outPointsToFetch = append(outPointsToFetch, outPoint)
			continue
		}
		for _, transfer := range bufferedTransfers {
			if _, ok := result[outPoint]; !ok {
				result[outPoint] = make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
			}
			result[outPoint][transfer.NewSatPoint] = append(result[outPoint][transfer.NewSatPoint], transfer)
		}
	}
	transfers, err := p.brc20Dg.GetInscriptionTransfersInOutPoints(ctx, outPointsToFetch)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get inscriptions by outpoint")
	}
	for satPoint, transferList := range transfers {
		if _, ok := result[satPoint.OutPoint]; !ok {
			result[satPoint.OutPoint] = make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
		}
		result[satPoint.OutPoint][satPoint] = append(result[satPoint.OutPoint][satPoint], transferList...)
	}
	for _, transfersBySatPoint := range result {
		for satPoint := range transfersBySatPoint {
			// sort all transfers by sequence number; three-way compare instead
			// of int subtraction, which can overflow for large uint64 sequence
			// numbers converted to int
			slices.SortFunc(transfersBySatPoint[satPoint], func(a, b *entity.InscriptionTransfer) int {
				switch {
				case a.InscriptionSequenceNumber < b.InscriptionSequenceNumber:
					return -1
				case a.InscriptionSequenceNumber > b.InscriptionSequenceNumber:
					return 1
				default:
					return 0
				}
			})
		}
	}
	return result, nil
}
// getInscriptionEntryById returns the latest inscription entry state for the
// given id, consulting the in-memory state buffer before persistent storage.
// Returns a wrapped errs.NotFound if the inscription does not exist.
func (p *Processor) getInscriptionEntryById(ctx context.Context, id ordinals.InscriptionId) (*ordinals.InscriptionEntry, error) {
	inscriptions, err := p.getInscriptionEntriesByIds(ctx, []ordinals.InscriptionId{id})
	if err != nil {
		// fixed: message previously said "by outpoint" (copy-paste from another query)
		return nil, errors.Wrap(err, "failed to get inscription entries by ids")
	}
	inscription, ok := inscriptions[id]
	if !ok {
		return nil, errors.Wrap(errs.NotFound, "inscription not found")
	}
	return inscription, nil
}
// getInscriptionEntriesByIds returns the latest inscription entry state for
// each of the given ids. Entries buffered in memory (not yet flushed) take
// precedence; the remaining ids are loaded from storage in one batch. Ids not
// found anywhere are simply absent from the result.
func (p *Processor) getInscriptionEntriesByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*ordinals.InscriptionEntry, error) {
	// try to get from cache if exists
	result := make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
	idsToFetch := make([]ordinals.InscriptionId, 0)
	for _, id := range ids {
		if inscriptionEntry, ok := p.newInscriptionEntryStates[id]; ok {
			result[id] = inscriptionEntry
		} else {
			idsToFetch = append(idsToFetch, id)
		}
	}
	if len(idsToFetch) > 0 {
		inscriptions, err := p.brc20Dg.GetInscriptionEntriesByIds(ctx, idsToFetch)
		if err != nil {
			// fixed: message previously said "by outpoint" (copy-paste from another query)
			return nil, errors.Wrap(err, "failed to get inscription entries by ids")
		}
		for id, inscription := range inscriptions {
			result[id] = inscription
		}
	}
	return result, nil
}
// getInscriptionNumbersByIds returns the inscription number for each of the
// given ids, consulting the in-memory state buffer before storage. Ids not
// found anywhere are absent from the result.
func (p *Processor) getInscriptionNumbersByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]int64, error) {
	// try to get from cache if exists
	result := make(map[ordinals.InscriptionId]int64)
	idsToFetch := make([]ordinals.InscriptionId, 0)
	for _, id := range ids {
		if entry, ok := p.newInscriptionEntryStates[id]; ok {
			result[id] = entry.Number // Number is already int64; no conversion needed
		} else {
			idsToFetch = append(idsToFetch, id)
		}
	}
	if len(idsToFetch) > 0 {
		inscriptions, err := p.brc20Dg.GetInscriptionNumbersByIds(ctx, idsToFetch)
		if err != nil {
			// fixed: message previously said "by outpoint" (copy-paste from another query)
			return nil, errors.Wrap(err, "failed to get inscription numbers by ids")
		}
		for id, number := range inscriptions {
			result[id] = number
		}
	}
	return result, nil
}
// getInscriptionParentsByIds returns the parent inscription id for each of the
// given ids that has one, consulting the in-memory state buffer before
// storage. Ids without a parent (or unknown ids) are absent from the result.
func (p *Processor) getInscriptionParentsByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]ordinals.InscriptionId, error) {
	// try to get from cache if exists
	result := make(map[ordinals.InscriptionId]ordinals.InscriptionId)
	idsToFetch := make([]ordinals.InscriptionId, 0)
	for _, id := range ids {
		if entry, ok := p.newInscriptionEntryStates[id]; ok {
			// buffered entries with no parent are intentionally omitted, not fetched
			if entry.Inscription.Parent != nil {
				result[id] = *entry.Inscription.Parent
			}
		} else {
			idsToFetch = append(idsToFetch, id)
		}
	}
	if len(idsToFetch) > 0 {
		inscriptions, err := p.brc20Dg.GetInscriptionParentsByIds(ctx, idsToFetch)
		if err != nil {
			// fixed: message previously said "by outpoint" (copy-paste from another query)
			return nil, errors.Wrap(err, "failed to get inscription parents by ids")
		}
		for id, parent := range inscriptions {
			result[id] = parent
		}
	}
	return result, nil
}
// getBlockSubsidy returns the block subsidy (newly minted satoshis, excluding
// fees) for the given height on the processor's configured network.
func (p *Processor) getBlockSubsidy(blockHeight uint64) uint64 {
	return uint64(blockchain.CalcBlockSubsidy(int32(blockHeight), p.network.ChainParams()))
}

View File

@@ -0,0 +1,271 @@
package brc20
import (
"context"
"crypto/sha256"
"encoding/hex"
"slices"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/samber/lo"
)
// Process implements indexer.Processor.
//
// For each block it: (1) collects all non-coinbase input outpoints,
// (2) pre-fetches known inscription transfers and outpoint values for
// transactions that may carry inscription activity, (3) processes every
// transaction (coinbase last), (4) orders the resulting transfers
// deterministically, then (5) applies BRC-20 state transitions and flushes
// everything for the block in one database transaction.
func (p *Processor) Process(ctx context.Context, blocks []*types.Block) error {
	for _, block := range blocks {
		// NOTE(review): ctx is reassigned on every iteration, so "height"
		// attributes from earlier blocks may accumulate on later ones — confirm
		// logger.WithContext replaces rather than appends, or use a loop-local ctx.
		ctx = logger.WithContext(ctx, slogx.Uint64("height", uint64(block.Header.Height)))
		p.blockReward = p.getBlockSubsidy(uint64(block.Header.Height))
		p.flotsamsSentAsFee = make([]*entity.Flotsam, 0)
		// put coinbase tx (first tx) at the end of block
		// NOTE(review): append may share block.Transactions' backing array when
		// spare capacity exists — confirm callers don't reuse the original slice.
		transactions := append(block.Transactions[1:], block.Transactions[0])
		var inputOutPoints []wire.OutPoint
		for _, tx := range transactions {
			for _, txIn := range tx.TxIn {
				if txIn.PreviousOutTxHash == (chainhash.Hash{}) {
					// skip coinbase input
					continue
				}
				inputOutPoints = append(inputOutPoints, wire.OutPoint{
					Hash:  txIn.PreviousOutTxHash,
					Index: txIn.PreviousOutIndex,
				})
			}
		}
		// pre-fetch inscriptions in outpoints
		transfersInOutPoints, err := p.getInscriptionTransfersInOutPoints(ctx, inputOutPoints)
		if err != nil {
			return errors.Wrap(err, "failed to get inscriptions in outpoints")
		}
		// pre-fetch outpoint values for transactions with inscriptions/envelopes
		outPointsToFetchValues := make([]wire.OutPoint, 0)
		for _, tx := range transactions {
			txInputOutPoints := lo.Map(tx.TxIn, func(txIn *types.TxIn, _ int) wire.OutPoint {
				return wire.OutPoint{
					Hash:  txIn.PreviousOutTxHash,
					Index: txIn.PreviousOutIndex,
				}
			})
			envelopes := ordinals.ParseEnvelopesFromTx(tx)
			outPointsWithTransfers := lo.Keys(transfersInOutPoints)
			txContainsTransfers := len(lo.Intersect(txInputOutPoints, outPointsWithTransfers)) > 0
			isCoinbase := tx.TxIn[0].PreviousOutTxHash.IsEqual(&chainhash.Hash{})
			if len(envelopes) == 0 && !txContainsTransfers && !isCoinbase {
				// no inscription activity, skip tx
				continue
			}
			outPointsToFetchValues = append(outPointsToFetchValues, lo.Map(tx.TxIn, func(txIn *types.TxIn, _ int) wire.OutPoint {
				return wire.OutPoint{
					Hash:  txIn.PreviousOutTxHash,
					Index: txIn.PreviousOutIndex,
				}
			})...)
		}
		outPointValues, err := p.getOutPointValues(ctx, outPointsToFetchValues)
		if err != nil {
			return errors.Wrap(err, "failed to get input values")
		}
		for _, tx := range transactions {
			if err := p.processInscriptionTx(ctx, tx, block.Header, transfersInOutPoints, outPointValues); err != nil {
				return errors.Wrap(err, "failed to process tx")
			}
		}
		// sort transfers by tx index, output index, output sat offset
		// NOTE: ord indexes inscription transfers spent as fee at the end of the block, but brc20 indexes them as soon as they are sent
		slices.SortFunc(p.newInscriptionTransfers, func(t1, t2 *entity.InscriptionTransfer) int {
			if t1.TxIndex != t2.TxIndex {
				return int(t1.TxIndex) - int(t2.TxIndex)
			}
			if t1.SentAsFee != t2.SentAsFee {
				// transfers sent as fee should be ordered after non-fees
				if t1.SentAsFee {
					return 1
				}
				return -1
			}
			if t1.NewSatPoint.OutPoint.Index != t2.NewSatPoint.OutPoint.Index {
				return int(t1.NewSatPoint.OutPoint.Index) - int(t2.NewSatPoint.OutPoint.Index)
			}
			// NOTE(review): int subtraction may overflow for extreme sat offsets — confirm value ranges
			return int(t1.NewSatPoint.Offset) - int(t2.NewSatPoint.Offset)
		})
		if err := p.processBRC20States(ctx, p.newInscriptionTransfers, block.Header); err != nil {
			return errors.Wrap(err, "failed to process brc20 states")
		}
		if err := p.flushBlock(ctx, block.Header); err != nil {
			return errors.Wrap(err, "failed to flush block")
		}
		logger.DebugContext(ctx, "Inserted new block")
	}
	return nil
}
// flushBlock persists every per-block state buffer to storage inside a single
// database transaction: the indexed-block record (with event hash and
// cumulative event hash), inscription entries/states/transfers, processor
// stats, tick entries/states, BRC-20 events, and balances. On success the
// buffers are reset; on any error the transaction is rolled back by the
// deferred handler and the buffers are left intact for retry.
func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeader) error {
	brc20DgTx, err := p.brc20Dg.BeginBRC20Tx(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to begin transaction")
	}
	// rollback is a no-op after a successful Commit; warn (don't fail) otherwise
	defer func() {
		if err := brc20DgTx.Rollback(ctx); err != nil {
			logger.WarnContext(ctx, "failed to rollback transaction",
				slogx.Error(err),
				slogx.String("event", "rollback_brc20_insertion"),
			)
		}
	}()
	blockHeight := uint64(blockHeader.Height)
	// calculate event hash
	{
		// drop a trailing separator left over from event accumulation
		eventHashString := p.eventHashString
		if len(eventHashString) > 0 && eventHashString[len(eventHashString)-1:] == eventHashSeparator {
			eventHashString = eventHashString[:len(eventHashString)-1]
		}
		eventHash := sha256.Sum256([]byte(eventHashString))
		prevIndexedBlock, err := brc20DgTx.GetIndexedBlockByHeight(ctx, blockHeader.Height-1)
		// synthesize the genesis predecessor when flushing the first indexed block
		if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == startingBlockHeader[p.network].Height {
			prevIndexedBlock = &entity.IndexedBlock{
				Height:              uint64(startingBlockHeader[p.network].Height),
				Hash:                startingBlockHeader[p.network].Hash,
				EventHash:           []byte{},
				CumulativeEventHash: []byte{},
			}
			err = nil
		}
		if err != nil {
			return errors.Wrap(err, "failed to get previous indexed block")
		}
		// chain this block's event hash onto the previous cumulative hash
		var cumulativeEventHash [32]byte
		if len(prevIndexedBlock.CumulativeEventHash) == 0 {
			cumulativeEventHash = eventHash
		} else {
			cumulativeEventHash = sha256.Sum256([]byte(hex.EncodeToString(prevIndexedBlock.CumulativeEventHash[:]) + hex.EncodeToString(eventHash[:])))
		}
		if err := brc20DgTx.CreateIndexedBlock(ctx, &entity.IndexedBlock{
			Height:              blockHeight,
			Hash:                blockHeader.Hash,
			EventHash:           eventHash[:],
			CumulativeEventHash: cumulativeEventHash[:],
		}); err != nil {
			return errors.Wrap(err, "failed to create indexed block")
		}
		p.eventHashString = ""
	}
	// flush new inscription entries
	{
		newInscriptionEntries := lo.Values(p.newInscriptionEntries)
		if err := brc20DgTx.CreateInscriptionEntries(ctx, blockHeight, newInscriptionEntries); err != nil {
			return errors.Wrap(err, "failed to create inscription entries")
		}
		p.newInscriptionEntries = make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
	}
	// flush new inscription entry states
	{
		newInscriptionEntryStates := lo.Values(p.newInscriptionEntryStates)
		if err := brc20DgTx.CreateInscriptionEntryStates(ctx, blockHeight, newInscriptionEntryStates); err != nil {
			return errors.Wrap(err, "failed to create inscription entry states")
		}
		p.newInscriptionEntryStates = make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
	}
	// flush new inscription transfers (comment fixed: was duplicated "entry states")
	{
		if err := brc20DgTx.CreateInscriptionTransfers(ctx, p.newInscriptionTransfers); err != nil {
			return errors.Wrap(err, "failed to create inscription transfers")
		}
		p.newInscriptionTransfers = make([]*entity.InscriptionTransfer, 0)
	}
	// flush processor stats
	{
		stats := &entity.ProcessorStats{
			BlockHeight:             blockHeight,
			CursedInscriptionCount:  p.cursedInscriptionCount,
			BlessedInscriptionCount: p.blessedInscriptionCount,
			LostSats:                p.lostSats,
		}
		if err := brc20DgTx.CreateProcessorStats(ctx, stats); err != nil {
			return errors.Wrap(err, "failed to create processor stats")
		}
	}
	// remaining BRC-20 state buffers flushed below:
	// newTickEntries map[string]*entity.TickEntry
	// newTickEntryStates map[string]*entity.TickEntry
	// newEventDeploys []*entity.EventDeploy
	// newEventMints []*entity.EventMint
	// newEventInscribeTransfers []*entity.EventInscribeTransfer
	// newEventTransferTransfers []*entity.EventTransferTransfer
	// newBalances map[string]map[string]*entity.Balance
	// flush new tick entries
	{
		newTickEntries := lo.Values(p.newTickEntries)
		if err := brc20DgTx.CreateTickEntries(ctx, blockHeight, newTickEntries); err != nil {
			return errors.Wrap(err, "failed to create tick entries")
		}
		p.newTickEntries = make(map[string]*entity.TickEntry)
	}
	// flush new tick entry states
	{
		newTickEntryStates := lo.Values(p.newTickEntryStates)
		if err := brc20DgTx.CreateTickEntryStates(ctx, blockHeight, newTickEntryStates); err != nil {
			return errors.Wrap(err, "failed to create tick entry states")
		}
		p.newTickEntryStates = make(map[string]*entity.TickEntry)
	}
	// flush new events
	{
		if err := brc20DgTx.CreateEventDeploys(ctx, p.newEventDeploys); err != nil {
			return errors.Wrap(err, "failed to create event deploys")
		}
		if err := brc20DgTx.CreateEventMints(ctx, p.newEventMints); err != nil {
			return errors.Wrap(err, "failed to create event mints")
		}
		if err := brc20DgTx.CreateEventInscribeTransfers(ctx, p.newEventInscribeTransfers); err != nil {
			return errors.Wrap(err, "failed to create event inscribe transfers")
		}
		if err := brc20DgTx.CreateEventTransferTransfers(ctx, p.newEventTransferTransfers); err != nil {
			return errors.Wrap(err, "failed to create event transfer transfers")
		}
		p.newEventDeploys = make([]*entity.EventDeploy, 0)
		p.newEventMints = make([]*entity.EventMint, 0)
		p.newEventInscribeTransfers = make([]*entity.EventInscribeTransfer, 0)
		p.newEventTransferTransfers = make([]*entity.EventTransferTransfer, 0)
	}
	// flush new balances
	{
		newBalances := make([]*entity.Balance, 0)
		for _, tickBalances := range p.newBalances {
			for _, balance := range tickBalances {
				newBalances = append(newBalances, balance)
			}
		}
		if err := brc20DgTx.CreateBalances(ctx, newBalances); err != nil {
			return errors.Wrap(err, "failed to create balances")
		}
		p.newBalances = make(map[string]map[string]*entity.Balance)
	}
	if err := brc20DgTx.Commit(ctx); err != nil {
		return errors.Wrap(err, "failed to commit transaction")
	}
	return nil
}

View File

@@ -1,7 +1,6 @@
package runes
import (
"fmt"
"log"
"slices"
"unicode/utf8"
@@ -335,7 +334,6 @@ func runestonePayloadFromTx(tx *types.Transaction) ([]byte, Flaws) {
continue
}
if err := tokenizer.Err(); err != nil {
fmt.Println(err.Error())
continue
}
if opCode := tokenizer.Opcode(); opCode != RUNESTONE_PAYLOAD_MAGIC_NUMBER {

View File

@@ -9,4 +9,5 @@ import (
type Contract interface {
GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error)
GetTransactionOutputs(ctx context.Context, txHash chainhash.Hash) ([]*types.TxOut, error)
}

203
pkg/btcutils/address.go Normal file
View File

@@ -0,0 +1,203 @@
package btcutils
import (
"encoding/json"
"reflect"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
)
// IsAddress returns whether or not the passed string is a valid bitcoin address and valid supported type.
//
// NetParams is optional. If provided, we only check for that network,
// otherwise, we check for all supported networks.
func IsAddress(address string, defaultNet ...*chaincfg.Params) bool {
	if address == "" {
		return false
	}
	// A specific network was requested: validate against it alone.
	if net, ok := utils.Optional(defaultNet); ok {
		_, _, err := parseAddress(address, net)
		return err == nil
	}
	// No network given: accept the address if any supported network parses it.
	for _, candidate := range supportedNetworks {
		if _, _, err := parseAddress(address, candidate); err == nil {
			return true
		}
	}
	return false
}
// TODO: create GetAddressNetwork
// check `Bech32HRPSegwit` prefix or netID for P2SH/P2PKH is equal to `PubKeyHashAddrID/ScriptHashAddrID`
// GetAddressType returns the address type of the passed address.
// GetAddressType returns the address type of the passed address.
func GetAddressType(address string, net *chaincfg.Params) (AddressType, error) {
	_, addressType, err := parseAddress(address, net)
	if err != nil {
		return addressType, errors.WithStack(err)
	}
	return addressType, nil
}
// Address is a parsed, validated bitcoin address together with its network,
// classified type, and the corresponding output script.
type Address struct {
	decoded      btcutil.Address  // underlying decoded address
	net          *chaincfg.Params // network the address was parsed against
	encoded      string           // canonical string form (EncodeAddress result)
	encodedType  AddressType      // classified address type (P2PKH, P2SH, P2WPKH, P2WSH, P2TR)
	scriptPubKey []byte           // pay-to-address output script
}
// NewAddress creates a new address from the given address string.
// It panics if the address is invalid; use SafeNewAddress to get an error instead.
//
// defaultNet is required if your address is P2SH or P2PKH (legacy or nested segwit)
// If your address is P2WSH, P2WPKH or P2TR, defaultNet is not required.
func NewAddress(address string, defaultNet ...*chaincfg.Params) Address {
	addr, err := SafeNewAddress(address, defaultNet...)
	if err != nil {
		// fixed garbled log message: was "can't create parse address"
		logger.Panic("can't parse address", slogx.Error(err), slogx.String("package", "btcutils"))
	}
	return addr
}
// SafeNewAddress creates a new address from the given address string.
// It returns an error if the address is invalid.
//
// defaultNet is required if your address is P2SH or P2PKH (legacy or nested segwit)
// If your address is P2WSH, P2WPKH or P2TR, defaultNet is not required.
func SafeNewAddress(address string, defaultNet ...*chaincfg.Params) (Address, error) {
	params := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)
	decoded, addressType, err := parseAddress(address, params)
	if err != nil {
		return Address{}, errors.Wrap(err, "can't parse address")
	}
	script, err := txscript.PayToAddrScript(decoded)
	if err != nil {
		return Address{}, errors.Wrap(err, "can't get script pubkey")
	}
	addr := Address{
		decoded:      decoded,
		net:          params,
		encoded:      decoded.EncodeAddress(),
		encodedType:  addressType,
		scriptPubKey: script,
	}
	return addr, nil
}
// String returns the canonical encoded address string.
func (a Address) String() string {
	return a.encoded
}
// Type returns the classified address type (P2PKH, P2SH, P2WPKH, P2WSH, P2TR).
func (a Address) Type() AddressType {
	return a.encodedType
}
// Decoded returns the underlying decoded btcutil.Address.
func (a Address) Decoded() btcutil.Address {
	return a.decoded
}
// IsForNet returns whether or not the address is associated with the passed bitcoin network.
func (a Address) IsForNet(net *chaincfg.Params) bool {
	return a.decoded.IsForNet(net)
}
// ScriptAddress returns the raw bytes of the address to be used when inserting
// the address into a txout's script.
func (a Address) ScriptAddress() []byte {
	return a.decoded.ScriptAddress()
}
// Net returns the chain parameters of the network the address was parsed against.
func (a Address) Net() *chaincfg.Params {
	return a.net
}
// NetworkName returns the name of the network the address belongs to (e.g. "mainnet").
func (a Address) NetworkName() string {
	return a.net.Name
}
// ScriptPubKey returns the pay-to-address output script (a.k.a. pubkey script)
// for this address.
func (a Address) ScriptPubKey() []byte {
	return a.scriptPubKey
}
// Equal returns true if both addresses have the same canonical encoded form.
func (a Address) Equal(b Address) bool {
	return a.encoded == b.encoded
}
// MarshalText implements the encoding.TextMarshaler interface.
// It emits the canonical encoded address string and never fails.
func (a Address) MarshalText() ([]byte, error) {
	return []byte(a.encoded), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// Invalid input yields a wrapped errs.InvalidArgument.
//
// NOTE(review): SafeNewAddress is called without an explicit network, so
// legacy (P2SH/P2PKH) addresses are validated against the default network
// only — confirm this is intended for testnet payloads.
func (a *Address) UnmarshalText(input []byte) error {
	address := string(input)
	if addr, err := SafeNewAddress(address); err == nil {
		*a = addr
		return nil
	}
	return errors.Wrapf(errs.InvalidArgument, "invalid address `%s`", address)
}
// MarshalJSON implements the json.Marshaler interface.
// It emits the address as a JSON string.
func (a Address) MarshalJSON() ([]byte, error) {
	text, err := a.MarshalText()
	if err != nil {
		return nil, &json.MarshalerError{Type: reflect.TypeOf(a), Err: err}
	}
	// wrap the address text in JSON string quotes
	out := make([]byte, 0, len(text)+2)
	out = append(out, '"')
	out = append(out, text...)
	out = append(out, '"')
	return out, nil
}
// UnmarshalJSON implements the json.Unmarshaler interface: it parses a
// JSON-quoted address string. (Doc fixed: previous comment said "parses a
// hash in hex syntax", a copy-paste error.)
func (a *Address) UnmarshalJSON(input []byte) error {
	if len(input) < 2 || input[0] != '"' || input[len(input)-1] != '"' {
		return &json.UnmarshalTypeError{Value: "non-string", Type: reflect.TypeOf(Address{})}
	}
	return a.UnmarshalText(input[1 : len(input)-1])
}
// parseAddress decodes the address string for the given network parameters
// and classifies it into one of the supported address types. It returns a
// wrapped errs.Unsupported for address kinds this package does not handle.
func parseAddress(address string, params *chaincfg.Params) (btcutil.Address, AddressType, error) {
	decoded, err := btcutil.DecodeAddress(address, params)
	if err != nil {
		return nil, 0, errors.Wrapf(err, "can't decode address `%s` for network `%s`", address, params.Name)
	}
	switch decoded.(type) {
	case *btcutil.AddressPubKeyHash:
		return decoded, AddressP2PKH, nil
	case *btcutil.AddressScriptHash:
		return decoded, AddressP2SH, nil
	case *btcutil.AddressWitnessPubKeyHash:
		return decoded, AddressP2WPKH, nil
	case *btcutil.AddressWitnessScriptHash:
		return decoded, AddressP2WSH, nil
	case *btcutil.AddressTaproot:
		return decoded, AddressP2TR, nil
	}
	return nil, 0, errors.Wrap(errs.Unsupported, "unsupported address type")
}

View File

@@ -0,0 +1,80 @@
package btcutils_test
import (
"testing"
"github.com/btcsuite/btcd/chaincfg"
"github.com/gaze-network/indexer-network/pkg/btcutils"
)
/*
NOTE:
# Compare this benchmark to go-ethereum/common.Address utils
- go-ethereum/common.HexToAddress speed: 45 ns/op, 48 B/op, 1 allocs/op
- go-ethereum/common.IsHexAddress speed: 25 ns/op, 0 B/op, 0 allocs/op
It's slower than the go-ethereum/common.Address utils because an Ethereum wallet address is a plain 20-byte hex string,
while Bitcoin has many address types, each requiring a comparatively expensive decode (the address type can't be determined and validated directly from the address string)
20/Jan/2024 @Planxnx Macbook Air M1 16GB
BenchmarkIsAddress/specific-network/mainnet/P2WPKH-8 1776146 625.6 ns/op 120 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/testnet3/P2WPKH-8 1917876 623.2 ns/op 120 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/mainnet/P2TR-8 1330348 915.4 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/testnet3/P2TR-8 1235806 931.1 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/mainnet/P2WSH-8 1261730 960.9 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/testnet3/P2WSH-8 1307851 916.1 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/mainnet/P2SH-8 3081762 402.0 ns/op 192 B/op 8 allocs/op
BenchmarkIsAddress/specific-network/testnet3/P2SH-8 3245838 344.9 ns/op 176 B/op 7 allocs/op
BenchmarkIsAddress/specific-network/mainnet/P2PKH-8 2904252 410.4 ns/op 184 B/op 8 allocs/op
BenchmarkIsAddress/specific-network/testnet3/P2PKH-8 3522332 342.8 ns/op 176 B/op 7 allocs/op
BenchmarkIsAddress/automate-network/mainnet/P2WPKH-8 1882059 637.6 ns/op 120 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/testnet3/P2WPKH-8 1626151 664.8 ns/op 120 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/mainnet/P2TR-8 1250253 952.1 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/testnet3/P2TR-8 1257901 993.7 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/mainnet/P2WSH-8 1000000 1005 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/testnet3/P2WSH-8 1209108 971.2 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/mainnet/P2SH-8 1869075 625.0 ns/op 268 B/op 9 allocs/op
BenchmarkIsAddress/automate-network/testnet3/P2SH-8 779496 1609 ns/op 694 B/op 17 allocs/op
BenchmarkIsAddress/automate-network/mainnet/P2PKH-8 1924058 650.6 ns/op 259 B/op 9 allocs/op
BenchmarkIsAddress/automate-network/testnet3/P2PKH-8 721510 1690 ns/op 694 B/op 17 allocs/op
*/
// BenchmarkIsAddress measures IsAddress over every supported address type on
// mainnet and testnet3, in two modes: validating against the address's known
// network ("specific-network") and auto-detecting across all supported
// networks ("automate-network"). Each case is built with NewAddress, which
// panics on invalid input, so the fixtures are guaranteed valid.
func BenchmarkIsAddress(b *testing.B) {
	cases := []btcutils.Address{
		/* P2WPKH */ btcutils.NewAddress("bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh", &chaincfg.MainNetParams),
		/* P2WPKH */ btcutils.NewAddress("tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey", &chaincfg.TestNet3Params),
		/* P2TR */ btcutils.NewAddress("bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38", &chaincfg.MainNetParams),
		/* P2TR */ btcutils.NewAddress("tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg", &chaincfg.TestNet3Params),
		/* P2WSH */ btcutils.NewAddress("bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak", &chaincfg.MainNetParams),
		/* P2WSH */ btcutils.NewAddress("tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7", &chaincfg.TestNet3Params),
		/* P2SH */ btcutils.NewAddress("3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw", &chaincfg.MainNetParams),
		/* P2SH */ btcutils.NewAddress("2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ", &chaincfg.TestNet3Params),
		/* P2PKH */ btcutils.NewAddress("1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH", &chaincfg.MainNetParams),
		/* P2PKH */ btcutils.NewAddress("migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3", &chaincfg.TestNet3Params),
	}
	b.Run("specific-network", func(b *testing.B) {
		for _, c := range cases {
			b.Run(c.NetworkName()+"/"+c.Type().String(), func(b *testing.B) {
				// NOTE(review): ResetTimer is redundant here (no setup precedes the loop)
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					_ = btcutils.IsAddress(c.String(), c.Net())
				}
			})
		}
	})
	b.Run("automate-network", func(b *testing.B) {
		for _, c := range cases {
			b.Run(c.NetworkName()+"/"+c.Type().String(), func(b *testing.B) {
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					ok := btcutils.IsAddress(c.String())
					if !ok {
						b.Error("IsAddress returned false")
					}
				}
			})
		}
	})
}

View File

@@ -0,0 +1,363 @@
package btcutils_test
import (
"encoding/json"
"fmt"
"testing"
"github.com/btcsuite/btcd/chaincfg"
"github.com/gaze-network/indexer-network/pkg/btcutils"
"github.com/stretchr/testify/assert"
)
// TestGetAddressType checks that GetAddressType classifies each address into
// the expected type for the given network parameters.
//
// NOTE(review): when ExpectedError is nil the returned error itself is not
// asserted — a failing parse would only surface as a zero AddressType
// mismatch. Consider also asserting actualError is nil.
func TestGetAddressType(t *testing.T) {
	type Spec struct {
		Address             string
		DefaultNet          *chaincfg.Params
		ExpectedError       error
		ExpectedAddressType btcutils.AddressType
	}
	specs := []Spec{
		{
			Address:             "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WPKH,
		},
		{
			Address:             "tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WPKH,
		},
		{
			Address:             "bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2TR,
		},
		{
			Address:             "tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2TR,
		},
		{
			Address:             "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2SH,
		},
		{
			Address:             "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2PKH,
		},
		{
			Address:             "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WSH,
		},
		{
			Address:             "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3",
			DefaultNet:          &chaincfg.TestNet3Params,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2PKH,
		},
		{
			Address:             "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WSH,
		},
		{
			Address:             "2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ",
			DefaultNet:          &chaincfg.TestNet3Params,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2SH,
		},
	}
	for _, spec := range specs {
		t.Run(fmt.Sprintf("address:%s", spec.Address), func(t *testing.T) {
			actualAddressType, actualError := btcutils.GetAddressType(spec.Address, spec.DefaultNet)
			if spec.ExpectedError != nil {
				assert.ErrorIs(t, actualError, spec.ExpectedError)
			} else {
				assert.Equal(t, spec.ExpectedAddressType, actualAddressType)
			}
		})
	}
}
// TestNewAddress checks that NewAddress round-trips each valid address string
// and classifies it correctly. Segwit/taproot cases omit DefaultNet to cover
// network auto-detection; legacy P2SH/P2PKH cases must pass their network.
func TestNewAddress(t *testing.T) {
	type Spec struct {
		Address             string
		DefaultNet          *chaincfg.Params
		ExpectedAddressType btcutils.AddressType
	}
	specs := []Spec{
		{
			Address: "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2WPKH,
		},
		{
			Address: "tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2WPKH,
		},
		{
			Address: "bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2TR,
		},
		{
			Address: "tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2TR,
		},
		{
			Address: "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2WSH,
		},
		{
			Address: "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2WSH,
		},
		{
			Address:             "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedAddressType: btcutils.AddressP2SH,
		},
		{
			Address:             "2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ",
			DefaultNet:          &chaincfg.TestNet3Params,
			ExpectedAddressType: btcutils.AddressP2SH,
		},
		{
			Address:             "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedAddressType: btcutils.AddressP2PKH,
		},
		{
			Address:             "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3",
			DefaultNet:          &chaincfg.TestNet3Params,
			ExpectedAddressType: btcutils.AddressP2PKH,
		},
	}
	for _, spec := range specs {
		t.Run(fmt.Sprintf("address:%s,type:%s", spec.Address, spec.ExpectedAddressType), func(t *testing.T) {
			addr := btcutils.NewAddress(spec.Address, spec.DefaultNet)
			assert.Equal(t, spec.ExpectedAddressType, addr.Type())
			assert.Equal(t, spec.Address, addr.String())
		})
	}
}
// TestIsAddress exercises IsAddress in auto-detect mode (no network argument)
// over valid addresses of every supported type on mainnet/testnet3, plus
// negative cases: empty string and addresses with corrupted checksums.
func TestIsAddress(t *testing.T) {
	type Spec struct {
		Address  string
		Expected bool
	}
	specs := []Spec{
		{
			Address:  "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh",
			Expected: true,
		},
		{
			Address:  "tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey",
			Expected: true,
		},
		{
			Address:  "bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38",
			Expected: true,
		},
		{
			Address:  "tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg",
			Expected: true,
		},
		{
			Address:  "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
			Expected: true,
		},
		{
			Address:  "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
			Expected: true,
		},
		{
			Address:  "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
			Expected: true,
		},
		{
			Address:  "2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ",
			Expected: true,
		},
		{
			Address:  "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
			Expected: true,
		},
		{
			Address:  "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3",
			Expected: true,
		},
		{
			Address:  "",
			Expected: false,
		},
		{
			// last character altered: checksum failure
			Address:  "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz2",
			Expected: false,
		},
		{
			// last character altered: checksum failure
			Address:  "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczz",
			Expected: false,
		},
	}
	for _, spec := range specs {
		t.Run(fmt.Sprintf("address:%s", spec.Address), func(t *testing.T) {
			ok := btcutils.IsAddress(spec.Address)
			assert.Equal(t, spec.Expected, ok)
		})
	}
}
// TestAddressEncoding checks that a btcutils.Address marshals to JSON as a
// plain quoted address string, both standalone and when embedded in an object.
func TestAddressEncoding(t *testing.T) {
	rawAddress := "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh"
	address := btcutils.NewAddress(rawAddress, &chaincfg.MainNetParams)

	type testCase struct {
		Data     interface{} // value handed to json.Marshal
		Expected string      // exact JSON output expected
	}
	cases := []testCase{
		{
			Data:     address,
			Expected: fmt.Sprintf(`"%s"`, rawAddress),
		},
		{
			Data:     map[string]interface{}{"address": rawAddress},
			Expected: fmt.Sprintf(`{"address":"%s"}`, rawAddress),
		},
	}
	for i, tc := range cases {
		t.Run(fmt.Sprint(i+1), func(t *testing.T) {
			encoded, err := json.Marshal(tc.Data)
			assert.NoError(t, err)
			assert.Equal(t, tc.Expected, string(encoded))
		})
	}
}
// TestAddressDecoding checks JSON unmarshalling of btcutils.Address from a
// bare string, a struct field, and an array element, and that invalid inputs
// (an unparseable address string, a non-string JSON value) produce errors.
func TestAddressDecoding(t *testing.T) {
	rawAddress := "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh"
	// Reference address that decoded results are compared against.
	address := btcutils.NewAddress(rawAddress, &chaincfg.MainNetParams)
	// Case #1: address is a bare JSON string
	t.Run("from_string", func(t *testing.T) {
		input := fmt.Sprintf(`"%s"`, rawAddress)
		expected := address
		actual := btcutils.Address{}
		err := json.Unmarshal([]byte(input), &actual)
		if !assert.NoError(t, err) {
			t.FailNow()
		}
		assert.Equal(t, expected, actual)
	})
	// Case #2: address is a string-valued field of a struct
	t.Run("from_field_string", func(t *testing.T) {
		type Data struct {
			Address btcutils.Address `json:"address"`
		}
		input := fmt.Sprintf(`{"address":"%s"}`, rawAddress)
		expected := Data{Address: address}
		actual := Data{}
		err := json.Unmarshal([]byte(input), &actual)
		if !assert.NoError(t, err) {
			t.FailNow()
		}
		assert.Equal(t, expected, actual)
	})
	// Case #3: address is an element of a JSON array
	t.Run("from_array", func(t *testing.T) {
		input := fmt.Sprintf(`["%s"]`, rawAddress)
		expected := []btcutils.Address{address}
		actual := []btcutils.Address{}
		err := json.Unmarshal([]byte(input), &actual)
		if !assert.NoError(t, err) {
			t.FailNow()
		}
		assert.Equal(t, expected, actual)
	})
	// Case #4: unsupported/unparseable address string -> must return an error
	t.Run("from_string/not_address", func(t *testing.T) {
		input := fmt.Sprintf(`"%s"`, "THIS_IS_NOT_SUPPORTED_ADDRESS")
		actual := btcutils.Address{}
		err := json.Unmarshal([]byte(input), &actual)
		assert.Error(t, err)
	})
	// Case #5: wrong JSON type (number instead of string) -> must return an error
	t.Run("from_number", func(t *testing.T) {
		type Data struct {
			Address btcutils.Address `json:"address"`
		}
		input := fmt.Sprintf(`{"address":%d}`, 123)
		actual := Data{}
		err := json.Unmarshal([]byte(input), &actual)
		assert.Error(t, err)
	})
}

44
pkg/btcutils/btc.go Normal file
View File

@@ -0,0 +1,44 @@
package btcutils
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
)
var (
	// NullAddress is the mainnet address whose script address (payload) is all zeros.
	NullAddress = NewAddress("1111111111111111111114oLvT2", &chaincfg.MainNetParams)
	// NullHash is a hash whose bytes are all zero.
	NullHash = utils.Must(chainhash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000"))
)

// TransactionType is the type of a bitcoin transaction output script.
// It's an alias of txscript.ScriptClass.
type TransactionType = txscript.ScriptClass

// AddressType is the type of a bitcoin address.
// It's an alias of txscript.ScriptClass.
type AddressType = txscript.ScriptClass

// Types of bitcoin transaction output scripts, aliased from txscript script classes.
const (
	TransactionP2WPKH  = txscript.WitnessV0PubKeyHashTy
	TransactionP2TR    = txscript.WitnessV1TaprootTy
	TransactionTaproot = TransactionP2TR // Alias of P2TR
	TransactionP2SH    = txscript.ScriptHashTy
	TransactionP2PKH   = txscript.PubKeyHashTy
	TransactionP2WSH   = txscript.WitnessV0ScriptHashTy
)

// Types of bitcoin addresses, aliased from txscript script classes.
const (
	AddressP2WPKH  = txscript.WitnessV0PubKeyHashTy
	AddressP2TR    = txscript.WitnessV1TaprootTy
	AddressTaproot = AddressP2TR // Alias of P2TR
	AddressP2SH    = txscript.ScriptHashTy
	AddressP2PKH   = txscript.PubKeyHashTy
	AddressP2WSH   = txscript.WitnessV0ScriptHashTy
)

View File

@@ -0,0 +1,23 @@
package btcutils
import (
"github.com/btcsuite/btcd/chaincfg"
)
// supportedNetworks maps a network name to its btcd chain parameters.
// Only bitcoin mainnet and testnet3 are currently supported.
var supportedNetworks = map[string]*chaincfg.Params{
	"mainnet": &chaincfg.MainNetParams,
	"testnet": &chaincfg.TestNet3Params,
}
// IsSupportedNetwork reports whether the given network name is one of the
// networks this package has chain parameters for.
//
// TODO: create enum for network
func IsSupportedNetwork(network string) bool {
	if _, found := supportedNetworks[network]; found {
		return true
	}
	return false
}
// GetNetParams returns the *chaincfg.Params for the given network.
// It returns nil when the network is not supported; callers should either
// check IsSupportedNetwork first or nil-check the result.
func GetNetParams(network string) *chaincfg.Params {
	return supportedNetworks[network]
}

54
pkg/btcutils/pk_script.go Normal file
View File

@@ -0,0 +1,54 @@
package btcutils
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/cockroachdb/errors"
)
// NewPkScript creates a pubkey script (or witness program) from the given
// address string. If defaultNet is omitted, mainnet parameters are used.
//
// see: https://en.bitcoin.it/wiki/Script
func NewPkScript(address string, defaultNet ...*chaincfg.Params) ([]byte, error) {
	params := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)

	addr, _, err := parseAddress(address, params)
	if err != nil {
		return nil, errors.Wrap(err, "can't parse address")
	}

	pkScript, err := txscript.PayToAddrScript(addr)
	if err != nil {
		return nil, errors.Wrap(err, "can't get script pubkey")
	}
	return pkScript, nil
}
// GetAddressTypeFromPkScript returns the address type encoded by the given
// pubkey script / script pubkey. If defaultNet is omitted, mainnet is assumed.
// On parse failure it returns txscript.NonStandardTy and an error.
func GetAddressTypeFromPkScript(pkScript []byte, defaultNet ...*chaincfg.Params) (AddressType, error) {
	params := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)
	class, _, _, err := txscript.ExtractPkScriptAddrs(pkScript, params)
	if err != nil {
		return txscript.NonStandardTy, errors.Wrap(err, "can't parse pkScript")
	}
	return class, nil
}
// ExtractAddressFromPkScript extracts the address encoded by the given pubkey
// script / script pubkey. Multi-signature scripts are not supported.
// If defaultNet is omitted, mainnet parameters are used; if no address can be
// extracted, an error is returned.
func ExtractAddressFromPkScript(pkScript []byte, defaultNet ...*chaincfg.Params) (Address, error) {
	params := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)

	scriptClass, extracted, _, err := txscript.ExtractPkScriptAddrs(pkScript, params)
	if err != nil {
		return Address{}, errors.Wrap(err, "can't parse pkScript")
	}
	if len(extracted) == 0 {
		return Address{}, errors.New("can't extract address from pkScript")
	}

	// Use the first (and for non-multisig scripts, only) extracted address.
	primary := extracted[0]
	return Address{
		decoded:      primary,
		net:          params,
		encoded:      primary.EncodeAddress(),
		encodedType:  scriptClass,
		scriptPubKey: pkScript,
	}, nil
}

View File

@@ -0,0 +1,205 @@
package btcutils_test
import (
"encoding/hex"
"fmt"
"testing"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/pkg/btcutils"
"github.com/stretchr/testify/assert"
)
// TestNewPkScript verifies btcutils.NewPkScript for every supported address
// type (P2WPKH, P2TR, P2SH, P2PKH, P2WSH) plus an invalid address.
// For each valid case it first cross-checks the expected pkScript fixture
// against btcd's own encode/decode, then asserts NewPkScript matches it and
// that the produced pkScript round-trips back to the same address.
func TestNewPkScript(t *testing.T) {
	anyError := errors.New("any error")
	type Spec struct {
		Address          string
		DefaultNet       *chaincfg.Params
		ExpectedError    error
		ExpectedPkScript string // hex encoded
	}
	specs := []Spec{
		{
			Address:          "some_invalid_address",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    anyError,
			ExpectedPkScript: "",
		},
		{
			// P2WPKH
			Address:          "bc1qdx72th7e3z8zc5wdrdxweswfcne974pjneyjln",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "001469bca5dfd9888e2c51cd1b4cecc1c9c4f25f5432",
		},
		{
			// P2WPKH
			Address:          "bc1q7cj6gz6t3d28qg7kxhrc7h5t3h0re34fqqalga",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "0014f625a40b4b8b547023d635c78f5e8b8dde3cc6a9",
		},
		{
			// P2TR
			Address:          "bc1pfd0zw2jwlpn4xckpr3dxpt7x0gw6wetuftxvrc4dt2qgn9azjuus65fug6",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "51204b5e272a4ef8675362c11c5a60afc67a1da7657c4accc1e2ad5a808997a29739",
		},
		{
			// P2TR
			Address:          "bc1pxpumml545tqum5afarzlmnnez2npd35nvf0j0vnrp88nemqsn54qle05sm",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "51203079bdfe95a2c1cdd3a9e8c5fdce7912a616c693625f27b26309cf3cec109d2a",
		},
		{
			// P2SH
			Address:          "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "a91477e1a3d54f545d83869ae3a6b28b071422801d7b87",
		},
		{
			// P2PKH
			Address:          "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "76a914cecb25b53809991c7beef2d27bc2be49e78c684388ac",
		},
		{
			// P2WSH
			Address:          "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "0020cdbf909e935c855d3e8d1b61aeb9c5e3c03ae8021b286839b1a72f2e48fdba70",
		},
	}
	for _, spec := range specs {
		t.Run(fmt.Sprintf("address:%s", spec.Address), func(t *testing.T) {
			// Validate the expected pkScript fixture itself before testing.
			if spec.ExpectedError == nil {
				{
					// Fixture -> address must yield the spec's address.
					expectedPkScriptRaw, err := hex.DecodeString(spec.ExpectedPkScript)
					if err != nil {
						t.Fatalf("can't decode expected pkscript %s, Reason: %s", spec.ExpectedPkScript, err)
					}
					expectedPkScript, err := txscript.ParsePkScript(expectedPkScriptRaw)
					if err != nil {
						t.Fatalf("invalid expected pkscript %s, Reason: %s", spec.ExpectedPkScript, err)
					}
					expectedAddress, err := expectedPkScript.Address(spec.DefaultNet)
					if err != nil {
						t.Fatalf("can't get address from expected pkscript %s, Reason: %s", spec.ExpectedPkScript, err)
					}
					assert.Equal(t, spec.Address, expectedAddress.EncodeAddress())
				}
				{
					// Address -> pkScript via btcd must yield the fixture.
					address, err := btcutil.DecodeAddress(spec.Address, spec.DefaultNet)
					if err != nil {
						t.Fatalf("can't decode address %s(%s),Reason: %s", spec.Address, spec.DefaultNet.Name, err)
					}
					pkScript, err := txscript.PayToAddrScript(address)
					if err != nil {
						t.Fatalf("can't get pkscript from address %s(%s),Reason: %s", spec.Address, spec.DefaultNet.Name, err)
					}
					pkScriptStr := hex.EncodeToString(pkScript)
					assert.Equal(t, spec.ExpectedPkScript, pkScriptStr)
				}
			}
			pkScript, err := btcutils.NewPkScript(spec.Address, spec.DefaultNet)
			if spec.ExpectedError == anyError {
				assert.Error(t, err)
			} else if spec.ExpectedError != nil {
				assert.ErrorIs(t, err, spec.ExpectedError)
			} else {
				address, err := btcutils.SafeNewAddress(spec.Address, spec.DefaultNet)
				if err != nil {
					t.Fatalf("can't create address %s(%s),Reason: %s", spec.Address, spec.DefaultNet.Name, err)
				}
				// ScriptPubKey from address and from NewPkScript should be the same
				assert.Equal(t, address.ScriptPubKey(), pkScript)
				// Expected PkScript and New PkScript should be the same
				pkScriptStr := hex.EncodeToString(pkScript)
				assert.Equal(t, spec.ExpectedPkScript, pkScriptStr)
				// Can convert PkScript back to same address
				actualPkScript, err := txscript.ParsePkScript(address.ScriptPubKey())
				if !assert.NoError(t, err) {
					// FIX: was t.Fail(), which does not stop the subtest and
					// caused a nil-pointer panic on the next line when parsing failed.
					t.FailNow()
				}
				assert.Equal(t, address.Decoded().String(), utils.Must(actualPkScript.Address(spec.DefaultNet)).String())
			}
		})
	}
}
// TestGetAddressTypeFromPkScript verifies script-class detection for each
// supported pkScript form (P2WPKH, P2TR, P2SH, P2PKH, P2WSH).
func TestGetAddressTypeFromPkScript(t *testing.T) {
	type Spec struct {
		PubkeyScript        string // hex-encoded pkScript fixture
		ExpectedError       error
		ExpectedAddressType btcutils.AddressType
	}
	specs := []Spec{
		{
			PubkeyScript:        "0014602181cc89f7c9f54cb6d7607a3445e3e022895d",
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WPKH,
		},
		{
			PubkeyScript:        "5120ef8d59038dd51093fbfff794f658a07a3697b94d9e6d24e45b28abd88f10e33d",
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2TR,
		},
		{
			PubkeyScript:        "a91416eef7e84fb9821db1341b6ccef1c4a4e5ec21e487",
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2SH,
		},
		{
			PubkeyScript:        "76a914cecb25b53809991c7beef2d27bc2be49e78c684388ac",
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2PKH,
		},
		{
			PubkeyScript:        "0020cdbf909e935c855d3e8d1b61aeb9c5e3c03ae8021b286839b1a72f2e48fdba70",
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WSH,
		},
	}
	for _, spec := range specs {
		t.Run(fmt.Sprintf("PkScript:%s", spec.PubkeyScript), func(t *testing.T) {
			pkScript, err := hex.DecodeString(spec.PubkeyScript)
			if err != nil {
				// FIX: was t.Fail(), which does not stop the subtest and let
				// the test continue with an invalid pkScript fixture.
				t.Fatalf("can't decode pkscript fixture %s, Reason: %s", spec.PubkeyScript, err)
			}
			actualAddressType, actualError := btcutils.GetAddressTypeFromPkScript(pkScript)
			if spec.ExpectedError != nil {
				assert.ErrorIs(t, actualError, spec.ExpectedError)
			} else {
				// FIX: also assert no error on the success path; previously a
				// non-nil error would go unnoticed if the type happened to match.
				assert.NoError(t, actualError)
				assert.Equal(t, spec.ExpectedAddressType, actualAddressType)
			}
		})
	}
}

63
pkg/btcutils/pkscript.go Normal file
View File

@@ -0,0 +1,63 @@
package btcutils
import (
"encoding/hex"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/pkg/errors"
)
// ToPkScript converts a string holding either an address or a hex-encoded
// pkscript into the raw pkscript bytes.
// Address parsing is attempted first; on failure the input is treated as a
// hex-encoded pkscript.
func ToPkScript(network common.Network, from string) ([]byte, error) {
	if from == "" {
		return nil, errors.Wrap(errs.InvalidArgument, "empty input")
	}

	// Resolve the chain parameters for the requested network.
	var defaultNet *chaincfg.Params
	switch network {
	case common.NetworkMainnet:
		defaultNet = &chaincfg.MainNetParams
	case common.NetworkTestnet:
		defaultNet = &chaincfg.TestNet3Params
	default:
		return nil, errors.Wrap(errs.InvalidArgument, "invalid network")
	}

	// attempt to parse as address
	if address, err := btcutil.DecodeAddress(from, defaultNet); err == nil {
		pkScript, err := txscript.PayToAddrScript(address)
		if err != nil {
			return nil, errors.Wrap(err, "error converting address to pkscript")
		}
		return pkScript, nil
	}

	// attempt to parse as pkscript
	pkScript, err := hex.DecodeString(from)
	if err != nil {
		return nil, errors.Wrap(err, "error decoding pkscript")
	}
	return pkScript, nil
}
// PkScriptToAddress returns the encoded address for the given pkScript.
// If the pkScript is invalid, non-standard, or does not yield exactly one
// address (e.g. bare multisig), it returns an empty string and a non-nil error.
func PkScriptToAddress(pkScript []byte, network common.Network) (string, error) {
	_, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, network.ChainParams())
	if err != nil {
		return "", errors.Wrap(err, "error extracting addresses from pkscript")
	}
	if len(addrs) != 1 {
		return "", errors.New("invalid number of addresses extracted from pkscript")
	}
	return addrs[0].EncodeAddress(), nil
}

View File

@@ -0,0 +1,92 @@
package psbtutils
import (
"bytes"
"encoding/base64"
"encoding/hex"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
)
const (
	// DefaultEncoding is the encoding assumed when the caller does not pass one.
	// default psbt encoding is hex
	DefaultEncoding = EncodingHex
)

// Encoding identifies the textual encoding of a serialized PSBT.
type Encoding string

// Supported PSBT encodings.
const (
	EncodingBase64 Encoding = "base64"
	EncodingHex    Encoding = "hex"
)
// DecodeString decodes a psbt hex/base64 string into a psbt.Packet.
//
// encoding is optional; the default is EncodingHex.
func DecodeString(psbtStr string, encoding ...Encoding) (*psbt.Packet, error) {
	packet, err := Decode([]byte(psbtStr), encoding...)
	if err != nil {
		return packet, errors.WithStack(err)
	}
	return packet, nil
}
// Decode decodes hex/base64-encoded PSBT bytes into a psbt.Packet.
//
// encoding is optional; the default is EncodingHex.
func Decode(psbtB []byte, encoding ...Encoding) (*psbt.Packet, error) {
	enc, ok := utils.Optional(encoding)
	if !ok {
		enc = DefaultEncoding
	}
	var (
		psbtBytes []byte
		n         int // number of bytes actually written by the decoder
		err       error
	)
	switch enc {
	case EncodingBase64, "b64":
		psbtBytes = make([]byte, base64.StdEncoding.DecodedLen(len(psbtB)))
		n, err = base64.StdEncoding.Decode(psbtBytes, psbtB)
	case EncodingHex:
		psbtBytes = make([]byte, hex.DecodedLen(len(psbtB)))
		n, err = hex.Decode(psbtBytes, psbtB)
	default:
		return nil, errors.Wrap(errs.Unsupported, "invalid encoding")
	}
	if err != nil {
		return nil, errors.Wrap(err, "can't decode psbt string")
	}
	// FIX: base64.StdEncoding.DecodedLen is an upper bound (padding), so the
	// buffer can be longer than the decoded payload. Truncate to the number of
	// bytes actually decoded to avoid feeding trailing zero bytes to the parser.
	psbtBytes = psbtBytes[:n]
	pC, err := psbt.NewFromRawBytes(bytes.NewReader(psbtBytes), false)
	if err != nil {
		return nil, errors.Wrap(err, "can't create psbt from given psbt")
	}
	return pC, nil
}
// EncodeToString encodes a psbt.Packet into a psbt hex/base64 string.
//
// encoding is optional; the default is EncodingHex.
func EncodeToString(pC *psbt.Packet, encoding ...Encoding) (string, error) {
	enc, hasEncoding := utils.Optional(encoding)
	if !hasEncoding {
		enc = DefaultEncoding
	}

	// Serialize first; a broken packet errors regardless of target encoding.
	var serialized bytes.Buffer
	if err := pC.Serialize(&serialized); err != nil {
		return "", errors.Wrap(err, "can't serialize psbt")
	}
	raw := serialized.Bytes()

	switch enc {
	case EncodingBase64, "b64":
		return base64.StdEncoding.EncodeToString(raw), nil
	case EncodingHex:
		return hex.EncodeToString(raw), nil
	default:
		return "", errors.Wrap(errs.Unsupported, "invalid encoding")
	}
}

View File

@@ -0,0 +1,110 @@
package psbtutils
import (
"math"
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/pkg/btcutils"
)
// TxFee returns the fee in satoshis for the given PSBT at the given fee rate
// (sat/vB), based on the estimated virtual size of the transaction.
func TxFee(feeRate int64, p *psbt.Packet) (int64, error) {
	vsize, err := PSBTSize(p)
	if err != nil {
		return 0, errors.Wrap(err, "psbt size")
	}
	fee := math.Ceil(vsize * float64(feeRate))
	return int64(fee), nil
}
// PredictTxFee estimates the fee in satoshis for a not-yet-constructed
// transaction with the given input/output counts at the given fee rate
// (sat/vB), assuming 148 vBytes per input and 43 vBytes per output plus a
// fixed overhead (these match the largest per-type sizes in txSizes).
func PredictTxFee(feeRate int64, inputs, outputs int) int64 {
	/**
	TODO: handle edge cases like:
	1. when we predict that we need to use unnecessary UTXOs
	2. when we predict that we need to use more value than user have, but user do have enough for the actual transaction
	Idea for solving this:
	- When trying to find the best UTXOs to use, we:
	- Will not reject when user's balance is not enough, instead we will return all UTXOs even if it's not enough.
	- Will be okay returning excessive UTXOs (say we predict we need 10K satoshis, but actually we only need 5K satoshis, then we will return UTXOs enough for 10K satoshis)
	- And then we:
	- Construct the actual PSBT, then select UTXOs to use accordingly,
	- If the user's balance is not enough, then we will return an error,
	- Or if when we predict we expect to use more UTXOs than the actual transaction, then we will just use what's needed.
	*/
	estimatedSize := defaultOverhead + 148*float64(inputs) + 43*float64(outputs)
	return int64(math.Ceil(estimatedSize * float64(feeRate)))
}
// txSize holds the estimated virtual-size contribution (vBytes) of a single
// input/output of a given script type.
type txSize struct {
	Overhead float64 // NOTE(review): never set or read in the visible code — candidate for removal, confirm before deleting
	Inputs   float64 // vBytes added per input of this type
	Outputs  float64 // vBytes added per output of this type
}

// defaultOverhead is the fixed per-transaction overhead in vBytes.
const defaultOverhead = 10.5
// txSizes maps each supported script type to its estimated per-input and
// per-output virtual sizes in vBytes.
//
// Transaction Virtual Sizes Bytes
//
// Reference: https://bitcoinops.org/en/tools/calc-size/
var txSizes = map[btcutils.TransactionType]txSize{
	btcutils.TransactionP2WPKH: {
		Inputs:  68,
		Outputs: 31,
	},
	btcutils.TransactionP2TR: {
		Inputs:  57.5,
		Outputs: 43,
	},
	btcutils.TransactionP2SH: {
		Inputs:  91,
		Outputs: 32,
	},
	btcutils.TransactionP2PKH: {
		Inputs:  148,
		Outputs: 34,
	},
	btcutils.TransactionP2WSH: {
		Inputs:  104.5,
		Outputs: 43,
	},
}
// PSBTSize estimates the virtual size (vBytes) of the transaction represented
// by the given PSBT, using the per-type sizes from txSizes plus the fixed
// transaction overhead.
//
// Every input must carry a WitnessUtxo: its PkScript is needed to classify
// the input type. Inputs without one are rejected with errs.InvalidArgument.
func PSBTSize(p *psbt.Packet) (float64, error) {
	// Note: the parameter was renamed from `psbt`, which shadowed the psbt package.
	if err := p.SanityCheck(); err != nil {
		return 0, errors.Wrap(errors.Join(err, errs.InvalidArgument), "psbt sanity check")
	}
	inputs := map[btcutils.TransactionType]int{}
	outputs := map[btcutils.TransactionType]int{}
	for _, input := range p.Inputs {
		// FIX: guard inputs that carry no WitnessUtxo (e.g. legacy inputs with
		// only NonWitnessUtxo); previously this dereferenced a nil pointer.
		if input.WitnessUtxo == nil {
			return 0, errors.Wrap(errs.InvalidArgument, "input has no witness utxo")
		}
		addrType, err := btcutils.GetAddressTypeFromPkScript(input.WitnessUtxo.PkScript)
		if err != nil {
			return 0, errors.Wrap(err, "get address type from pk script")
		}
		inputs[addrType]++
	}
	for _, output := range p.UnsignedTx.TxOut {
		addrType, err := btcutils.GetAddressTypeFromPkScript(output.PkScript)
		if err != nil {
			return 0, errors.Wrap(err, "get address type from pk script")
		}
		outputs[addrType]++
	}
	totalSize := defaultOverhead
	for txType, txSizeData := range txSizes {
		if inputCount, ok := inputs[txType]; ok {
			totalSize += txSizeData.Inputs * float64(inputCount)
		}
		if outputCount, ok := outputs[txType]; ok {
			totalSize += txSizeData.Outputs * float64(outputCount)
		}
	}
	return totalSize, nil
}

View File

@@ -0,0 +1,131 @@
package psbtutils_test
import (
"fmt"
"math"
"testing"
"github.com/gaze-network/indexer-network/pkg/btcutils/psbtutils"
"github.com/stretchr/testify/assert"
)
// TestPSBTSize checks the virtual-size estimate for pre-built PSBT fixtures
// covering pure-taproot, mixed segwit/taproot, and segwit-to-legacy spends.
func TestPSBTSize(t *testing.T) {
	type Spec struct {
		Name          string
		PSBTString    string // hex-encoded PSBT fixture
		ExpectedError error
		ExpectedSize  float64 // expected estimate in vBytes
	}
	specs := []Spec{
		{
			// 3 taproot inputs, 3 taproot outputs.
			Name:          "3-inputs-3-outputs-taproot",
			PSBTString:    "70736274ff0100fd06010100000003866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910000000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910100000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910200000000ffffffff03b0040000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f22020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f4d370f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f000000000001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012bcb3c0f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f00000000",
			ExpectedError: nil,
			ExpectedSize:  312,
		},
		{
			// Mix of segwit (P2WPKH) and taproot inputs/outputs.
			Name:          "mixed-segwit-taproot",
			PSBTString:    "70736274ff0100fd230202000000061f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90300000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90400000000ffffffff21c8ec368f2aff1a7baf4964e4070f52e7247ae39edfbda3976f8df4da1b72a00000000000ffffffff969e65b705e3d5071f1743a63381b3aa1ec31e1dbbbd63ab594a19ca399a58af0000000000ffffffffcca5cfd28bd6c54a851d97d029560b3047f7c6482fda7b2f2603d56ade8c95890000000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90500000000ffffffff0908070000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e0b03600000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa532a680000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d58020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e0b21f1e00000000001600144850d32c3ff585403790507793125d174a5c28e0000000000001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f220200000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa53010304830000000001012b22020000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d010304830000000001011f06432000000000001600144850d32c3ff585403790507793125d174a5c28e000000000000000000000",
			ExpectedError: nil,
			ExpectedSize:  699,
		},
		{
			// Single segwit input paying to a legacy (P2PKH) output.
			Name:          "segwit-transfer-to-legacy",
			PSBTString:    "70736274ff010074020000000124ba4becfc732f3b4729784a3dd0cc2494ae890d826377fd98aeb0607feb1ace0100000000ffffffff0210270000000000001976a91422bae94117be666b593916527d55bdaf030d756e88ac25f62e000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac000000000001011fc51d2f000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac01086b024730440220759df9d109298a1ef69b9faa1786f4118f0d4d63a68cd2061e217b6090573f62022053ffa117fc21e5bf20e7d16bb786de52dc0214c9a21af87b4e92a639ef66e997012103e0cb213a46a68b1f463a4858635ee44694ce4b512788833d629840341b1219c9000000",
			ExpectedError: nil,
			ExpectedSize:  143.5,
		},
	}
	for _, spec := range specs {
		t.Run(spec.Name, func(t *testing.T) {
			p, err := psbtutils.DecodeString(spec.PSBTString)
			assert.NoError(t, err)
			size, err := psbtutils.PSBTSize(p)
			if spec.ExpectedError != nil {
				assert.ErrorIs(t, err, spec.ExpectedError)
			} else {
				assert.Equal(t, spec.ExpectedSize, size)
			}
		})
	}
}
// TestPredictTxFee checks the fee prediction formula:
// ceil((overhead + 148*inputs + 43*outputs) * feeRate).
func TestPredictTxFee(t *testing.T) {
	type testCase struct {
		feeRate     int64
		inputs      int
		outputs     int
		expectedFee int64
	}
	cases := []testCase{
		{
			feeRate:     100,
			inputs:      1,
			outputs:     1,
			expectedFee: int64(math.Ceil((10.5 + 148 + 43) * 100)),
		},
		{
			feeRate:     1,
			inputs:      99,
			outputs:     99,
			expectedFee: int64(math.Ceil((10.5 + (99 * 148) + (99 * 43)) * 1)),
		},
	}
	for _, tc := range cases {
		name := fmt.Sprintf("feeRate=%d:inputs=%d:outputs=%d", tc.feeRate, tc.inputs, tc.outputs)
		t.Run(name, func(t *testing.T) {
			actual := psbtutils.PredictTxFee(tc.feeRate, tc.inputs, tc.outputs)
			assert.Equal(t, tc.expectedFee, actual)
		})
	}
}
// TestTxFee checks that TxFee multiplies the PSBT's estimated virtual size by
// the fee rate and rounds up, using the same fixtures as TestPSBTSize.
func TestTxFee(t *testing.T) {
	type Spec struct {
		Name          string
		FeeRate       int64 // fee rate in sat/vB
		PSBTString    string // hex-encoded PSBT fixture
		ExpectedError error
		ExpectedFee   int64 // expected fee in satoshis
	}
	specs := []Spec{
		{
			// 312 vBytes fixture at 10 sat/vB.
			Name:          "3-inputs-3-outputs-taproot",
			FeeRate:       10,
			PSBTString:    "70736274ff0100fd06010100000003866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910000000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910100000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910200000000ffffffff03b0040000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f22020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f4d370f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f000000000001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012bcb3c0f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f00000000",
			ExpectedError: nil,
			ExpectedFee:   312 * 10,
		},
		{
			// 699 vBytes fixture at 20 sat/vB.
			Name:          "mixed-segwit-taproot",
			FeeRate:       20,
			PSBTString:    "70736274ff0100fd230202000000061f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90300000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90400000000ffffffff21c8ec368f2aff1a7baf4964e4070f52e7247ae39edfbda3976f8df4da1b72a00000000000ffffffff969e65b705e3d5071f1743a63381b3aa1ec31e1dbbbd63ab594a19ca399a58af0000000000ffffffffcca5cfd28bd6c54a851d97d029560b3047f7c6482fda7b2f2603d56ade8c95890000000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90500000000ffffffff0908070000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e0b03600000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa532a680000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d58020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e0b21f1e00000000001600144850d32c3ff585403790507793125d174a5c28e0000000000001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f220200000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa53010304830000000001012b22020000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d010304830000000001011f06432000000000001600144850d32c3ff585403790507793125d174a5c28e000000000000000000000",
			ExpectedError: nil,
			ExpectedFee:   699 * 20,
		},
		{
			// 143.5 vBytes fixture at 99 sat/vB; fee is rounded up.
			Name:          "segwit-transfer-to-legacy",
			FeeRate:       99,
			PSBTString:    "70736274ff010074020000000124ba4becfc732f3b4729784a3dd0cc2494ae890d826377fd98aeb0607feb1ace0100000000ffffffff0210270000000000001976a91422bae94117be666b593916527d55bdaf030d756e88ac25f62e000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac000000000001011fc51d2f000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac01086b024730440220759df9d109298a1ef69b9faa1786f4118f0d4d63a68cd2061e217b6090573f62022053ffa117fc21e5bf20e7d16bb786de52dc0214c9a21af87b4e92a639ef66e997012103e0cb213a46a68b1f463a4858635ee44694ce4b512788833d629840341b1219c9000000",
			ExpectedError: nil,
			ExpectedFee:   int64(math.Ceil((143.5) * 99)),
		},
	}
	for _, spec := range specs {
		t.Run(spec.Name, func(t *testing.T) {
			p, err := psbtutils.DecodeString(spec.PSBTString)
			assert.NoError(t, err)
			fee, err := psbtutils.TxFee(spec.FeeRate, p)
			if spec.ExpectedError != nil {
				assert.ErrorIs(t, err, spec.ExpectedError)
			} else {
				assert.Equal(t, spec.ExpectedFee, fee)
			}
		})
	}
}

View File

@@ -0,0 +1,35 @@
package psbtutils
import (
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/samber/lo"
)
// IsReadyPSBT reports whether the PSBT's total input value covers its total
// output value plus the estimated network fee at the given fee rate (sat/vB).
//
// It returns true on an exact match, or when inputs exceed outputs+fee by
// less than 1000 satoshi (tolerated overpayment slack).
func IsReadyPSBT(pC *psbt.Packet, feeRate int64) (bool, error) {
	// if input = output + fee then it's ready
	// Calculate tx fee
	fee, err := TxFee(feeRate, pC)
	if err != nil {
		return false, errors.Wrap(err, "calculate fee")
	}
	// FIX: every input must carry a WitnessUtxo, otherwise its value is
	// unknown; previously a missing WitnessUtxo caused a nil-pointer panic
	// inside the SumBy below.
	for _, input := range pC.Inputs {
		if input.WitnessUtxo == nil {
			return false, errors.New("input has no witness utxo")
		}
	}
	// sum total input and output (output side includes the fee)
	totalInputValue := lo.SumBy(pC.Inputs, func(input psbt.PInput) int64 { return input.WitnessUtxo.Value })
	totalOutputValue := lo.SumBy(pC.UnsignedTx.TxOut, func(txout *wire.TxOut) int64 { return txout.Value }) + fee
	// it's perfect match
	if totalInputValue == totalOutputValue {
		return true, nil
	}
	// if input is more than output + fee but not more than 1000 satoshi,
	// then it's ready
	if totalInputValue > totalOutputValue && totalInputValue-totalOutputValue < 1000 {
		return true, nil
	}
	return false, nil
}

21
pkg/btcutils/signature.go Normal file
View File

@@ -0,0 +1,21 @@
package btcutils
import (
"github.com/Cleverse/go-utilities/utils"
verifier "github.com/bitonicnl/verify-signed-message/pkg"
"github.com/btcsuite/btcd/chaincfg"
"github.com/cockroachdb/errors"
)
// VerifySignature verifies a base64-encoded signed message against the given
// address. If defaultNet is omitted, mainnet parameters are used.
// It returns nil when the signature is valid for the address and message.
func VerifySignature(address string, message string, sigBase64 string, defaultNet ...*chaincfg.Params) error {
	params := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)
	signedMessage := verifier.SignedMessage{
		Address:   address,
		Message:   message,
		Signature: sigBase64,
	}
	if _, err := verifier.VerifyWithChain(signedMessage, params); err != nil {
		return errors.WithStack(err)
	}
	return nil
}

View File

@@ -0,0 +1,69 @@
package btcutils
import (
"testing"
"github.com/btcsuite/btcd/chaincfg"
"github.com/stretchr/testify/assert"
)
// TestVerifySignature checks VerifySignature with a valid mainnet and testnet
// signed message, then with a mismatched address, signature, message, and
// network, each of which must fail verification.
func TestVerifySignature(t *testing.T) {
	{
		// Valid signature: mainnet legacy address.
		message := "Test123"
		address := "18J72YSM9pKLvyXX1XAjFXA98zeEvxBYmw"
		signature := "Gzhfsw0ItSrrTCChykFhPujeTyAcvVxiXwywxpHmkwFiKuUR2ETbaoFcocmcSshrtdIjfm8oXlJoTOLosZp3Yc8="
		network := &chaincfg.MainNetParams
		err := VerifySignature(address, message, signature, network)
		assert.NoError(t, err)
	}
	{
		// Valid signature: testnet segwit address.
		address := "tb1qr97cuq4kvq7plfetmxnl6kls46xaka78n2288z"
		message := "The outage comes at a time when bitcoin has been fast approaching new highs not seen since June 26, 2019."
		signature := "H/bSByRH7BW1YydfZlEx9x/nt4EAx/4A691CFlK1URbPEU5tJnTIu4emuzkgZFwC0ptvKuCnyBThnyLDCqPqT10="
		network := &chaincfg.TestNet3Params
		err := VerifySignature(address, message, signature, network)
		assert.NoError(t, err)
	}
	{
		// Mismatched address — verification must fail.
		address := "tb1qp7y2ywgrv8a4t9h47yphtgj8w759rk6vgd9ran"
		message := "The outage comes at a time when bitcoin has been fast approaching new highs not seen since June 26, 2019."
		signature := "H/bSByRH7BW1YydfZlEx9x/nt4EAx/4A691CFlK1URbPEU5tJnTIu4emuzkgZFwC0ptvKuCnyBThnyLDCqPqT10="
		network := &chaincfg.TestNet3Params
		err := VerifySignature(address, message, signature, network)
		assert.Error(t, err)
	}
	{
		// Mismatched signature (taken from the other test case) — must fail.
		address := "tb1qr97cuq4kvq7plfetmxnl6kls46xaka78n2288z"
		message := "The outage comes at a time when bitcoin has been fast approaching new highs not seen since June 26, 2019."
		signature := "Gzhfsw0ItSrrTCChykFhPujeTyAcvVxiXwywxpHmkwFiKuUR2ETbaoFcocmcSshrtdIjfm8oXlJoTOLosZp3Yc8="
		network := &chaincfg.TestNet3Params
		err := VerifySignature(address, message, signature, network)
		assert.Error(t, err)
	}
	{
		// Mismatched message (signature does not cover this text) — must fail.
		address := "tb1qr97cuq4kvq7plfetmxnl6kls46xaka78n2288z"
		message := "Hello World"
		signature := "H/bSByRH7BW1YydfZlEx9x/nt4EAx/4A691CFlK1URbPEU5tJnTIu4emuzkgZFwC0ptvKuCnyBThnyLDCqPqT10="
		network := &chaincfg.TestNet3Params
		err := VerifySignature(address, message, signature, network)
		assert.Error(t, err)
	}
	{
		// Mismatched network (testnet address against mainnet params) — must fail.
		address := "tb1qr97cuq4kvq7plfetmxnl6kls46xaka78n2288z"
		message := "The outage comes at a time when bitcoin has been fast approaching new highs not seen since June 26, 2019."
		signature := "H/bSByRH7BW1YydfZlEx9x/nt4EAx/4A691CFlK1URbPEU5tJnTIu4emuzkgZFwC0ptvKuCnyBThnyLDCqPqT10="
		network := &chaincfg.MainNetParams
		err := VerifySignature(address, message, signature, network)
		assert.Error(t, err)
	}
}

View File

@@ -0,0 +1,10 @@
package btcutils
const (
	// TxVersion is the current latest supported transaction version.
	TxVersion = 2

	// MaxTxInSequenceNum is the maximum sequence number the sequence field
	// of a transaction input can be.
	// NOTE(review): presumably mirrors btcd's wire.MaxTxInSequenceNum
	// (0xffffffff) — confirm against the wire package.
	MaxTxInSequenceNum uint32 = 0xffffffff
)

106
pkg/decimals/decimals.go Normal file
View File

@@ -0,0 +1,106 @@
package decimals
import (
"math"
"math/big"
"reflect"
"github.com/Cleverse/go-utilities/utils"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/uint128"
"github.com/holiman/uint256"
"github.com/jackc/pgx/v5/pgtype"
"github.com/shopspring/decimal"
"golang.org/x/exp/constraints"
)
const (
	// DefaultDivPrecision is the number of fractional digits used for
	// decimal division throughout this process (see init below).
	DefaultDivPrecision = 36
)

// init globally overrides shopspring/decimal's package-level
// DivisionPrecision for the entire process.
// NOTE(review): this is a side-effectful init — merely importing this
// package changes division behavior everywhere; confirm this is intended.
func init() {
	decimal.DivisionPrecision = DefaultDivPrecision
}
// MustFromString parses s into a decimal.Decimal and panics on failure.
// The input must be a valid number: not NaN, Inf, or the empty string.
func MustFromString(s string) decimal.Decimal {
	d := utils.Must(decimal.NewFromString(s))
	return d
}
// ToDecimal converts ivalue to a decimal.Decimal scaled down by 10^decimals
// (a safe alternative to floating-point arithmetic).
//
// Supported ivalue types: string (base-10 digits), *big.Int, all signed and
// unsigned integer types, []byte (big-endian magnitude), uint128.Uint128,
// uint256.Int and *uint256.Int.
// NOTE(review): any other type falls through the switch silently and yields
// decimal zero — confirm callers never pass unsupported types.
func ToDecimal[T constraints.Integer](ivalue any, decimals T) decimal.Decimal {
	value := new(big.Int)
	switch v := ivalue.(type) {
	case string:
		// NOTE(review): SetString failures are ignored here; an invalid
		// string leaves value at zero.
		value.SetString(v, 10)
	case *big.Int:
		value = v
	case int64:
		value = big.NewInt(v)
	case int, int8, int16, int32:
		// reflect covers the remaining signed widths in one case
		rValue := reflect.ValueOf(v)
		value.SetInt64(rValue.Int())
	case uint64:
		value = big.NewInt(0).SetUint64(v)
	case uint, uint8, uint16, uint32:
		// reflect covers the remaining unsigned widths in one case
		rValue := reflect.ValueOf(v)
		value.SetUint64(rValue.Uint())
	case []byte:
		value.SetBytes(v)
	case uint128.Uint128:
		value = v.Big()
	case uint256.Int:
		value = v.ToBig()
	case *uint256.Int:
		value = v.ToBig()
	}
	// decimal exponents are int32, so decimals must fit in int32.
	// NOTE(review): int64(decimals) wraps for uint64 values > MaxInt64 and
	// such inputs could slip past these range checks — confirm T is never
	// instantiated with huge uint64 values.
	switch {
	case int64(decimals) > math.MaxInt32:
		logger.Panic("ToDecimal: decimals is too big, should be equal less than 2^31-1", slogx.Any("decimals", decimals))
	case int64(decimals) < math.MinInt32+1:
		logger.Panic("ToDecimal: decimals is too small, should be greater than -2^31", slogx.Any("decimals", decimals))
	}
	// result = value * 10^(-decimals)
	return decimal.NewFromBigInt(value, -int32(decimals))
}
// ToBigInt converts iamount to a *big.Int scaled up by 10^decimals.
//
// Supported types: string, float64/float32, signed integers,
// decimal.Decimal, *decimal.Decimal, big.Float, *big.Float and
// pgtype.Numeric.
// NOTE(review): unsupported types and unparsable strings silently produce
// zero, and fractional digits beyond decimals are truncated by BigInt() —
// confirm callers accept both behaviors.
func ToBigInt(iamount any, decimals uint16) *big.Int {
	amount := decimal.NewFromFloat(0)
	switch v := iamount.(type) {
	case string:
		// parse error deliberately ignored; amount stays zero
		amount, _ = decimal.NewFromString(v)
	case float64:
		amount = decimal.NewFromFloat(v)
	case float32:
		amount = decimal.NewFromFloat32(v)
	case int64:
		amount = decimal.NewFromInt(v)
	case int, int8, int16, int32:
		// reflect covers the remaining signed widths in one case
		rValue := reflect.ValueOf(v)
		amount = decimal.NewFromInt(rValue.Int())
	case decimal.Decimal:
		amount = v
	case *decimal.Decimal:
		amount = *v
	case big.Float:
		// round-trip through string preserves the printed precision
		amount, _ = decimal.NewFromString(v.String())
	case *big.Float:
		amount, _ = decimal.NewFromString(v.String())
	case pgtype.Numeric:
		// NOTE(review): NaN/undefined Numeric values are not guarded —
		// v.Int may be nil; confirm inputs are always valid numbers.
		amount = decimal.NewFromBigInt(v.Int, v.Exp)
	}
	return amount.Mul(PowerOfTen(decimals)).BigInt()
}
// ToUint256 converts iamount (scaled up by 10^decimals) to a *uint256.Int.
// Panics via logger if the scaled value does not fit in 256 bits.
func ToUint256(iamount any, decimals uint16) *uint256.Int {
	scaled := ToBigInt(iamount, decimals)
	result, overflow := uint256.FromBig(scaled)
	if overflow {
		logger.Panic("ToUint256: overflow", slogx.Any("amount", iamount), slogx.Uint16("decimals", decimals))
	}
	return result
}

View File

@@ -0,0 +1,86 @@
package decimals
import (
"fmt"
"math"
"math/big"
"testing"
"github.com/gaze-network/uint128"
"github.com/holiman/uint256"
"github.com/stretchr/testify/assert"
)
// TestToDecimal covers decimals range validation, every supported input
// type, and maximum-magnitude values for uint64/uint128/uint256.
func TestToDecimal(t *testing.T) {
	t.Run("overflow_decimals", func(t *testing.T) {
		// NOTE(review): math.MaxInt32+1 as an untyped int constant only
		// compiles where int is 64-bit; on 32-bit platforms this line is a
		// constant-overflow compile error — confirm target platforms.
		assert.NotPanics(t, func() { ToDecimal(1, math.MaxInt32-1) }, "in-range decimals shouldn't panic")
		assert.NotPanics(t, func() { ToDecimal(1, math.MinInt32+1) }, "in-range decimals shouldn't panic")
		assert.Panics(t, func() { ToDecimal(1, math.MaxInt32+1) }, "out of range decimals should panic")
		assert.Panics(t, func() { ToDecimal(1, math.MinInt32) }, "out of range decimals should panic")
	})
	t.Run("check_supported_types", func(t *testing.T) {
		testcases := []struct {
			decimals uint16
			value    uint64
			expected string
		}{
			{0, 1, "1"},
			{1, 1, "0.1"},
			{2, 1, "0.01"},
			{3, 1, "0.001"},
			{18, 1, "0.000000000000000001"},
			{36, 1, "0.000000000000000000000000000000000001"},
		}
		// one converter per supported input type, all fed the same uint64
		typesConv := []func(uint64) any{
			func(i uint64) any { return int(i) },
			func(i uint64) any { return int8(i) },
			func(i uint64) any { return int16(i) },
			func(i uint64) any { return int32(i) },
			func(i uint64) any { return int64(i) },
			func(i uint64) any { return uint(i) },
			func(i uint64) any { return uint8(i) },
			func(i uint64) any { return uint16(i) },
			func(i uint64) any { return uint32(i) },
			func(i uint64) any { return uint64(i) },
			func(i uint64) any { return fmt.Sprint(i) },
			func(i uint64) any { return new(big.Int).SetUint64(i) },
			func(i uint64) any { return new(uint128.Uint128).Add64(i) },
			func(i uint64) any { return uint256.NewInt(i) },
		}
		for _, tc := range testcases {
			t.Run(fmt.Sprintf("%d_%d", tc.decimals, tc.value), func(t *testing.T) {
				for _, conv := range typesConv {
					input := conv(tc.value)
					t.Run(fmt.Sprintf("%T", input), func(t *testing.T) {
						actual := ToDecimal(input, tc.decimals)
						assert.Equal(t, tc.expected, actual.String())
					})
				}
			})
		}
	})
	// maximum-magnitude values per integer width
	testcases := []struct {
		decimals uint16
		value    interface{}
		expected string
	}{
		{0, uint64(math.MaxUint64), "18446744073709551615"},
		{18, uint64(math.MaxUint64), "18.446744073709551615"},
		{36, uint64(math.MaxUint64), "0.000000000000000018446744073709551615"},
		/* max uint128 */
		{0, uint128.Max, "340282366920938463463374607431768211455"},
		{18, uint128.Max, "340282366920938463463.374607431768211455"},
		{36, uint128.Max, "340.282366920938463463374607431768211455"},
		/* max uint256 */
		{0, new(uint256.Int).SetAllOne(), "115792089237316195423570985008687907853269984665640564039457584007913129639935"},
		{18, new(uint256.Int).SetAllOne(), "115792089237316195423570985008687907853269984665640564039457.584007913129639935"},
		{36, new(uint256.Int).SetAllOne(), "115792089237316195423570985008687907853269.984665640564039457584007913129639935"},
	}
	for _, tc := range testcases {
		t.Run(fmt.Sprintf("%d_%s", tc.decimals, tc.value), func(t *testing.T) {
			actual := ToDecimal(tc.value, tc.decimals)
			assert.Equal(t, tc.expected, actual.String())
		})
	}
}

View File

@@ -0,0 +1,97 @@
package decimals
import (
"github.com/shopspring/decimal"
"golang.org/x/exp/constraints"
)
// The precomputed table below covers exponents in [-36, 36]; 36 matches
// DefaultDivPrecision, the maximum division precision used in this package.
const (
	minPowerOfTen = -DefaultDivPrecision
	maxPowerOfTen = DefaultDivPrecision
)
// powerOfTen precomputes 10^n for every n in [minPowerOfTen, maxPowerOfTen]
// so PowerOfTen can answer common exponents with a map lookup instead of
// arbitrary-precision arithmetic.
var powerOfTen = map[int64]decimal.Decimal{
	minPowerOfTen: MustFromString("0.000000000000000000000000000000000001"),
	-35:           MustFromString("0.00000000000000000000000000000000001"),
	-34:           MustFromString("0.0000000000000000000000000000000001"),
	-33:           MustFromString("0.000000000000000000000000000000001"),
	-32:           MustFromString("0.00000000000000000000000000000001"),
	-31:           MustFromString("0.0000000000000000000000000000001"),
	-30:           MustFromString("0.000000000000000000000000000001"),
	-29:           MustFromString("0.00000000000000000000000000001"),
	-28:           MustFromString("0.0000000000000000000000000001"),
	-27:           MustFromString("0.000000000000000000000000001"),
	-26:           MustFromString("0.00000000000000000000000001"),
	-25:           MustFromString("0.0000000000000000000000001"),
	-24:           MustFromString("0.000000000000000000000001"),
	-23:           MustFromString("0.00000000000000000000001"),
	-22:           MustFromString("0.0000000000000000000001"),
	-21:           MustFromString("0.000000000000000000001"),
	-20:           MustFromString("0.00000000000000000001"),
	-19:           MustFromString("0.0000000000000000001"),
	-18:           MustFromString("0.000000000000000001"),
	-17:           MustFromString("0.00000000000000001"),
	-16:           MustFromString("0.0000000000000001"),
	-15:           MustFromString("0.000000000000001"),
	-14:           MustFromString("0.00000000000001"),
	-13:           MustFromString("0.0000000000001"),
	-12:           MustFromString("0.000000000001"),
	-11:           MustFromString("0.00000000001"),
	-10:           MustFromString("0.0000000001"),
	-9:            MustFromString("0.000000001"),
	-8:            MustFromString("0.00000001"),
	-7:            MustFromString("0.0000001"),
	-6:            MustFromString("0.000001"),
	-5:            MustFromString("0.00001"),
	-4:            MustFromString("0.0001"),
	-3:            MustFromString("0.001"),
	-2:            MustFromString("0.01"),
	-1:            MustFromString("0.1"),
	0:             MustFromString("1"),
	1:             MustFromString("10"),
	2:             MustFromString("100"),
	3:             MustFromString("1000"),
	4:             MustFromString("10000"),
	5:             MustFromString("100000"),
	6:             MustFromString("1000000"),
	7:             MustFromString("10000000"),
	8:             MustFromString("100000000"),
	9:             MustFromString("1000000000"),
	10:            MustFromString("10000000000"),
	11:            MustFromString("100000000000"),
	12:            MustFromString("1000000000000"),
	13:            MustFromString("10000000000000"),
	14:            MustFromString("100000000000000"),
	15:            MustFromString("1000000000000000"),
	16:            MustFromString("10000000000000000"),
	17:            MustFromString("100000000000000000"),
	18:            MustFromString("1000000000000000000"),
	19:            MustFromString("10000000000000000000"),
	20:            MustFromString("100000000000000000000"),
	21:            MustFromString("1000000000000000000000"),
	22:            MustFromString("10000000000000000000000"),
	23:            MustFromString("100000000000000000000000"),
	24:            MustFromString("1000000000000000000000000"),
	25:            MustFromString("10000000000000000000000000"),
	26:            MustFromString("100000000000000000000000000"),
	27:            MustFromString("1000000000000000000000000000"),
	28:            MustFromString("10000000000000000000000000000"),
	29:            MustFromString("100000000000000000000000000000"),
	30:            MustFromString("1000000000000000000000000000000"),
	31:            MustFromString("10000000000000000000000000000000"),
	32:            MustFromString("100000000000000000000000000000000"),
	33:            MustFromString("1000000000000000000000000000000000"),
	34:            MustFromString("10000000000000000000000000000000000"),
	35:            MustFromString("100000000000000000000000000000000000"),
	maxPowerOfTen: MustFromString("1000000000000000000000000000000000000"),
}
// PowerOfTen returns 10^n as a decimal.Decimal, optimized via a
// precomputed table for n in [-36, 36].
//
// Out-of-table exponents are built exactly with decimal.New(1, n) instead
// of Pow: Pow with a negative exponent performs a division truncated at
// decimal.DivisionPrecision (36 digits here), so the previous fallback
// silently returned 0 for any n < -36. Exponents that do not even fit in
// int32 (the range of decimal exponents) keep the original Pow fallback.
func PowerOfTen[T constraints.Integer](n T) decimal.Decimal {
	nInt64 := int64(n)
	if val, ok := powerOfTen[nInt64]; ok {
		return val
	}
	// exact construction of 1 * 10^n without any division
	if exp := int32(nInt64); int64(exp) == nInt64 {
		return decimal.New(1, exp)
	}
	// exponent exceeds int32; defer to arbitrary-precision Pow
	return powerOfTen[1].Pow(decimal.NewFromInt(nInt64))
}

View File

@@ -0,0 +1,44 @@
package decimals
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestPowerOfTen checks PowerOfTen against a string-built reference for
// every exponent in [-36, 36], then verifies the precomputed constants are
// nonzero and returned verbatim.
func TestPowerOfTen(t *testing.T) {
	for n := int64(-36); n <= 36; n++ {
		t.Run(fmt.Sprint(n), func(t *testing.T) {
			expected := powerOfTenString(n)
			actual := PowerOfTen(n)
			assert.Equal(t, expected, actual.String())
		})
	}
	t.Run("constants", func(t *testing.T) {
		for n, p := range powerOfTen {
			t.Run(p.String(), func(t *testing.T) {
				// a zero entry would mean a typo in the table literal
				require.False(t, p.IsZero(), "power of ten must not be zero")
				actual := PowerOfTen(n)
				assert.Equal(t, p, actual)
			})
		}
	})
}
// powerOfTenString renders 10^n as a plain decimal string by zero-padding
// around a single "1" digit; used as an independent reference in tests.
func powerOfTenString(n int64) string {
	if n >= 0 {
		// "1" followed by n zeros
		out := "1"
		for i := int64(0); i < n; i++ {
			out += "0"
		}
		return out
	}
	// "0." followed by (-n - 1) zeros and a trailing "1"
	out := "0."
	for i := int64(0); i < -n-1; i++ {
		out += "0"
	}
	return out + "1"
}

267
pkg/lru/lru.go Normal file
View File

@@ -0,0 +1,267 @@
// lru a lru-cache package modified version of github.com/hashicorp/golang-lru
// TODO: create PR to hashicorp/golang-lru
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
const (
	// DefaultEvictedBufferSize defines the default buffer size to store evicted key/val
	// pairs before they are handed to the user's eviction callback outside the lock.
	DefaultEvictedBufferSize = 16
)
// Cache is a thread-safe fixed size LRU cache.
type Cache[K comparable, V any] struct {
	lru *simplelru.LRU[K, V] // underlying non-thread-safe LRU
	// evictedKeys/evictedVals buffer evictions recorded while the lock is
	// held, so the user callback can be invoked outside the critical section.
	evictedKeys []K
	evictedVals []V
	onEvictedCB func(k K, v V) // user-supplied eviction callback; may be nil
	lock        sync.RWMutex
}
// New creates an LRU of the given size with no eviction callback.
func New[K comparable, V any](size int) (*Cache[K, V], error) {
	return NewWithEvict[K, V](size, nil)
}
// NewWithEvict constructs a fixed size cache with the given eviction
// callback. size must be positive or the underlying LRU returns an error.
func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) {
	// create a cache with default settings
	c = &Cache[K, V]{
		onEvictedCB: onEvicted,
	}
	if onEvicted != nil {
		c.initEvictBuffers()
		// The inner LRU is given c.onEvicted, which only records evicted
		// pairs into the buffers; the user's callback is invoked later,
		// outside the lock, by the mutating methods.
		onEvicted = c.onEvicted
	}
	c.lru, err = simplelru.NewLRU(size, onEvicted)
	return
}
// initEvictBuffers installs fresh eviction buffers with the default capacity.
func (c *Cache[K, V]) initEvictBuffers() {
	c.evictedVals = make([]V, 0, DefaultEvictedBufferSize)
	c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize)
}
// onEvicted records an evicted key/val pair into the buffers; the pair is
// later delivered to the externally registered callback outside of the
// critical section.
func (c *Cache[K, V]) onEvicted(k K, v V) {
	c.evictedKeys = append(c.evictedKeys, k)
	c.evictedVals = append(c.evictedVals, v)
}
// Purge is used to completely clear the cache. Evictions are collected
// under the lock and the user callback is invoked after the lock is
// released, so a callback that re-enters the cache cannot deadlock.
func (c *Cache[K, V]) Purge() {
	var ks []K
	var vs []V
	c.lock.Lock()
	c.lru.Purge()
	if c.onEvictedCB != nil && len(c.evictedKeys) > 0 {
		// steal the filled buffers and reinstall fresh ones
		ks, vs = c.evictedKeys, c.evictedVals
		c.initEvictBuffers()
	}
	c.lock.Unlock()
	// invoke callback outside of critical section
	if c.onEvictedCB != nil {
		for i := 0; i < len(ks); i++ {
			c.onEvictedCB(ks[i], vs[i])
		}
	}
}
// Add adds a value to the cache. Returns true if an eviction occurred.
// At most one entry can be evicted per Add, so only index 0 of the evict
// buffers is read; the callback runs outside the lock.
func (c *Cache[K, V]) Add(key K, value V) (evicted bool) {
	var k K
	var v V
	c.lock.Lock()
	evicted = c.lru.Add(key, value)
	if c.onEvictedCB != nil && evicted {
		// capture the single evicted pair and reset the buffers in place
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	if c.onEvictedCB != nil && evicted {
		c.onEvictedCB(k, v)
	}
	return
}
// Get looks up a key's value from the cache, promoting the entry to
// most-recently-used (hence the write lock).
func (c *Cache[K, V]) Get(key K) (value V, ok bool) {
	c.lock.Lock()
	defer c.lock.Unlock()
	return c.lru.Get(key)
}
// MGet looks up multiple keys from the cache in one critical section.
// The returned slice matches the order of keys; entries not present come
// back as the zero value. Each hit is promoted to most-recently-used.
func (c *Cache[K, V]) MGet(keys []K) []V {
	c.lock.Lock()
	defer c.lock.Unlock()
	out := make([]V, len(keys))
	for i, key := range keys {
		out[i], _ = c.lru.Get(key)
	}
	return out
}
// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache[K, V]) Contains(key K) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.lru.Contains(key)
}
// Peek returns the key's value (or the zero value if not found) without
// updating the "recently used"-ness of the key.
func (c *Cache[K, V]) Peek(key K) (value V, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.lru.Peek(key)
}
// MPeek returns the values for multiple keys (zero value for misses)
// without updating the "recently used"-ness of any key. The result matches
// the order of keys.
func (c *Cache[K, V]) MPeek(keys []K) []V {
	c.lock.RLock()
	defer c.lock.RUnlock()
	out := make([]V, len(keys))
	for i, key := range keys {
		out[i], _ = c.lru.Peek(key)
	}
	return out
}
// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred. An eventual
// eviction callback runs outside the lock.
func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) {
	var k K
	var v V
	c.lock.Lock()
	if c.lru.Contains(key) {
		c.lock.Unlock()
		return true, false
	}
	evicted = c.lru.Add(key, value)
	if c.onEvictedCB != nil && evicted {
		// a single Add evicts at most one entry
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	if c.onEvictedCB != nil && evicted {
		c.onEvictedCB(k, v)
	}
	return false, evicted
}
// PeekOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns the previous value (when found), whether it was found, and
// whether an eviction occurred. An eventual eviction callback runs outside
// the lock.
func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) {
	var k K
	var v V
	c.lock.Lock()
	previous, ok = c.lru.Peek(key)
	if ok {
		c.lock.Unlock()
		return previous, true, false
	}
	evicted = c.lru.Add(key, value)
	if c.onEvictedCB != nil && evicted {
		// a single Add evicts at most one entry
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	if c.onEvictedCB != nil && evicted {
		c.onEvictedCB(k, v)
	}
	return
}
// Remove removes the provided key from the cache, reporting whether it was
// present. Removal counts as an eviction: the callback (if any) receives
// the removed pair, outside the lock.
func (c *Cache[K, V]) Remove(key K) (present bool) {
	var k K
	var v V
	c.lock.Lock()
	present = c.lru.Remove(key)
	if c.onEvictedCB != nil && present {
		// Remove evicts exactly the one removed entry
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	if c.onEvictedCB != nil && present {
		c.onEvictedCB(k, v)
	}
	return
}
// Resize changes the cache size, returning how many entries were evicted
// by a downsize. Multiple evictions are possible, so the whole buffers are
// stolen and replayed through the callback outside the lock.
func (c *Cache[K, V]) Resize(size int) (evicted int) {
	var ks []K
	var vs []V
	c.lock.Lock()
	evicted = c.lru.Resize(size)
	if c.onEvictedCB != nil && evicted > 0 {
		// steal the filled buffers and reinstall fresh ones
		ks, vs = c.evictedKeys, c.evictedVals
		c.initEvictBuffers()
	}
	c.lock.Unlock()
	if c.onEvictedCB != nil && evicted > 0 {
		for i := 0; i < len(ks); i++ {
			c.onEvictedCB(ks[i], vs[i])
		}
	}
	return evicted
}
// RemoveOldest removes the oldest item from the cache, returning it and
// whether the cache was non-empty. The eviction callback (if any) receives
// the removed pair outside the lock.
func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) {
	var k K
	var v V
	c.lock.Lock()
	key, value, ok = c.lru.RemoveOldest()
	if c.onEvictedCB != nil && ok {
		// exactly one entry was evicted
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	if c.onEvictedCB != nil && ok {
		c.onEvictedCB(k, v)
	}
	return
}
// GetOldest returns the oldest entry without removing it or updating
// recent-ness.
func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.lru.GetOldest()
}
// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache[K, V]) Keys() []K {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.lru.Keys()
}
// Len returns the number of items currently in the cache.
func (c *Cache[K, V]) Len() int {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.lru.Len()
}

368
pkg/lru/lru_test.go Normal file
View File

@@ -0,0 +1,368 @@
package lru
import (
"crypto/rand"
"math"
"math/big"
"testing"
)
// getRand returns a cryptographically random int64 in [0, MaxInt64),
// failing the test/benchmark on RNG errors.
func getRand(tb testing.TB) int64 {
	n, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		tb.Fatal(err)
	}
	return n.Int64()
}
// BenchmarkLRU_Rand benchmarks alternating Add/Get with uniformly random
// keys over a keyspace 4x the cache size, logging the hit/miss ratio.
func BenchmarkLRU_Rand(b *testing.B) {
	l, err := New[int64, int64](8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}
	// pre-generate the key trace so RNG cost is outside the timed loop
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		trace[i] = getRand(b) % 32768
	}
	b.ResetTimer()
	var hit, miss int
	for i := 0; i < 2*b.N; i++ {
		if i%2 == 0 {
			l.Add(trace[i], trace[i])
		} else {
			if _, ok := l.Get(trace[i]); ok {
				hit++
			} else {
				miss++
			}
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
// BenchmarkLRU_Freq benchmarks a skewed workload: half the trace comes from
// a hot keyspace (2x cache size) and half from a cold one (4x), with all
// Adds first and all Gets after, logging the hit/miss ratio.
func BenchmarkLRU_Freq(b *testing.B) {
	l, err := New[int64, int64](8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		if i%2 == 0 {
			trace[i] = getRand(b) % 16384
		} else {
			trace[i] = getRand(b) % 32768
		}
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		l.Add(trace[i], trace[i])
	}
	var hit, miss int
	for i := 0; i < b.N; i++ {
		if _, ok := l.Get(trace[i]); ok {
			hit++
		} else {
			miss++
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
// TestLRU covers the core lifecycle: overflow eviction with callback
// accounting, key ordering, Remove, Get-promotion, MGet (including misses),
// and Purge.
func TestLRU(t *testing.T) {
	evictCounter := 0
	onEvicted := func(k int, v int) {
		if k != v {
			t.Fatalf("Evict values not equal (%v!=%v)", k, v)
		}
		evictCounter++
	}
	l, err := NewWithEvict(128, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// inserting 256 into a 128-cap cache must evict the first 128
	for i := 0; i < 256; i++ {
		l.Add(i, i)
	}
	if l.Len() != 128 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if evictCounter != 128 {
		t.Fatalf("bad evict count: %v", evictCounter)
	}
	// keys are oldest-to-newest: 128..255
	for i, k := range l.Keys() {
		if v, ok := l.Get(k); !ok || v != k || v != i+128 {
			t.Fatalf("bad key: %v", k)
		}
	}
	for i := 0; i < 128; i++ {
		if _, ok := l.Get(i); ok {
			t.Fatalf("should be evicted")
		}
	}
	for i := 128; i < 256; i++ {
		if _, ok := l.Get(i); !ok {
			t.Fatalf("should not be evicted")
		}
	}
	for i := 128; i < 192; i++ {
		l.Remove(i)
		if _, ok := l.Get(i); ok {
			t.Fatalf("should be deleted")
		}
	}
	l.Get(192) // expect 192 to be last key in l.Keys()
	for i, k := range l.Keys() {
		if (i < 63 && k != i+193) || (i == 63 && k != 192) {
			t.Fatalf("out of order key: %v", k)
		}
	}
	{
		// test mget
		keys := l.Keys()
		values := l.MGet(keys)
		for i, v := range values {
			if keys[i] != v {
				t.Fatalf("[%d]bad value: %v:%v", i, keys[i], v)
			}
		}
	}
	{
		// test mget with missing keys: miss slots must hold the zero value
		keys := append([]int{-1}, l.Keys()...)
		values := l.MGet(keys)
		if len(values) != len(keys) {
			t.Fatalf("bad len: %v, expected: %v", len(values), len(keys))
		}
		if values[0] != 0 {
			t.Fatalf("bad value: %v, expected: %v", values[0], 0)
		}
		for i := 1; i < len(values); i++ {
			if keys[i] != values[i] {
				t.Fatalf("[%d]bad value: %v:%v", i, keys[i], values[i])
			}
		}
	}
	l.Purge()
	if l.Len() != 0 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if _, ok := l.Get(200); ok {
		t.Fatalf("should contain nothing")
	}
}
// test that Add returns true/false if an eviction occurred
func TestLRUAdd(t *testing.T) {
	evictCounter := 0
	onEvicted := func(k int, v int) {
		evictCounter++
	}
	// size-1 cache: the second Add must evict the first entry
	l, err := NewWithEvict(1, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if l.Add(1, 1) == true || evictCounter != 0 {
		t.Errorf("should not have an eviction")
	}
	if l.Add(2, 2) == false || evictCounter != 1 {
		t.Errorf("should have an eviction")
	}
}
// test that Contains doesn't update recent-ness
func TestLRUContains(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	if !l.Contains(1) {
		t.Errorf("1 should be contained")
	}
	// if Contains had promoted key 1, Add(3) would have evicted 2 instead
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("Contains should not have updated recent-ness of 1")
	}
}
// test that ContainsOrAdd doesn't update recent-ness
func TestLRUContainsOrAdd(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	contains, evict := l.ContainsOrAdd(1, 1)
	if !contains {
		t.Errorf("1 should be contained")
	}
	if evict {
		t.Errorf("nothing should be evicted here")
	}
	// Add(3) evicts 1 (not promoted by ContainsOrAdd above)
	l.Add(3, 3)
	contains, evict = l.ContainsOrAdd(1, 1)
	if contains {
		t.Errorf("1 should not have been contained")
	}
	if !evict {
		t.Errorf("an eviction should have occurred")
	}
	if !l.Contains(1) {
		t.Errorf("now 1 should be contained")
	}
}
// test that PeekOrAdd doesn't update recent-ness
func TestLRUPeekOrAdd(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	previous, contains, evict := l.PeekOrAdd(1, 1)
	if !contains {
		t.Errorf("1 should be contained")
	}
	if evict {
		t.Errorf("nothing should be evicted here")
	}
	if previous != 1 {
		t.Errorf("previous is not equal to 1")
	}
	l.Add(3, 3)
	// NOTE(review): this second half calls ContainsOrAdd rather than
	// PeekOrAdd — looks like a copy-paste from TestLRUContainsOrAdd;
	// confirm whether the miss path of PeekOrAdd should be exercised here.
	contains, evict = l.ContainsOrAdd(1, 1)
	if contains {
		t.Errorf("1 should not have been contained")
	}
	if !evict {
		t.Errorf("an eviction should have occurred")
	}
	if !l.Contains(1) {
		t.Errorf("now 1 should be contained")
	}
}
// test that Peek doesn't update recent-ness
func TestLRUPeek(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	if v, ok := l.Peek(1); !ok || v != 1 {
		t.Errorf("1 should be set to 1: %v, %v", v, ok)
	}
	// MPeek: missing key -1 yields the zero value in slot 0
	vals := l.MPeek([]int{-1, 1, 2})
	if len(vals) != 3 {
		t.Errorf("bad len: %v", len(vals))
	}
	if vals[0] != 0 {
		t.Errorf("bad value: %v, expected: %v", vals[0], 0)
	}
	if vals[1] != 1 || vals[2] != 2 {
		t.Errorf("bad vals: %v", vals)
	}
	// if Peek/MPeek had promoted key 1, Add(3) would have kept it
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("should not have updated recent-ness of 1")
	}
}
// test that Resize can upsize and downsize
func TestLRUResize(t *testing.T) {
	onEvictCounter := 0
	onEvicted := func(k int, v int) {
		onEvictCounter++
	}
	l, err := NewWithEvict(2, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// Downsize: 2 entries into capacity 1 must evict the oldest (1)
	l.Add(1, 1)
	l.Add(2, 2)
	evicted := l.Resize(1)
	if evicted != 1 {
		t.Errorf("1 element should have been evicted: %v", evicted)
	}
	if onEvictCounter != 1 {
		t.Errorf("onEvicted should have been called 1 time: %v", onEvictCounter)
	}
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("Element 1 should have been evicted")
	}
	// Upsize: growing capacity must evict nothing
	evicted = l.Resize(2)
	if evicted != 0 {
		t.Errorf("0 elements should have been evicted: %v", evicted)
	}
	l.Add(4, 4)
	if !l.Contains(3) || !l.Contains(4) {
		t.Errorf("Cache should have contained 2 elements")
	}
}
// TestKeysAndMPeek verifies Keys ordering (oldest to newest) and MPeek
// results, including the empty cache and a missing key.
func TestKeysAndMPeek(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// empty cache: MPeek over no keys yields an empty slice
	if vals := l.MPeek(l.Keys()); len(vals) != 0 {
		t.Errorf("bad len: %v", len(vals))
	}
	l.Add(1, 1)
	l.Add(2, 2)
	keys := l.Keys()
	if len(keys) != 2 {
		t.Errorf("bad len: %v", len(keys))
	}
	if keys[0] != 1 || keys[1] != 2 {
		t.Errorf("bad keys: %v", keys)
	}
	// missing key -1 must come back as the zero value
	vals := l.MPeek([]int{-1, 1, 2})
	if len(vals) != 3 {
		t.Errorf("bad len: %v", len(vals))
	}
	if vals[0] != 0 {
		t.Errorf("bad value: %v, expected: %v", vals[0], 0)
	}
	if vals[1] != 1 || vals[2] != 2 {
		t.Errorf("bad vals: %v", vals)
	}
}

Some files were not shown because too many files have changed in this diff Show More