mirror of
https://github.com/alexgo-io/gaze-indexer.git
synced 2026-01-12 22:43:22 +08:00
Compare commits
58 Commits
develop
...
v0.3.0.brc
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2c016f36c1 | ||
|
|
980163900c | ||
|
|
8110434e18 | ||
|
|
3603248485 | ||
|
|
2c5a6076ff | ||
|
|
05d7fecf69 | ||
|
|
49eff4f3ba | ||
|
|
e4d41cc7a4 | ||
|
|
b45dfd066a | ||
|
|
2ae5b0835d | ||
|
|
132dcde715 | ||
|
|
4228730a34 | ||
|
|
f3ff5ecb7d | ||
|
|
99bdf49f02 | ||
|
|
806d27fb46 | ||
|
|
7453abec99 | ||
|
|
0d075c31f8 | ||
|
|
605ea63167 | ||
|
|
3fa0a7d975 | ||
|
|
14142096af | ||
|
|
2bb1bad449 | ||
|
|
ccdc4c56ff | ||
|
|
f3c6180c17 | ||
|
|
ce11033919 | ||
|
|
033dbf7324 | ||
|
|
38c37189fc | ||
|
|
6d1db50890 | ||
|
|
0a3800c68a | ||
|
|
cab7d0448e | ||
|
|
7c555fe373 | ||
|
|
a082a35bb6 | ||
|
|
180ea17284 | ||
|
|
fc48ba50b8 | ||
|
|
0305762166 | ||
|
|
83b22eb883 | ||
|
|
45f106995a | ||
|
|
d6c3f90d8c | ||
|
|
ef575dea85 | ||
|
|
d70accc80e | ||
|
|
d8385125c3 | ||
|
|
dc44e4bb5c | ||
|
|
f0cb5d651b | ||
|
|
4ae169218f | ||
|
|
c41da88808 | ||
|
|
94b228dd75 | ||
|
|
518a07e920 | ||
|
|
6512f18f94 | ||
|
|
27161f827d | ||
|
|
db209f68ad | ||
|
|
bb03d439f5 | ||
|
|
71641dd2fb | ||
|
|
3bb26d012b | ||
|
|
bb3c24b472 | ||
|
|
eb65b8dcbc | ||
|
|
b3363c7983 | ||
|
|
c8e03e8056 | ||
|
|
131afac8c2 | ||
|
|
2befd8b124 |
2
.vscode/settings.json
vendored
2
.vscode/settings.json
vendored
@@ -39,7 +39,7 @@
|
||||
"ui.completion.usePlaceholders": false,
|
||||
"ui.diagnostic.analyses": {
|
||||
// https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md
|
||||
// "fieldalignment": false,
|
||||
"fieldalignment": false,
|
||||
"nilness": true,
|
||||
"shadow": false,
|
||||
"unusedparams": true,
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/indexer"
|
||||
"github.com/gaze-network/indexer-network/internal/config"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20"
|
||||
"github.com/gaze-network/indexer-network/modules/runes"
|
||||
"github.com/gaze-network/indexer-network/pkg/automaxprocs"
|
||||
"github.com/gaze-network/indexer-network/pkg/errorhandler"
|
||||
@@ -34,6 +35,7 @@ import (
|
||||
// Register Modules
|
||||
var Modules = do.Package(
|
||||
do.LazyNamed("runes", runes.New),
|
||||
do.LazyNamed("brc20", brc20.New),
|
||||
)
|
||||
|
||||
func NewRunCommand() *cobra.Command {
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
|
||||
type migrateDownCmdOptions struct {
|
||||
DatabaseURL string
|
||||
Runes bool
|
||||
Modules string
|
||||
All bool
|
||||
}
|
||||
|
||||
@@ -59,7 +59,7 @@ func NewMigrateDownCommand() *cobra.Command {
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes down migrations")
|
||||
flags.StringVar(&opts.Modules, "modules", "", "Modules to apply up migrations")
|
||||
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")
|
||||
flags.BoolVar(&opts.All, "all", false, "Confirm apply ALL down migrations without prompt")
|
||||
|
||||
@@ -87,6 +87,8 @@ func migrateDownHandler(opts *migrateDownCmdOptions, _ *cobra.Command, args migr
|
||||
}
|
||||
}
|
||||
|
||||
modules := strings.Split(opts.Modules, ",")
|
||||
|
||||
applyDownMigrations := func(module string, sourcePath string, migrationTable string) error {
|
||||
newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}})
|
||||
sourceURL := "file://" + sourcePath
|
||||
@@ -116,10 +118,15 @@ func migrateDownHandler(opts *migrateDownCmdOptions, _ *cobra.Command, args migr
|
||||
return nil
|
||||
}
|
||||
|
||||
if opts.Runes {
|
||||
if lo.Contains(modules, "runes") {
|
||||
if err := applyDownMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
if lo.Contains(modules, "brc20") {
|
||||
if err := applyDownMigrations("BRC20", brc20MigrationSource, "brc20_schema_migrations"); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -11,12 +11,13 @@ import (
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
_ "github.com/golang-migrate/migrate/v4/database/postgres"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
"github.com/samber/lo"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type migrateUpCmdOptions struct {
|
||||
DatabaseURL string
|
||||
Runes bool
|
||||
Modules string
|
||||
}
|
||||
|
||||
type migrateUpCmdArgs struct {
|
||||
@@ -54,7 +55,7 @@ func NewMigrateUpCommand() *cobra.Command {
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes up migrations")
|
||||
flags.StringVar(&opts.Modules, "modules", "", "Modules to apply up migrations")
|
||||
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")
|
||||
|
||||
return cmd
|
||||
@@ -72,6 +73,8 @@ func migrateUpHandler(opts *migrateUpCmdOptions, _ *cobra.Command, args migrateU
|
||||
return errors.Errorf("unsupported database driver: %s", databaseURL.Scheme)
|
||||
}
|
||||
|
||||
modules := strings.Split(opts.Modules, ",")
|
||||
|
||||
applyUpMigrations := func(module string, sourcePath string, migrationTable string) error {
|
||||
newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}})
|
||||
sourceURL := "file://" + sourcePath
|
||||
@@ -101,10 +104,15 @@ func migrateUpHandler(opts *migrateUpCmdOptions, _ *cobra.Command, args migrateU
|
||||
return nil
|
||||
}
|
||||
|
||||
if opts.Runes {
|
||||
if lo.Contains(modules, "runes") {
|
||||
if err := applyUpMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
if lo.Contains(modules, "brc20") {
|
||||
if err := applyUpMigrations("BRC20", brc20MigrationSource, "brc20_schema_migrations"); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import "net/url"
|
||||
|
||||
const (
|
||||
runesMigrationSource = "modules/runes/database/postgresql/migrations"
|
||||
brc20MigrationSource = "modules/brc20/database/postgresql/migrations"
|
||||
)
|
||||
|
||||
func cloneURLWithQuery(u *url.URL, newQuery url.Values) *url.URL {
|
||||
|
||||
@@ -292,3 +292,19 @@ func (d *BitcoinNodeDatasource) GetBlockHeader(ctx context.Context, height int64
|
||||
|
||||
return types.ParseMsgBlockHeader(*block, height), nil
|
||||
}
|
||||
|
||||
// GetTransaction fetch transaction from Bitcoin node
|
||||
func (d *BitcoinNodeDatasource) GetTransactionOutputs(ctx context.Context, txHash chainhash.Hash) ([]*types.TxOut, error) {
|
||||
rawTx, err := d.btcclient.GetRawTransaction(&txHash)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get raw transaction")
|
||||
}
|
||||
|
||||
msgTx := rawTx.MsgTx()
|
||||
txOuts := make([]*types.TxOut, 0, len(msgTx.TxOut))
|
||||
for _, txOut := range msgTx.TxOut {
|
||||
txOuts = append(txOuts, types.ParseTxOut(txOut))
|
||||
}
|
||||
|
||||
return txOuts, nil
|
||||
}
|
||||
|
||||
1
go.mod
1
go.mod
@@ -11,6 +11,7 @@ require (
|
||||
github.com/gaze-network/uint128 v1.3.0
|
||||
github.com/gofiber/fiber/v2 v2.52.4
|
||||
github.com/golang-migrate/migrate/v4 v4.17.1
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7
|
||||
github.com/jackc/pgx/v5 v5.5.5
|
||||
github.com/mcosta74/pgx-slog v0.3.0
|
||||
github.com/planxnx/concurrent-stream v0.1.5
|
||||
|
||||
4
go.sum
4
go.sum
@@ -92,6 +92,7 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
@@ -107,6 +108,8 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
@@ -222,6 +225,7 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
brc20config "github.com/gaze-network/indexer-network/modules/brc20/config"
|
||||
runesconfig "github.com/gaze-network/indexer-network/modules/runes/config"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
@@ -60,6 +61,7 @@ type BitcoinNodeClient struct {
|
||||
|
||||
type Modules struct {
|
||||
Runes runesconfig.Config `mapstructure:"runes"`
|
||||
BRC20 brc20config.Config `mapstructure:"brc20"`
|
||||
}
|
||||
|
||||
type HTTPServerConfig struct {
|
||||
|
||||
71
modules/brc20/brc20.go
Normal file
71
modules/brc20/brc20.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package brc20
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/btcsuite/btcd/rpcclient"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/datasources"
|
||||
"github.com/gaze-network/indexer-network/core/indexer"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/internal/config"
|
||||
"github.com/gaze-network/indexer-network/internal/postgres"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
|
||||
brc20postgres "github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres"
|
||||
"github.com/gaze-network/indexer-network/pkg/btcclient"
|
||||
"github.com/samber/do/v2"
|
||||
)
|
||||
|
||||
func New(injector do.Injector) (indexer.IndexerWorker, error) {
|
||||
ctx := do.MustInvoke[context.Context](injector)
|
||||
conf := do.MustInvoke[config.Config](injector)
|
||||
// reportingClient := do.MustInvoke[*reportingclient.ReportingClient](injector)
|
||||
|
||||
cleanupFuncs := make([]func(context.Context) error, 0)
|
||||
var brc20Dg datagateway.BRC20DataGateway
|
||||
var indexerInfoDg datagateway.IndexerInfoDataGateway
|
||||
switch strings.ToLower(conf.Modules.BRC20.Database) {
|
||||
case "postgresql", "postgres", "pg":
|
||||
pg, err := postgres.NewPool(ctx, conf.Modules.BRC20.Postgres)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.InvalidArgument) {
|
||||
return nil, errors.Wrap(err, "Invalid Postgres configuration for indexer")
|
||||
}
|
||||
return nil, errors.Wrap(err, "can't create Postgres connection pool")
|
||||
}
|
||||
cleanupFuncs = append(cleanupFuncs, func(ctx context.Context) error {
|
||||
pg.Close()
|
||||
return nil
|
||||
})
|
||||
brc20Repo := brc20postgres.NewRepository(pg)
|
||||
brc20Dg = brc20Repo
|
||||
indexerInfoDg = brc20Repo
|
||||
default:
|
||||
return nil, errors.Wrapf(errs.Unsupported, "%q database for indexer is not supported", conf.Modules.BRC20.Database)
|
||||
}
|
||||
|
||||
var bitcoinDatasource datasources.Datasource[*types.Block]
|
||||
var bitcoinClient btcclient.Contract
|
||||
switch strings.ToLower(conf.Modules.BRC20.Datasource) {
|
||||
case "bitcoin-node":
|
||||
btcClient := do.MustInvoke[*rpcclient.Client](injector)
|
||||
bitcoinNodeDatasource := datasources.NewBitcoinNode(btcClient)
|
||||
bitcoinDatasource = bitcoinNodeDatasource
|
||||
bitcoinClient = bitcoinNodeDatasource
|
||||
default:
|
||||
return nil, errors.Wrapf(errs.Unsupported, "%q datasource is not supported", conf.Modules.BRC20.Datasource)
|
||||
}
|
||||
|
||||
processor, err := NewProcessor(brc20Dg, indexerInfoDg, bitcoinClient, conf.Network, cleanupFuncs)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
if err := processor.VerifyStates(ctx); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
indexer := indexer.New(processor, bitcoinDatasource)
|
||||
return indexer, nil
|
||||
}
|
||||
10
modules/brc20/config/config.go
Normal file
10
modules/brc20/config/config.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package config
|
||||
|
||||
import "github.com/gaze-network/indexer-network/internal/postgres"
|
||||
|
||||
type Config struct {
|
||||
Datasource string `mapstructure:"datasource"` // Datasource to fetch bitcoin data for Meta-Protocol e.g. `bitcoin-node`
|
||||
Database string `mapstructure:"database"` // Database to store data.
|
||||
APIHandlers []string `mapstructure:"api_handlers"` // List of API handlers to enable. (e.g. `http`)
|
||||
Postgres postgres.Config `mapstructure:"postgres"`
|
||||
}
|
||||
25
modules/brc20/constants.go
Normal file
25
modules/brc20/constants.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package brc20
|
||||
|
||||
import (
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
)
|
||||
|
||||
const (
|
||||
ClientVersion = "v0.0.1"
|
||||
DBVersion = 1
|
||||
EventHashVersion = 1
|
||||
)
|
||||
|
||||
var startingBlockHeader = map[common.Network]types.BlockHeader{
|
||||
common.NetworkMainnet: {
|
||||
Height: 767429,
|
||||
Hash: *utils.Must(chainhash.NewHashFromStr("00000000000000000002b35aef66eb15cd2b232a800f75a2f25cedca4cfe52c4")),
|
||||
},
|
||||
common.NetworkTestnet: {
|
||||
Height: 2413342,
|
||||
Hash: *utils.Must(chainhash.NewHashFromStr("00000000000022e97030b143af785de812f836dd0651b6ac2b7dd9e90dc9abf9")),
|
||||
},
|
||||
}
|
||||
@@ -0,0 +1,17 @@
|
||||
BEGIN;
|
||||
|
||||
DROP TABLE IF EXISTS "brc20_indexer_states";
|
||||
DROP TABLE IF EXISTS "brc20_indexed_blocks";
|
||||
DROP TABLE IF EXISTS "brc20_processor_stats";
|
||||
DROP TABLE IF EXISTS "brc20_tick_entries";
|
||||
DROP TABLE IF EXISTS "brc20_tick_entry_states";
|
||||
DROP TABLE IF EXISTS "brc20_event_deploys";
|
||||
DROP TABLE IF EXISTS "brc20_event_mints";
|
||||
DROP TABLE IF EXISTS "brc20_event_inscribe_transfers";
|
||||
DROP TABLE IF EXISTS "brc20_event_transfer_transfers";
|
||||
DROP TABLE IF EXISTS "brc20_balances";
|
||||
DROP TABLE IF EXISTS "brc20_inscription_entries";
|
||||
DROP TABLE IF EXISTS "brc20_inscription_entry_states";
|
||||
DROP TABLE IF EXISTS "brc20_inscription_transfers";
|
||||
|
||||
COMMIT;
|
||||
@@ -0,0 +1,189 @@
|
||||
BEGIN;
|
||||
|
||||
-- Indexer Client Information
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_indexer_states" (
|
||||
"id" BIGSERIAL PRIMARY KEY,
|
||||
"client_version" TEXT NOT NULL,
|
||||
"network" TEXT NOT NULL,
|
||||
"db_version" INT NOT NULL,
|
||||
"event_hash_version" INT NOT NULL,
|
||||
"created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS brc20_indexer_state_created_at_idx ON "brc20_indexer_states" USING BTREE ("created_at" DESC);
|
||||
|
||||
-- BRC20 data
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_indexed_blocks" (
|
||||
"height" INT NOT NULL PRIMARY KEY,
|
||||
"hash" TEXT NOT NULL,
|
||||
"event_hash" TEXT NOT NULL,
|
||||
"cumulative_event_hash" TEXT NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_processor_stats" (
|
||||
"block_height" INT NOT NULL PRIMARY KEY,
|
||||
"cursed_inscription_count" INT NOT NULL,
|
||||
"blessed_inscription_count" INT NOT NULL,
|
||||
"lost_sats" BIGINT NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_tick_entries" (
|
||||
"tick" TEXT NOT NULL PRIMARY KEY, -- lowercase of original_tick
|
||||
"original_tick" TEXT NOT NULL,
|
||||
"total_supply" DECIMAL NOT NULL,
|
||||
"decimals" SMALLINT NOT NULL,
|
||||
"limit_per_mint" DECIMAL NOT NULL,
|
||||
"is_self_mint" BOOLEAN NOT NULL,
|
||||
"deploy_inscription_id" TEXT NOT NULL,
|
||||
"deployed_at" TIMESTAMP NOT NULL,
|
||||
"deployed_at_height" INT NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_tick_entry_states" (
|
||||
"tick" TEXT NOT NULL,
|
||||
"block_height" INT NOT NULL,
|
||||
"minted_amount" DECIMAL NOT NULL,
|
||||
"burned_amount" DECIMAL NOT NULL,
|
||||
"completed_at" TIMESTAMP,
|
||||
"completed_at_height" INT,
|
||||
PRIMARY KEY ("tick", "block_height")
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_event_deploys" (
|
||||
"id" BIGINT PRIMARY KEY NOT NULL,
|
||||
"inscription_id" TEXT NOT NULL,
|
||||
"inscription_number" BIGINT NOT NULL,
|
||||
"tick" TEXT NOT NULL, -- lowercase of original_tick
|
||||
"original_tick" TEXT NOT NULL,
|
||||
"tx_hash" TEXT NOT NULL,
|
||||
"block_height" INT NOT NULL,
|
||||
"tx_index" INT NOT NULL,
|
||||
"timestamp" TIMESTAMP NOT NULL,
|
||||
|
||||
"pkscript" TEXT NOT NULL,
|
||||
"satpoint" TEXT NOT NULL,
|
||||
"total_supply" DECIMAL NOT NULL,
|
||||
"decimals" SMALLINT NOT NULL,
|
||||
"limit_per_mint" DECIMAL NOT NULL,
|
||||
"is_self_mint" BOOLEAN NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS brc20_event_deploys_block_height_idx ON "brc20_event_deploys" USING BTREE ("block_height");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_event_mints" (
|
||||
"id" BIGINT PRIMARY KEY NOT NULL,
|
||||
"inscription_id" TEXT NOT NULL,
|
||||
"inscription_number" BIGINT NOT NULL,
|
||||
"tick" TEXT NOT NULL, -- lowercase of original_tick
|
||||
"original_tick" TEXT NOT NULL,
|
||||
"tx_hash" TEXT NOT NULL,
|
||||
"block_height" INT NOT NULL,
|
||||
"tx_index" INT NOT NULL,
|
||||
"timestamp" TIMESTAMP NOT NULL,
|
||||
|
||||
"pkscript" TEXT NOT NULL,
|
||||
"satpoint" TEXT NOT NULL,
|
||||
"amount" DECIMAL NOT NULL,
|
||||
"parent_id" TEXT -- requires parent deploy inscription id if minting a self-mint ticker
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS brc20_event_mints_block_height_idx ON "brc20_event_mints" USING BTREE ("block_height");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_event_inscribe_transfers" (
|
||||
"id" BIGINT PRIMARY KEY NOT NULL,
|
||||
"inscription_id" TEXT NOT NULL,
|
||||
"inscription_number" BIGINT NOT NULL,
|
||||
"tick" TEXT NOT NULL, -- lowercase of original_tick
|
||||
"original_tick" TEXT NOT NULL,
|
||||
"tx_hash" TEXT NOT NULL,
|
||||
"block_height" INT NOT NULL,
|
||||
"tx_index" INT NOT NULL,
|
||||
"timestamp" TIMESTAMP NOT NULL,
|
||||
|
||||
"pkscript" TEXT NOT NULL,
|
||||
"satpoint" TEXT NOT NULL,
|
||||
"output_index" INT NOT NULL,
|
||||
"sats_amount" BIGINT NOT NULL,
|
||||
"amount" DECIMAL NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS brc20_event_inscribe_transfers_block_height_idx ON "brc20_event_inscribe_transfers" USING BTREE ("block_height");
|
||||
CREATE INDEX IF NOT EXISTS brc20_event_inscribe_transfers_inscription_id_idx ON "brc20_event_inscribe_transfers" USING BTREE ("inscription_id"); -- used for validating transfer transfer events
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_event_transfer_transfers" (
|
||||
"id" BIGINT PRIMARY KEY NOT NULL,
|
||||
"inscription_id" TEXT NOT NULL,
|
||||
"inscription_number" BIGINT NOT NULL,
|
||||
"tick" TEXT NOT NULL, -- lowercase of original_tick
|
||||
"original_tick" TEXT NOT NULL,
|
||||
"tx_hash" TEXT NOT NULL,
|
||||
"block_height" INT NOT NULL,
|
||||
"tx_index" INT NOT NULL,
|
||||
"timestamp" TIMESTAMP NOT NULL,
|
||||
|
||||
"from_pkscript" TEXT NOT NULL,
|
||||
"from_satpoint" TEXT NOT NULL,
|
||||
"from_input_index" INT NOT NULL,
|
||||
"to_pkscript" TEXT NOT NULL,
|
||||
"to_satpoint" TEXT NOT NULL,
|
||||
"to_output_index" INT NOT NULL,
|
||||
"spent_as_fee" BOOLEAN NOT NULL,
|
||||
"amount" DECIMAL NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS brc20_event_transfer_transfers_block_height_idx ON "brc20_event_transfer_transfers" USING BTREE ("block_height");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_balances" (
|
||||
"pkscript" TEXT NOT NULL,
|
||||
"block_height" INT NOT NULL,
|
||||
"tick" TEXT NOT NULL,
|
||||
"overall_balance" DECIMAL NOT NULL,
|
||||
"available_balance" DECIMAL NOT NULL,
|
||||
PRIMARY KEY ("pkscript", "tick", "block_height")
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_inscription_entries" (
|
||||
"id" TEXT NOT NULL PRIMARY KEY,
|
||||
"number" BIGINT NOT NULL,
|
||||
"sequence_number" BIGINT NOT NULL,
|
||||
"delegate" TEXT, -- delegate inscription id
|
||||
"metadata" BYTEA,
|
||||
"metaprotocol" TEXT,
|
||||
"parents" TEXT[], -- parent inscription id, 0.14 only supports 1 parent per inscription
|
||||
"pointer" BIGINT,
|
||||
"content" JSONB, -- can use jsonb because we only track brc20 inscriptions
|
||||
"content_encoding" TEXT,
|
||||
"content_type" TEXT,
|
||||
"cursed" BOOLEAN NOT NULL, -- inscriptions after jubilee are no longer cursed in 0.14, which affects inscription number
|
||||
"cursed_for_brc20" BOOLEAN NOT NULL, -- however, inscriptions that would normally be cursed are still considered cursed for brc20
|
||||
"created_at" TIMESTAMP NOT NULL,
|
||||
"created_at_height" INT NOT NULL
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS brc20_inscription_entries_id_number_idx ON "brc20_inscription_entries" USING BTREE ("id", "number");
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_inscription_entry_states" (
|
||||
"id" TEXT NOT NULL,
|
||||
"block_height" INT NOT NULL,
|
||||
"transfer_count" INT NOT NULL,
|
||||
PRIMARY KEY ("id", "block_height")
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS "brc20_inscription_transfers" (
|
||||
"inscription_id" TEXT NOT NULL,
|
||||
"block_height" INT NOT NULL,
|
||||
"tx_index" INT NOT NULL,
|
||||
"tx_hash" TEXT NOT NULL,
|
||||
"from_input_index" INT NOT NULL,
|
||||
"old_satpoint_tx_hash" TEXT,
|
||||
"old_satpoint_out_idx" INT,
|
||||
"old_satpoint_offset" BIGINT,
|
||||
"new_satpoint_tx_hash" TEXT,
|
||||
"new_satpoint_out_idx" INT,
|
||||
"new_satpoint_offset" BIGINT,
|
||||
"new_pkscript" TEXT NOT NULL,
|
||||
"new_output_value" BIGINT NOT NULL,
|
||||
"sent_as_fee" BOOLEAN NOT NULL,
|
||||
"transfer_count" INT NOT NULL,
|
||||
PRIMARY KEY ("inscription_id", "block_height", "tx_index")
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS brc20_inscription_transfers_block_height_tx_index_idx ON "brc20_inscription_transfers" USING BTREE ("block_height", "tx_index");
|
||||
CREATE INDEX IF NOT EXISTS brc20_inscription_transfers_new_satpoint_idx ON "brc20_inscription_transfers" USING BTREE ("new_satpoint_tx_hash", "new_satpoint_out_idx", "new_satpoint_offset");
|
||||
|
||||
COMMIT;
|
||||
145
modules/brc20/database/postgresql/queries/data.sql
Normal file
145
modules/brc20/database/postgresql/queries/data.sql
Normal file
@@ -0,0 +1,145 @@
|
||||
-- name: GetLatestIndexedBlock :one
|
||||
SELECT * FROM "brc20_indexed_blocks" ORDER BY "height" DESC LIMIT 1;
|
||||
|
||||
-- name: GetIndexedBlockByHeight :one
|
||||
SELECT * FROM "brc20_indexed_blocks" WHERE "height" = $1;
|
||||
|
||||
-- name: GetLatestProcessorStats :one
|
||||
SELECT * FROM "brc20_processor_stats" ORDER BY "block_height" DESC LIMIT 1;
|
||||
|
||||
-- name: GetInscriptionTransfersInOutPoints :many
|
||||
SELECT "it".*, "ie"."content" FROM (
|
||||
SELECT
|
||||
unnest(@tx_hash_arr::text[]) AS "tx_hash",
|
||||
unnest(@tx_out_idx_arr::int[]) AS "tx_out_idx"
|
||||
) "inputs"
|
||||
INNER JOIN "brc20_inscription_transfers" it ON "inputs"."tx_hash" = "it"."new_satpoint_tx_hash" AND "inputs"."tx_out_idx" = "it"."new_satpoint_out_idx"
|
||||
LEFT JOIN "brc20_inscription_entries" ie ON "it"."inscription_id" = "ie"."id";
|
||||
;
|
||||
|
||||
-- name: GetInscriptionEntriesByIds :many
|
||||
WITH "states" AS (
|
||||
-- select latest state
|
||||
SELECT DISTINCT ON ("id") * FROM "brc20_inscription_entry_states" WHERE "id" = ANY(@inscription_ids::text[]) ORDER BY "id", "block_height" DESC
|
||||
)
|
||||
SELECT * FROM "brc20_inscription_entries"
|
||||
LEFT JOIN "states" ON "brc20_inscription_entries"."id" = "states"."id"
|
||||
WHERE "brc20_inscription_entries"."id" = ANY(@inscription_ids::text[]);
|
||||
|
||||
-- name: GetTickEntriesByTicks :many
|
||||
WITH "states" AS (
|
||||
-- select latest state
|
||||
SELECT DISTINCT ON ("tick") * FROM "brc20_tick_entry_states" WHERE "tick" = ANY(@ticks::text[]) ORDER BY "tick", "block_height" DESC
|
||||
)
|
||||
SELECT * FROM "brc20_tick_entries"
|
||||
LEFT JOIN "states" ON "brc20_tick_entries"."tick" = "states"."tick"
|
||||
WHERE "brc20_tick_entries"."tick" = ANY(@ticks::text[]);
|
||||
|
||||
-- name: GetInscriptionNumbersByIds :many
|
||||
SELECT id, number FROM "brc20_inscription_entries" WHERE "id" = ANY(@inscription_ids::text[]);
|
||||
|
||||
-- name: GetInscriptionParentsByIds :many
|
||||
SELECT id, parents FROM "brc20_inscription_entries" WHERE "id" = ANY(@inscription_ids::text[]);
|
||||
|
||||
-- name: GetLatestEventIds :one
|
||||
WITH "latest_deploy_id" AS (
|
||||
SELECT "id" FROM "brc20_event_deploys" ORDER BY "id" DESC LIMIT 1
|
||||
),
|
||||
"latest_mint_id" AS (
|
||||
SELECT "id" FROM "brc20_event_mints" ORDER BY "id" DESC LIMIT 1
|
||||
),
|
||||
"latest_inscribe_transfer_id" AS (
|
||||
SELECT "id" FROM "brc20_event_inscribe_transfers" ORDER BY "id" DESC LIMIT 1
|
||||
),
|
||||
"latest_transfer_transfer_id" AS (
|
||||
SELECT "id" FROM "brc20_event_transfer_transfers" ORDER BY "id" DESC LIMIT 1
|
||||
)
|
||||
SELECT
|
||||
COALESCE((SELECT "id" FROM "latest_deploy_id"), -1) AS "event_deploy_id",
|
||||
COALESCE((SELECT "id" FROM "latest_mint_id"), -1) AS "event_mint_id",
|
||||
COALESCE((SELECT "id" FROM "latest_inscribe_transfer_id"), -1) AS "event_inscribe_transfer_id",
|
||||
COALESCE((SELECT "id" FROM "latest_transfer_transfer_id"), -1) AS "event_transfer_transfer_id";
|
||||
|
||||
-- name: GetBalancesBatchAtHeight :many
|
||||
SELECT DISTINCT ON ("brc20_balances"."pkscript", "brc20_balances"."tick") "brc20_balances".* FROM "brc20_balances"
|
||||
INNER JOIN (
|
||||
SELECT
|
||||
unnest(@pkscript_arr::text[]) AS "pkscript",
|
||||
unnest(@tick_arr::text[]) AS "tick"
|
||||
) "queries" ON "brc20_balances"."pkscript" = "queries"."pkscript" AND "brc20_balances"."tick" = "queries"."tick" AND "brc20_balances"."block_height" <= @block_height
|
||||
ORDER BY "brc20_balances"."pkscript", "brc20_balances"."tick", "block_height" DESC;
|
||||
|
||||
-- name: GetEventInscribeTransfersByInscriptionIds :many
|
||||
SELECT * FROM "brc20_event_inscribe_transfers" WHERE "inscription_id" = ANY(@inscription_ids::text[]);
|
||||
|
||||
-- name: CreateIndexedBlock :exec
|
||||
INSERT INTO "brc20_indexed_blocks" ("height", "hash", "event_hash", "cumulative_event_hash") VALUES ($1, $2, $3, $4);
|
||||
|
||||
-- name: CreateProcessorStats :exec
|
||||
INSERT INTO "brc20_processor_stats" ("block_height", "cursed_inscription_count", "blessed_inscription_count", "lost_sats") VALUES ($1, $2, $3, $4);
|
||||
|
||||
-- name: CreateTickEntries :batchexec
|
||||
INSERT INTO "brc20_tick_entries" ("tick", "original_tick", "total_supply", "decimals", "limit_per_mint", "is_self_mint", "deploy_inscription_id", "deployed_at", "deployed_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9);
|
||||
|
||||
-- name: CreateTickEntryStates :batchexec
|
||||
INSERT INTO "brc20_tick_entry_states" ("tick", "block_height", "minted_amount", "burned_amount", "completed_at", "completed_at_height") VALUES ($1, $2, $3, $4, $5, $6);
|
||||
|
||||
-- name: CreateInscriptionEntries :batchexec
|
||||
INSERT INTO "brc20_inscription_entries" ("id", "number", "sequence_number", "delegate", "metadata", "metaprotocol", "parents", "pointer", "content", "content_encoding", "content_type", "cursed", "cursed_for_brc20", "created_at", "created_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15);
|
||||
|
||||
-- name: CreateInscriptionEntryStates :batchexec
|
||||
INSERT INTO "brc20_inscription_entry_states" ("id", "block_height", "transfer_count") VALUES ($1, $2, $3);
|
||||
|
||||
-- name: CreateInscriptionTransfers :batchexec
|
||||
INSERT INTO "brc20_inscription_transfers" ("inscription_id", "block_height", "tx_index", "tx_hash", "from_input_index", "old_satpoint_tx_hash", "old_satpoint_out_idx", "old_satpoint_offset", "new_satpoint_tx_hash", "new_satpoint_out_idx", "new_satpoint_offset", "new_pkscript", "new_output_value", "sent_as_fee", "transfer_count") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15);
|
||||
|
||||
-- name: CreateEventDeploys :batchexec
|
||||
INSERT INTO "brc20_event_deploys" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "total_supply", "decimals", "limit_per_mint", "is_self_mint") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14);
|
||||
|
||||
-- name: CreateEventMints :batchexec
|
||||
INSERT INTO "brc20_event_mints" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "amount", "parent_id") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12);
|
||||
|
||||
-- name: CreateEventInscribeTransfers :batchexec
|
||||
INSERT INTO "brc20_event_inscribe_transfers" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "output_index", "sats_amount", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13);
|
||||
|
||||
-- name: CreateEventTransferTransfers :batchexec
|
||||
INSERT INTO "brc20_event_transfer_transfers" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "from_pkscript", "from_satpoint", "from_input_index", "to_pkscript", "to_satpoint", "to_output_index", "spent_as_fee", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16);
|
||||
|
||||
-- name: CreateBalances :batchexec
|
||||
INSERT INTO "brc20_balances" ("pkscript", "block_height", "tick", "overall_balance", "available_balance") VALUES ($1, $2, $3, $4, $5);
|
||||
|
||||
-- name: DeleteIndexedBlocksSinceHeight :exec
|
||||
DELETE FROM "brc20_indexed_blocks" WHERE "height" >= $1;
|
||||
|
||||
-- name: DeleteProcessorStatsSinceHeight :exec
|
||||
DELETE FROM "brc20_processor_stats" WHERE "block_height" >= $1;
|
||||
|
||||
-- name: DeleteTickEntriesSinceHeight :exec
|
||||
DELETE FROM "brc20_tick_entries" WHERE "deployed_at_height" >= $1;
|
||||
|
||||
-- name: DeleteTickEntryStatesSinceHeight :exec
|
||||
DELETE FROM "brc20_tick_entry_states" WHERE "block_height" >= $1;
|
||||
|
||||
-- name: DeleteEventDeploysSinceHeight :exec
|
||||
DELETE FROM "brc20_event_deploys" WHERE "block_height" >= $1;
|
||||
|
||||
-- name: DeleteEventMintsSinceHeight :exec
|
||||
DELETE FROM "brc20_event_mints" WHERE "block_height" >= $1;
|
||||
|
||||
-- name: DeleteEventInscribeTransfersSinceHeight :exec
|
||||
DELETE FROM "brc20_event_inscribe_transfers" WHERE "block_height" >= $1;
|
||||
|
||||
-- name: DeleteEventTransferTransfersSinceHeight :exec
|
||||
DELETE FROM "brc20_event_transfer_transfers" WHERE "block_height" >= $1;
|
||||
|
||||
-- name: DeleteBalancesSinceHeight :exec
|
||||
DELETE FROM "brc20_balances" WHERE "block_height" >= $1;
|
||||
|
||||
-- name: DeleteInscriptionEntriesSinceHeight :exec
|
||||
DELETE FROM "brc20_inscription_entries" WHERE "created_at_height" >= $1;
|
||||
|
||||
-- name: DeleteInscriptionEntryStatesSinceHeight :exec
|
||||
DELETE FROM "brc20_inscription_entry_states" WHERE "block_height" >= $1;
|
||||
|
||||
-- name: DeleteInscriptionTransfersSinceHeight :exec
|
||||
DELETE FROM "brc20_inscription_transfers" WHERE "block_height" >= $1;
|
||||
5
modules/brc20/database/postgresql/queries/info.sql
Normal file
5
modules/brc20/database/postgresql/queries/info.sql
Normal file
@@ -0,0 +1,5 @@
|
||||
-- name: GetLatestIndexerState :one
|
||||
SELECT * FROM brc20_indexer_states ORDER BY created_at DESC LIMIT 1;
|
||||
|
||||
-- name: CreateIndexerState :exec
|
||||
INSERT INTO brc20_indexer_states (client_version, network, db_version, event_hash_version) VALUES ($1, $2, $3, $4);
|
||||
69
modules/brc20/event_hash.go
Normal file
69
modules/brc20/event_hash.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package brc20
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
const eventHashSeparator = "|"
|
||||
|
||||
func getEventDeployString(event *entity.EventDeploy) string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("deploy-inscribe;")
|
||||
sb.WriteString(event.InscriptionId.String() + ";")
|
||||
sb.WriteString(hex.EncodeToString(event.PkScript) + ";")
|
||||
sb.WriteString(event.Tick + ";")
|
||||
sb.WriteString(event.OriginalTick + ";")
|
||||
sb.WriteString(event.TotalSupply.StringFixed(int32(event.Decimals)) + ";")
|
||||
sb.WriteString(strconv.Itoa(int(event.Decimals)) + ";")
|
||||
sb.WriteString(event.LimitPerMint.StringFixed(int32(event.Decimals)) + ";")
|
||||
sb.WriteString(lo.Ternary(event.IsSelfMint, "True", "False"))
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func getEventMintString(event *entity.EventMint, decimals uint16) string {
|
||||
var sb strings.Builder
|
||||
var parentId string
|
||||
if event.ParentId != nil {
|
||||
parentId = event.ParentId.String()
|
||||
}
|
||||
sb.WriteString("mint-inscribe;")
|
||||
sb.WriteString(event.InscriptionId.String() + ";")
|
||||
sb.WriteString(hex.EncodeToString(event.PkScript) + ";")
|
||||
sb.WriteString(event.Tick + ";")
|
||||
sb.WriteString(event.OriginalTick + ";")
|
||||
sb.WriteString(event.Amount.StringFixed(int32(decimals)) + ";")
|
||||
sb.WriteString(parentId)
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func getEventInscribeTransferString(event *entity.EventInscribeTransfer, decimals uint16) string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("inscribe-transfer;")
|
||||
sb.WriteString(event.InscriptionId.String() + ";")
|
||||
sb.WriteString(hex.EncodeToString(event.PkScript) + ";")
|
||||
sb.WriteString(event.Tick + ";")
|
||||
sb.WriteString(event.OriginalTick + ";")
|
||||
sb.WriteString(event.Amount.StringFixed(int32(decimals)))
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func getEventTransferTransferString(event *entity.EventTransferTransfer, decimals uint16) string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("transfer-transfer;")
|
||||
sb.WriteString(event.InscriptionId.String() + ";")
|
||||
sb.WriteString(hex.EncodeToString(event.FromPkScript) + ";")
|
||||
if event.SpentAsFee {
|
||||
sb.WriteString(";")
|
||||
} else {
|
||||
sb.WriteString(hex.EncodeToString(event.ToPkScript) + ";")
|
||||
}
|
||||
sb.WriteString(event.Tick + ";")
|
||||
sb.WriteString(event.OriginalTick + ";")
|
||||
sb.WriteString(event.Amount.StringFixed(int32(decimals)))
|
||||
return sb.String()
|
||||
}
|
||||
16
modules/brc20/internal/brc20/activation.go
Normal file
16
modules/brc20/internal/brc20/activation.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package brc20
|
||||
|
||||
import "github.com/gaze-network/indexer-network/common"
|
||||
|
||||
var selfMintActivationHeights = map[common.Network]uint64{
|
||||
common.NetworkMainnet: 837090,
|
||||
common.NetworkTestnet: 837090,
|
||||
}
|
||||
|
||||
func isSelfMintActivated(height uint64, network common.Network) bool {
|
||||
activationHeight, ok := selfMintActivationHeights[network]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return height >= activationHeight
|
||||
}
|
||||
21
modules/brc20/internal/brc20/operations.go
Normal file
21
modules/brc20/internal/brc20/operations.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package brc20
|
||||
|
||||
type Operation string
|
||||
|
||||
const (
|
||||
OperationDeploy Operation = "deploy"
|
||||
OperationMint Operation = "mint"
|
||||
OperationTransfer Operation = "transfer"
|
||||
)
|
||||
|
||||
func (o Operation) IsValid() bool {
|
||||
switch o {
|
||||
case OperationDeploy, OperationMint, OperationTransfer:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (o Operation) String() string {
|
||||
return string(o)
|
||||
}
|
||||
170
modules/brc20/internal/brc20/payload.go
Normal file
170
modules/brc20/internal/brc20/payload.go
Normal file
@@ -0,0 +1,170 @@
|
||||
package brc20
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"math"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
|
||||
"github.com/shopspring/decimal"
|
||||
)
|
||||
|
||||
type rawPayload struct {
|
||||
P string // required
|
||||
Op string `json:"op"` // required
|
||||
Tick string `json:"tick"` // required
|
||||
|
||||
// for deploy operations
|
||||
Max string `json:"max"` // required
|
||||
Lim *string `json:"lim"`
|
||||
Dec *string `json:"dec"`
|
||||
SelfMint *string `json:"self_mint"`
|
||||
|
||||
// for mint/transfer operations
|
||||
Amt string `json:"amt"` // required
|
||||
}
|
||||
|
||||
type Payload struct {
|
||||
Transfer *entity.InscriptionTransfer
|
||||
P string
|
||||
Op Operation
|
||||
Tick string // lower-cased tick
|
||||
OriginalTick string // original tick before lower-cased
|
||||
|
||||
// for deploy operations
|
||||
Max decimal.Decimal
|
||||
Lim decimal.Decimal
|
||||
Dec uint16
|
||||
SelfMint bool
|
||||
|
||||
// for mint/transfer operations
|
||||
Amt decimal.Decimal
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidProtocol = errors.New("invalid protocol: must be 'brc20'")
|
||||
ErrInvalidOperation = errors.New("invalid operation for brc20: must be one of 'deploy', 'mint', or 'transfer'")
|
||||
ErrInvalidTickLength = errors.New("invalid tick length: must be 4 or 5 bytes")
|
||||
ErrEmptyTick = errors.New("empty tick")
|
||||
ErrEmptyMax = errors.New("empty max")
|
||||
ErrInvalidMax = errors.New("invalid max")
|
||||
ErrInvalidDec = errors.New("invalid dec")
|
||||
ErrInvalidSelfMint = errors.New("invalid self_mint")
|
||||
ErrInvalidAmt = errors.New("invalid amt")
|
||||
ErrNumberOverflow = errors.New("number overflow: max value is (2^64-1) * 10^18")
|
||||
)
|
||||
|
||||
func ParsePayload(transfer *entity.InscriptionTransfer) (*Payload, error) {
|
||||
var p rawPayload
|
||||
err := json.Unmarshal(transfer.Content, &p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal payload as json")
|
||||
}
|
||||
|
||||
if p.P != "brc-20" {
|
||||
return nil, errors.WithStack(ErrInvalidProtocol)
|
||||
}
|
||||
if !Operation(p.Op).IsValid() {
|
||||
return nil, errors.WithStack(ErrInvalidOperation)
|
||||
}
|
||||
if p.Tick == "" {
|
||||
return nil, errors.WithStack(ErrEmptyTick)
|
||||
}
|
||||
if len(p.Tick) != 4 && len(p.Tick) != 5 {
|
||||
return nil, errors.WithStack(ErrInvalidTickLength)
|
||||
}
|
||||
|
||||
parsed := Payload{
|
||||
Transfer: transfer,
|
||||
P: p.P,
|
||||
Op: Operation(p.Op),
|
||||
Tick: strings.ToLower(p.Tick),
|
||||
OriginalTick: p.Tick,
|
||||
}
|
||||
|
||||
switch parsed.Op {
|
||||
case OperationDeploy:
|
||||
if p.Max == "" {
|
||||
return nil, errors.WithStack(ErrEmptyMax)
|
||||
}
|
||||
var rawDec string
|
||||
if p.Dec != nil {
|
||||
rawDec = *p.Dec
|
||||
}
|
||||
dec, ok := strconv.ParseUint(rawDec, 10, 16)
|
||||
if ok != nil {
|
||||
return nil, errors.Wrap(ok, "failed to parse dec")
|
||||
}
|
||||
if dec > 18 {
|
||||
return nil, errors.WithStack(ErrInvalidDec)
|
||||
}
|
||||
parsed.Dec = uint16(dec)
|
||||
|
||||
max, err := parseNumericString(p.Max, dec)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse max")
|
||||
}
|
||||
parsed.Max = max
|
||||
|
||||
limit := max
|
||||
if p.Lim != nil {
|
||||
limit, err = parseNumericString(*p.Lim, dec)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse lim")
|
||||
}
|
||||
}
|
||||
parsed.Lim = limit
|
||||
|
||||
// 5-bytes ticks are self-mint only
|
||||
if len(parsed.OriginalTick) == 5 {
|
||||
if p.SelfMint == nil || *p.SelfMint != "true" {
|
||||
return nil, errors.WithStack(ErrInvalidSelfMint)
|
||||
}
|
||||
// infinite mints if tick is self-mint, and max is set to 0
|
||||
if parsed.Max.IsZero() {
|
||||
parsed.Max = maxNumber
|
||||
if parsed.Lim.IsZero() {
|
||||
parsed.Lim = maxNumber
|
||||
}
|
||||
}
|
||||
}
|
||||
if parsed.Max.IsZero() {
|
||||
return nil, errors.WithStack(ErrInvalidMax)
|
||||
}
|
||||
case OperationMint, OperationTransfer:
|
||||
if p.Amt == "" {
|
||||
return nil, errors.WithStack(ErrInvalidAmt)
|
||||
}
|
||||
// NOTE: check tick decimals after parsing payload
|
||||
amt, err := parseNumericString(p.Amt, 18)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse amt")
|
||||
}
|
||||
parsed.Amt = amt
|
||||
default:
|
||||
return nil, errors.WithStack(ErrInvalidOperation)
|
||||
}
|
||||
return &parsed, nil
|
||||
}
|
||||
|
||||
// max number for all numeric fields (except dec) is (2^64-1)
|
||||
var (
|
||||
maxNumber = decimal.NewFromBigInt(new(big.Int).SetUint64(math.MaxUint64), 0)
|
||||
)
|
||||
|
||||
func parseNumericString(s string, maxDec uint64) (decimal.Decimal, error) {
|
||||
d, err := decimal.NewFromString(s)
|
||||
if err != nil {
|
||||
return decimal.Decimal{}, errors.Wrap(err, "failed to parse decimal number")
|
||||
}
|
||||
if -d.Exponent() > int32(maxDec) {
|
||||
return decimal.Decimal{}, errors.Errorf("cannot parse decimal number: too many decimal points: expected %d got %d", maxDec, d.Exponent())
|
||||
}
|
||||
if d.GreaterThan(maxNumber) {
|
||||
return decimal.Decimal{}, errors.WithStack(ErrNumberOverflow)
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
72
modules/brc20/internal/datagateway/brc20.go
Normal file
72
modules/brc20/internal/datagateway/brc20.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package datagateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
)
|
||||
|
||||
type BRC20DataGateway interface {
|
||||
BRC20ReaderDataGateway
|
||||
BRC20WriterDataGateway
|
||||
|
||||
// BeginBRC20Tx returns a new BRC20DataGateway with transaction enabled. All write operations performed in this datagateway must be committed to persist changes.
|
||||
BeginBRC20Tx(ctx context.Context) (BRC20DataGatewayWithTx, error)
|
||||
}
|
||||
|
||||
type BRC20DataGatewayWithTx interface {
|
||||
BRC20DataGateway
|
||||
Tx
|
||||
}
|
||||
|
||||
type BRC20ReaderDataGateway interface {
|
||||
GetLatestBlock(ctx context.Context) (types.BlockHeader, error)
|
||||
GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error)
|
||||
GetProcessorStats(ctx context.Context) (*entity.ProcessorStats, error)
|
||||
GetInscriptionTransfersInOutPoints(ctx context.Context, outPoints []wire.OutPoint) (map[ordinals.SatPoint][]*entity.InscriptionTransfer, error)
|
||||
GetInscriptionEntriesByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*ordinals.InscriptionEntry, error)
|
||||
GetInscriptionNumbersByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]int64, error)
|
||||
GetInscriptionParentsByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]ordinals.InscriptionId, error)
|
||||
GetBalancesBatchAtHeight(ctx context.Context, blockHeight uint64, queries []GetBalancesBatchAtHeightQuery) (map[string]map[string]*entity.Balance, error)
|
||||
GetTickEntriesByTicks(ctx context.Context, ticks []string) (map[string]*entity.TickEntry, error)
|
||||
GetEventInscribeTransfersByInscriptionIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*entity.EventInscribeTransfer, error)
|
||||
GetLatestEventId(ctx context.Context) (int64, error)
|
||||
}
|
||||
|
||||
type BRC20WriterDataGateway interface {
|
||||
CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error
|
||||
CreateProcessorStats(ctx context.Context, stats *entity.ProcessorStats) error
|
||||
CreateTickEntries(ctx context.Context, blockHeight uint64, entries []*entity.TickEntry) error
|
||||
CreateTickEntryStates(ctx context.Context, blockHeight uint64, entryStates []*entity.TickEntry) error
|
||||
CreateInscriptionEntries(ctx context.Context, blockHeight uint64, entries []*ordinals.InscriptionEntry) error
|
||||
CreateInscriptionEntryStates(ctx context.Context, blockHeight uint64, entryStates []*ordinals.InscriptionEntry) error
|
||||
CreateInscriptionTransfers(ctx context.Context, transfers []*entity.InscriptionTransfer) error
|
||||
CreateEventDeploys(ctx context.Context, events []*entity.EventDeploy) error
|
||||
CreateEventMints(ctx context.Context, events []*entity.EventMint) error
|
||||
CreateEventInscribeTransfers(ctx context.Context, events []*entity.EventInscribeTransfer) error
|
||||
CreateEventTransferTransfers(ctx context.Context, events []*entity.EventTransferTransfer) error
|
||||
CreateBalances(ctx context.Context, balances []*entity.Balance) error
|
||||
|
||||
// used for revert data
|
||||
DeleteIndexedBlocksSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteProcessorStatsSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteTickEntriesSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteTickEntryStatesSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteEventDeploysSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteEventMintsSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteEventInscribeTransfersSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteEventTransferTransfersSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteBalancesSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteInscriptionEntriesSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteInscriptionEntryStatesSinceHeight(ctx context.Context, height uint64) error
|
||||
DeleteInscriptionTransfersSinceHeight(ctx context.Context, height uint64) error
|
||||
}
|
||||
|
||||
type GetBalancesBatchAtHeightQuery struct {
|
||||
PkScriptHex string
|
||||
Tick string
|
||||
BlockHeight uint64
|
||||
}
|
||||
12
modules/brc20/internal/datagateway/indexer_info.go
Normal file
12
modules/brc20/internal/datagateway/indexer_info.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package datagateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
|
||||
)
|
||||
|
||||
type IndexerInfoDataGateway interface {
|
||||
GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error)
|
||||
CreateIndexerState(ctx context.Context, state entity.IndexerState) error
|
||||
}
|
||||
12
modules/brc20/internal/datagateway/tx.go
Normal file
12
modules/brc20/internal/datagateway/tx.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package datagateway
|
||||
|
||||
import "context"
|
||||
|
||||
type Tx interface {
|
||||
// Commit commits the DB transaction. All changes made after Begin() will be persisted. Calling Commit() will close the current transaction.
|
||||
// If Commit() is called without a prior Begin(), it must be a no-op.
|
||||
Commit(ctx context.Context) error
|
||||
// Rollback rolls back the DB transaction. All changes made after Begin() will be discarded.
|
||||
// Rollback() must be safe to call even if no transaction is active. Hence, a defer Rollback() is safe, even if Commit() was called prior with non-error conditions.
|
||||
Rollback(ctx context.Context) error
|
||||
}
|
||||
11
modules/brc20/internal/entity/balance.go
Normal file
11
modules/brc20/internal/entity/balance.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package entity
|
||||
|
||||
import "github.com/shopspring/decimal"
|
||||
|
||||
type Balance struct {
|
||||
PkScript []byte
|
||||
Tick string
|
||||
BlockHeight uint64
|
||||
OverallBalance decimal.Decimal
|
||||
AvailableBalance decimal.Decimal
|
||||
}
|
||||
28
modules/brc20/internal/entity/event_deploy.go
Normal file
28
modules/brc20/internal/entity/event_deploy.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package entity
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
"github.com/shopspring/decimal"
|
||||
)
|
||||
|
||||
type EventDeploy struct {
|
||||
Id int64
|
||||
InscriptionId ordinals.InscriptionId
|
||||
InscriptionNumber int64
|
||||
Tick string
|
||||
OriginalTick string
|
||||
TxHash chainhash.Hash
|
||||
BlockHeight uint64
|
||||
TxIndex uint32
|
||||
Timestamp time.Time
|
||||
|
||||
PkScript []byte
|
||||
SatPoint ordinals.SatPoint
|
||||
TotalSupply decimal.Decimal
|
||||
Decimals uint16
|
||||
LimitPerMint decimal.Decimal
|
||||
IsSelfMint bool
|
||||
}
|
||||
27
modules/brc20/internal/entity/event_inscribe_transfer.go
Normal file
27
modules/brc20/internal/entity/event_inscribe_transfer.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package entity
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
"github.com/shopspring/decimal"
|
||||
)
|
||||
|
||||
type EventInscribeTransfer struct {
|
||||
Id int64
|
||||
InscriptionId ordinals.InscriptionId
|
||||
InscriptionNumber int64
|
||||
Tick string
|
||||
OriginalTick string
|
||||
TxHash chainhash.Hash
|
||||
BlockHeight uint64
|
||||
TxIndex uint32
|
||||
Timestamp time.Time
|
||||
|
||||
PkScript []byte
|
||||
SatPoint ordinals.SatPoint
|
||||
OutputIndex uint32
|
||||
SatsAmount uint64
|
||||
Amount decimal.Decimal
|
||||
}
|
||||
26
modules/brc20/internal/entity/event_mint.go
Normal file
26
modules/brc20/internal/entity/event_mint.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package entity
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
"github.com/shopspring/decimal"
|
||||
)
|
||||
|
||||
type EventMint struct {
|
||||
Id int64
|
||||
InscriptionId ordinals.InscriptionId
|
||||
InscriptionNumber int64
|
||||
Tick string
|
||||
OriginalTick string
|
||||
TxHash chainhash.Hash
|
||||
BlockHeight uint64
|
||||
TxIndex uint32
|
||||
Timestamp time.Time
|
||||
|
||||
PkScript []byte
|
||||
SatPoint ordinals.SatPoint
|
||||
Amount decimal.Decimal
|
||||
ParentId *ordinals.InscriptionId
|
||||
}
|
||||
30
modules/brc20/internal/entity/event_transfer_transfer.go
Normal file
30
modules/brc20/internal/entity/event_transfer_transfer.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package entity
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
"github.com/shopspring/decimal"
|
||||
)
|
||||
|
||||
type EventTransferTransfer struct {
|
||||
Id int64
|
||||
InscriptionId ordinals.InscriptionId
|
||||
InscriptionNumber int64
|
||||
Tick string
|
||||
OriginalTick string
|
||||
TxHash chainhash.Hash
|
||||
BlockHeight uint64
|
||||
TxIndex uint32
|
||||
Timestamp time.Time
|
||||
|
||||
FromPkScript []byte
|
||||
FromSatPoint ordinals.SatPoint
|
||||
FromInputIndex uint32
|
||||
ToPkScript []byte
|
||||
ToSatPoint ordinals.SatPoint
|
||||
ToOutputIndex uint32
|
||||
SpentAsFee bool
|
||||
Amount decimal.Decimal
|
||||
}
|
||||
31
modules/brc20/internal/entity/flotsam.go
Normal file
31
modules/brc20/internal/entity/flotsam.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package entity
|
||||
|
||||
import (
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
)
|
||||
|
||||
type OriginOld struct {
|
||||
Content []byte
|
||||
OldSatPoint ordinals.SatPoint
|
||||
InputIndex uint32
|
||||
}
|
||||
type OriginNew struct {
|
||||
Inscription ordinals.Inscription
|
||||
Parent *ordinals.InscriptionId
|
||||
Pointer *uint64
|
||||
Fee uint64
|
||||
Cursed bool
|
||||
CursedForBRC20 bool
|
||||
Hidden bool
|
||||
Reinscription bool
|
||||
Unbound bool
|
||||
}
|
||||
|
||||
type Flotsam struct {
|
||||
Tx *types.Transaction
|
||||
OriginOld *OriginOld // OriginOld and OriginNew are mutually exclusive
|
||||
OriginNew *OriginNew // OriginOld and OriginNew are mutually exclusive
|
||||
Offset uint64
|
||||
InscriptionId ordinals.InscriptionId
|
||||
}
|
||||
10
modules/brc20/internal/entity/indexed_block.go
Normal file
10
modules/brc20/internal/entity/indexed_block.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package entity
|
||||
|
||||
import "github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
|
||||
type IndexedBlock struct {
|
||||
Height uint64
|
||||
Hash chainhash.Hash
|
||||
EventHash []byte
|
||||
CumulativeEventHash []byte
|
||||
}
|
||||
15
modules/brc20/internal/entity/indexer_state.go
Normal file
15
modules/brc20/internal/entity/indexer_state.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package entity
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
)
|
||||
|
||||
type IndexerState struct {
|
||||
CreatedAt time.Time
|
||||
ClientVersion string
|
||||
DBVersion int32
|
||||
EventHashVersion int32
|
||||
Network common.Network
|
||||
}
|
||||
21
modules/brc20/internal/entity/inscription_transfer.go
Normal file
21
modules/brc20/internal/entity/inscription_transfer.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package entity
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
)
|
||||
|
||||
type InscriptionTransfer struct {
|
||||
InscriptionId ordinals.InscriptionId
|
||||
BlockHeight uint64
|
||||
TxIndex uint32
|
||||
TxHash chainhash.Hash
|
||||
Content []byte
|
||||
FromInputIndex uint32
|
||||
OldSatPoint ordinals.SatPoint
|
||||
NewSatPoint ordinals.SatPoint
|
||||
NewPkScript []byte
|
||||
NewOutputValue uint64
|
||||
SentAsFee bool
|
||||
TransferCount uint32
|
||||
}
|
||||
8
modules/brc20/internal/entity/processor_stats.go
Normal file
8
modules/brc20/internal/entity/processor_stats.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package entity
|
||||
|
||||
type ProcessorStats struct {
|
||||
BlockHeight uint64
|
||||
CursedInscriptionCount uint64
|
||||
BlessedInscriptionCount uint64
|
||||
LostSats uint64
|
||||
}
|
||||
25
modules/brc20/internal/entity/tick_entry.go
Normal file
25
modules/brc20/internal/entity/tick_entry.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package entity
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
"github.com/shopspring/decimal"
|
||||
)
|
||||
|
||||
type TickEntry struct {
|
||||
Tick string
|
||||
OriginalTick string
|
||||
TotalSupply decimal.Decimal
|
||||
Decimals uint16
|
||||
LimitPerMint decimal.Decimal
|
||||
IsSelfMint bool
|
||||
DeployInscriptionId ordinals.InscriptionId
|
||||
DeployedAt time.Time
|
||||
DeployedAtHeight uint64
|
||||
|
||||
MintedAmount decimal.Decimal
|
||||
BurnedAmount decimal.Decimal
|
||||
CompletedAt time.Time
|
||||
CompletedAtHeight uint64
|
||||
}
|
||||
285
modules/brc20/internal/ordinals/envelope.go
Normal file
285
modules/brc20/internal/ordinals/envelope.go
Normal file
@@ -0,0 +1,285 @@
|
||||
package ordinals
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type Envelope struct {
|
||||
Inscription Inscription
|
||||
InputIndex uint32 // Index of input that contains the envelope
|
||||
Offset int // Number of envelope in the input
|
||||
PushNum bool // True if envelope contains pushnum opcodes
|
||||
Stutter bool // True if envelope matches stuttering curse structure
|
||||
IncompleteField bool // True if payload is incomplete
|
||||
DuplicateField bool // True if payload contains duplicated field
|
||||
UnrecognizedEvenField bool // True if payload contains unrecognized even field
|
||||
}
|
||||
|
||||
func ParseEnvelopesFromTx(tx *types.Transaction) []*Envelope {
|
||||
envelopes := make([]*Envelope, 0)
|
||||
|
||||
for i, txIn := range tx.TxIn {
|
||||
tapScript, ok := extractTapScript(txIn.Witness)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
newEnvelopes := envelopesFromTapScript(tapScript, i)
|
||||
envelopes = append(envelopes, newEnvelopes...)
|
||||
}
|
||||
|
||||
return envelopes
|
||||
}
|
||||
|
||||
var protocolId = []byte("ord")
|
||||
|
||||
func envelopesFromTapScript(tokenizer txscript.ScriptTokenizer, inputIndex int) []*Envelope {
|
||||
envelopes := make([]*Envelope, 0)
|
||||
|
||||
var stuttered bool
|
||||
for tokenizer.Next() {
|
||||
if tokenizer.Err() != nil {
|
||||
break
|
||||
}
|
||||
if tokenizer.Opcode() == txscript.OP_FALSE {
|
||||
envelope, stutter := envelopeFromTokenizer(tokenizer, inputIndex, len(envelopes), stuttered)
|
||||
if envelope != nil {
|
||||
envelopes = append(envelopes, envelope)
|
||||
} else {
|
||||
stuttered = stutter
|
||||
}
|
||||
}
|
||||
}
|
||||
if tokenizer.Err() != nil {
|
||||
return envelopes
|
||||
}
|
||||
return envelopes
|
||||
}
|
||||
|
||||
func envelopeFromTokenizer(tokenizer txscript.ScriptTokenizer, inputIndex int, offset int, stuttered bool) (*Envelope, bool) {
|
||||
tokenizer.Next()
|
||||
if tokenizer.Opcode() != txscript.OP_IF {
|
||||
return nil, tokenizer.Opcode() == txscript.OP_FALSE
|
||||
}
|
||||
|
||||
tokenizer.Next()
|
||||
if !bytes.Equal(tokenizer.Data(), protocolId) {
|
||||
return nil, tokenizer.Opcode() == txscript.OP_FALSE
|
||||
}
|
||||
|
||||
var pushNum bool
|
||||
payload := make([][]byte, 0)
|
||||
for tokenizer.Next() {
|
||||
if tokenizer.Err() != nil {
|
||||
return nil, false
|
||||
}
|
||||
opCode := tokenizer.Opcode()
|
||||
if opCode == txscript.OP_ENDIF {
|
||||
break
|
||||
}
|
||||
switch opCode {
|
||||
case txscript.OP_1NEGATE:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x81})
|
||||
case txscript.OP_1:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x01})
|
||||
case txscript.OP_2:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x02})
|
||||
case txscript.OP_3:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x03})
|
||||
case txscript.OP_4:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x04})
|
||||
case txscript.OP_5:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x05})
|
||||
case txscript.OP_6:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x06})
|
||||
case txscript.OP_7:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x07})
|
||||
case txscript.OP_8:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x08})
|
||||
case txscript.OP_9:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x09})
|
||||
case txscript.OP_10:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x10})
|
||||
case txscript.OP_11:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x11})
|
||||
case txscript.OP_12:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x12})
|
||||
case txscript.OP_13:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x13})
|
||||
case txscript.OP_14:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x14})
|
||||
case txscript.OP_15:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x15})
|
||||
case txscript.OP_16:
|
||||
pushNum = true
|
||||
payload = append(payload, []byte{0x16})
|
||||
case txscript.OP_0:
|
||||
// OP_0 is a special case, it is accepted in ord's implementation
|
||||
payload = append(payload, []byte{})
|
||||
default:
|
||||
data := tokenizer.Data()
|
||||
if data == nil {
|
||||
return nil, false
|
||||
}
|
||||
payload = append(payload, data)
|
||||
}
|
||||
}
|
||||
// incomplete envelope
|
||||
if tokenizer.Done() && tokenizer.Opcode() != txscript.OP_ENDIF {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// find body (empty data push in even index payload)
|
||||
bodyIndex := -1
|
||||
for i, value := range payload {
|
||||
if i%2 == 0 && len(value) == 0 {
|
||||
bodyIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
var fieldPayloads [][]byte
|
||||
var body []byte
|
||||
if bodyIndex != -1 {
|
||||
fieldPayloads = payload[:bodyIndex]
|
||||
body = lo.Flatten(payload[bodyIndex+1:])
|
||||
} else {
|
||||
fieldPayloads = payload[:]
|
||||
}
|
||||
|
||||
var incompleteField bool
|
||||
fields := make(Fields)
|
||||
for _, chunk := range lo.Chunk(fieldPayloads, 2) {
|
||||
if len(chunk) != 2 {
|
||||
incompleteField = true
|
||||
break
|
||||
}
|
||||
key := chunk[0]
|
||||
value := chunk[1]
|
||||
// key cannot be empty, as checked by bodyIndex above
|
||||
tag := Tag(key[0])
|
||||
fields[tag] = append(fields[tag], value)
|
||||
}
|
||||
|
||||
var duplicateField bool
|
||||
for _, values := range fields {
|
||||
if len(values) > 1 {
|
||||
duplicateField = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
rawContentEncoding := fields.Take(TagContentEncoding)
|
||||
rawContentType := fields.Take(TagContentType)
|
||||
rawDelegate := fields.Take(TagDelegate)
|
||||
rawMetadata := fields.Take(TagMetadata)
|
||||
rawMetaprotocol := fields.Take(TagMetaprotocol)
|
||||
rawParent := fields.Take(TagParent)
|
||||
rawPointer := fields.Take(TagPointer)
|
||||
|
||||
unrecognizedEvenField := lo.SomeBy(lo.Keys(fields), func(key Tag) bool {
|
||||
return key%2 == 0
|
||||
})
|
||||
|
||||
var delegate, parent *InscriptionId
|
||||
inscriptionId, err := NewInscriptionIdFromString(string(rawDelegate))
|
||||
if err == nil {
|
||||
delegate = &inscriptionId
|
||||
}
|
||||
inscriptionId, err = NewInscriptionIdFromString(string(rawParent))
|
||||
if err == nil {
|
||||
parent = &inscriptionId
|
||||
}
|
||||
|
||||
var pointer *uint64
|
||||
// if rawPointer is not nil and fits in uint64
|
||||
if rawPointer != nil && (len(rawPointer) <= 8 || lo.EveryBy(rawPointer[8:], func(value byte) bool {
|
||||
return value != 0
|
||||
})) {
|
||||
// pad zero bytes to 8 bytes
|
||||
if len(rawPointer) < 8 {
|
||||
rawPointer = append(rawPointer, make([]byte, 8-len(rawPointer))...)
|
||||
}
|
||||
pointer = lo.ToPtr(binary.LittleEndian.Uint64(rawPointer))
|
||||
}
|
||||
|
||||
inscription := Inscription{
|
||||
Content: body,
|
||||
ContentEncoding: string(rawContentEncoding),
|
||||
ContentType: string(rawContentType),
|
||||
Delegate: delegate,
|
||||
Metadata: rawMetadata,
|
||||
Metaprotocol: string(rawMetaprotocol),
|
||||
Parent: parent,
|
||||
Pointer: pointer,
|
||||
}
|
||||
return &Envelope{
|
||||
Inscription: inscription,
|
||||
InputIndex: uint32(inputIndex),
|
||||
Offset: offset,
|
||||
PushNum: pushNum,
|
||||
Stutter: stuttered,
|
||||
IncompleteField: incompleteField,
|
||||
DuplicateField: duplicateField,
|
||||
UnrecognizedEvenField: unrecognizedEvenField,
|
||||
}, false
|
||||
}
|
||||
|
||||
type Fields map[Tag][][]byte
|
||||
|
||||
func (fields Fields) Take(tag Tag) []byte {
|
||||
values, ok := fields[tag]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if tag.IsChunked() {
|
||||
delete(fields, tag)
|
||||
return lo.Flatten(values)
|
||||
} else {
|
||||
first := values[0]
|
||||
values = values[1:]
|
||||
if len(values) == 0 {
|
||||
delete(fields, tag)
|
||||
} else {
|
||||
fields[tag] = values
|
||||
}
|
||||
return first
|
||||
}
|
||||
}
|
||||
|
||||
func extractTapScript(witness [][]byte) (txscript.ScriptTokenizer, bool) {
|
||||
witness = removeAnnexFromWitness(witness)
|
||||
if len(witness) < 2 {
|
||||
return txscript.ScriptTokenizer{}, false
|
||||
}
|
||||
script := witness[len(witness)-2]
|
||||
|
||||
return txscript.MakeScriptTokenizer(0, script), true
|
||||
}
|
||||
|
||||
func removeAnnexFromWitness(witness [][]byte) [][]byte {
|
||||
if len(witness) >= 2 && len(witness[len(witness)-1]) > 0 && witness[len(witness)-1][0] == txscript.TaprootAnnexTag {
|
||||
return witness[:len(witness)-1]
|
||||
}
|
||||
return witness
|
||||
}
|
||||
742
modules/brc20/internal/ordinals/envelope_test.go
Normal file
742
modules/brc20/internal/ordinals/envelope_test.go
Normal file
@@ -0,0 +1,742 @@
|
||||
package ordinals
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/samber/lo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseEnvelopesFromTx(t *testing.T) {
|
||||
testTx := func(t *testing.T, tx *types.Transaction, expected []*Envelope) {
|
||||
t.Helper()
|
||||
|
||||
envelopes := ParseEnvelopesFromTx(tx)
|
||||
assert.Equal(t, expected, envelopes)
|
||||
}
|
||||
testParseWitness := func(t *testing.T, tapScript []byte, expected []*Envelope) {
|
||||
t.Helper()
|
||||
|
||||
tx := &types.Transaction{
|
||||
Version: 2,
|
||||
LockTime: 0,
|
||||
TxIn: []*types.TxIn{
|
||||
{
|
||||
Witness: wire.TxWitness{
|
||||
tapScript,
|
||||
{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
testTx(t, tx, expected)
|
||||
}
|
||||
testEnvelope := func(t *testing.T, payload [][]byte, expected []*Envelope) {
|
||||
t.Helper()
|
||||
|
||||
builder := NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF)
|
||||
for _, data := range payload {
|
||||
builder.AddData(data)
|
||||
}
|
||||
builder.AddOp(txscript.OP_ENDIF)
|
||||
script, err := builder.Script()
|
||||
assert.NoError(t, err)
|
||||
|
||||
testParseWitness(
|
||||
t,
|
||||
script,
|
||||
expected,
|
||||
)
|
||||
}
|
||||
|
||||
t.Run("empty_witness", func(t *testing.T) {
|
||||
testTx(t, &types.Transaction{
|
||||
Version: 2,
|
||||
LockTime: 0,
|
||||
TxIn: []*types.TxIn{{
|
||||
Witness: wire.TxWitness{},
|
||||
}},
|
||||
}, []*Envelope{})
|
||||
})
|
||||
t.Run("ignore_key_path_spends", func(t *testing.T) {
|
||||
testTx(t, &types.Transaction{
|
||||
Version: 2,
|
||||
LockTime: 0,
|
||||
TxIn: []*types.TxIn{{
|
||||
Witness: wire.TxWitness{
|
||||
utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script()),
|
||||
},
|
||||
}},
|
||||
}, []*Envelope{})
|
||||
})
|
||||
t.Run("ignore_key_path_spends_with_annex", func(t *testing.T) {
|
||||
testTx(t, &types.Transaction{
|
||||
Version: 2,
|
||||
LockTime: 0,
|
||||
TxIn: []*types.TxIn{{
|
||||
Witness: wire.TxWitness{
|
||||
utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script()),
|
||||
[]byte{txscript.TaprootAnnexTag},
|
||||
},
|
||||
}},
|
||||
}, []*Envelope{})
|
||||
})
|
||||
t.Run("parse_from_tapscript", func(t *testing.T) {
|
||||
testParseWitness(
|
||||
t,
|
||||
utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script()),
|
||||
[]*Envelope{{}},
|
||||
)
|
||||
})
|
||||
t.Run("ignore_unparsable_scripts", func(t *testing.T) {
|
||||
script := utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script())
|
||||
|
||||
script = append(script, 0x01)
|
||||
testParseWitness(
|
||||
t,
|
||||
script,
|
||||
[]*Envelope{
|
||||
{},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("no_inscription", func(t *testing.T) {
|
||||
testParseWitness(
|
||||
t,
|
||||
utils.Must(NewPushScriptBuilder().
|
||||
Script()),
|
||||
[]*Envelope{},
|
||||
)
|
||||
})
|
||||
t.Run("duplicate_field", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagNop.Bytes(),
|
||||
{},
|
||||
TagNop.Bytes(),
|
||||
{},
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
DuplicateField: true,
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("with_content_type", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagContentType.Bytes(),
|
||||
[]byte("text/plain;charset=utf-8"),
|
||||
TagBody.Bytes(),
|
||||
[]byte("ord"),
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte("ord"),
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("with_content_encoding", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagContentType.Bytes(),
|
||||
[]byte("text/plain;charset=utf-8"),
|
||||
TagContentEncoding.Bytes(),
|
||||
[]byte("br"),
|
||||
TagBody.Bytes(),
|
||||
[]byte("ord"),
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte("ord"),
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
ContentEncoding: "br",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("with_unknown_tag", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagContentType.Bytes(),
|
||||
[]byte("text/plain;charset=utf-8"),
|
||||
TagNop.Bytes(),
|
||||
[]byte("bar"),
|
||||
TagBody.Bytes(),
|
||||
[]byte("ord"),
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte("ord"),
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("no_body", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagContentType.Bytes(),
|
||||
[]byte("text/plain;charset=utf-8"),
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("no_content_type", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagBody.Bytes(),
|
||||
[]byte("foo"),
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte("foo"),
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("valid_body_in_multiple_pushes", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagContentType.Bytes(),
|
||||
[]byte("text/plain;charset=utf-8"),
|
||||
TagBody.Bytes(),
|
||||
[]byte("foo"),
|
||||
[]byte("bar"),
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte("foobar"),
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("valid_body_in_zero_pushes", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagContentType.Bytes(),
|
||||
[]byte("text/plain;charset=utf-8"),
|
||||
TagBody.Bytes(),
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte(""),
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("valid_body_in_multiple_empty_pushes", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagContentType.Bytes(),
|
||||
[]byte("text/plain;charset=utf-8"),
|
||||
TagBody.Bytes(),
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
{},
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte(""),
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("valid_ignore_trailing", func(t *testing.T) {
|
||||
testParseWitness(
|
||||
t,
|
||||
utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddData(TagContentType.Bytes()).
|
||||
AddData([]byte("text/plain;charset=utf-8")).
|
||||
AddData(TagBody.Bytes()).
|
||||
AddData([]byte("ord")).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
AddOp(txscript.OP_CHECKSIG).
|
||||
Script()),
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte("ord"),
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("valid_ignore_preceding", func(t *testing.T) {
|
||||
testParseWitness(
|
||||
t,
|
||||
utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_CHECKSIG).
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddData(TagContentType.Bytes()).
|
||||
AddData([]byte("text/plain;charset=utf-8")).
|
||||
AddData(TagBody.Bytes()).
|
||||
AddData([]byte("ord")).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script()),
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte("ord"),
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("multiple_inscriptions_in_a_single_witness", func(t *testing.T) {
|
||||
testParseWitness(
|
||||
t,
|
||||
utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddData(TagContentType.Bytes()).
|
||||
AddData([]byte("text/plain;charset=utf-8")).
|
||||
AddData(TagBody.Bytes()).
|
||||
AddData([]byte("foo")).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddData(TagContentType.Bytes()).
|
||||
AddData([]byte("text/plain;charset=utf-8")).
|
||||
AddData(TagBody.Bytes()).
|
||||
AddData([]byte("bar")).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script()),
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte("foo"),
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
},
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte("bar"),
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
Offset: 1,
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("invalid_utf8_does_not_render_inscription_invalid", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagContentType.Bytes(),
|
||||
[]byte("text/plain;charset=utf-8"),
|
||||
TagBody.Bytes(),
|
||||
{0b10000000},
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte{0b10000000},
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("no_endif", func(t *testing.T) {
|
||||
testParseWitness(
|
||||
t,
|
||||
utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
Script()),
|
||||
[]*Envelope{},
|
||||
)
|
||||
})
|
||||
t.Run("no_op_false", func(t *testing.T) {
|
||||
testParseWitness(
|
||||
t,
|
||||
utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script()),
|
||||
[]*Envelope{},
|
||||
)
|
||||
})
|
||||
t.Run("empty_envelope", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{},
|
||||
[]*Envelope{},
|
||||
)
|
||||
})
|
||||
t.Run("wrong_protocol_identifier", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
[]byte("foo"),
|
||||
},
|
||||
[]*Envelope{},
|
||||
)
|
||||
})
|
||||
t.Run("extract_from_second_input", func(t *testing.T) {
|
||||
testTx(
|
||||
t,
|
||||
&types.Transaction{
|
||||
Version: 2,
|
||||
LockTime: 0,
|
||||
TxIn: []*types.TxIn{{}, {
|
||||
Witness: wire.TxWitness{
|
||||
utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddData(TagContentType.Bytes()).
|
||||
AddData([]byte("text/plain;charset=utf-8")).
|
||||
AddData(TagBody.Bytes()).
|
||||
AddData([]byte("ord")).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script(),
|
||||
),
|
||||
{},
|
||||
},
|
||||
}},
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte("ord"),
|
||||
ContentType: "text/plain;charset=utf-8",
|
||||
},
|
||||
InputIndex: 1,
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("inscribe_png", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagContentType.Bytes(),
|
||||
[]byte("image/png"),
|
||||
TagBody.Bytes(),
|
||||
{0x01, 0x02, 0x03},
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: []byte{0x01, 0x02, 0x03},
|
||||
ContentType: "image/png",
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("unknown_odd_fields", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagNop.Bytes(),
|
||||
{0x00},
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("unknown_even_fields", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagUnbound.Bytes(),
|
||||
{0x00},
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{},
|
||||
UnrecognizedEvenField: true,
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("pointer_field_is_recognized", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagPointer.Bytes(),
|
||||
{0x01},
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Pointer: lo.ToPtr(uint64(1)),
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("duplicate_pointer_field_makes_inscription_unbound", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagPointer.Bytes(),
|
||||
{0x01},
|
||||
TagPointer.Bytes(),
|
||||
{0x00},
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Pointer: lo.ToPtr(uint64(1)),
|
||||
},
|
||||
DuplicateField: true,
|
||||
UnrecognizedEvenField: true,
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("incomplete_field", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagNop.Bytes(),
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{},
|
||||
IncompleteField: true,
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("metadata_is_parsed_correctly", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagMetadata.Bytes(),
|
||||
{},
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Metadata: []byte{},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("metadata_is_parsed_correctly_from_chunks", func(t *testing.T) {
|
||||
testEnvelope(
|
||||
t,
|
||||
[][]byte{
|
||||
protocolId,
|
||||
TagMetadata.Bytes(),
|
||||
{0x00},
|
||||
TagMetadata.Bytes(),
|
||||
{0x01},
|
||||
},
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Metadata: []byte{0x00, 0x01},
|
||||
},
|
||||
DuplicateField: true,
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
t.Run("pushnum_opcodes_are_parsed_correctly", func(t *testing.T) {
|
||||
pushNumOpCodes := map[byte][]byte{
|
||||
txscript.OP_1NEGATE: {0x81},
|
||||
txscript.OP_1: {0x01},
|
||||
txscript.OP_2: {0x02},
|
||||
txscript.OP_3: {0x03},
|
||||
txscript.OP_4: {0x04},
|
||||
txscript.OP_5: {0x05},
|
||||
txscript.OP_6: {0x06},
|
||||
txscript.OP_7: {0x07},
|
||||
txscript.OP_8: {0x08},
|
||||
txscript.OP_9: {0x09},
|
||||
txscript.OP_10: {0x10},
|
||||
txscript.OP_11: {0x11},
|
||||
txscript.OP_12: {0x12},
|
||||
txscript.OP_13: {0x13},
|
||||
txscript.OP_14: {0x14},
|
||||
txscript.OP_15: {0x15},
|
||||
txscript.OP_16: {0x16},
|
||||
}
|
||||
for opCode, value := range pushNumOpCodes {
|
||||
script := utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddData(TagBody.Bytes()).
|
||||
AddOp(opCode).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script())
|
||||
|
||||
testParseWitness(
|
||||
t,
|
||||
script,
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{
|
||||
Content: value,
|
||||
},
|
||||
PushNum: true,
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
})
|
||||
t.Run("stuttering", func(t *testing.T) {
|
||||
script := utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script())
|
||||
testParseWitness(
|
||||
t,
|
||||
script,
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{},
|
||||
Stutter: true,
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
script = utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script())
|
||||
testParseWitness(
|
||||
t,
|
||||
script,
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{},
|
||||
Stutter: true,
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
script = utils.Must(NewPushScriptBuilder().
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_AND).
|
||||
AddOp(txscript.OP_FALSE).
|
||||
AddOp(txscript.OP_IF).
|
||||
AddData(protocolId).
|
||||
AddOp(txscript.OP_ENDIF).
|
||||
Script())
|
||||
testParseWitness(
|
||||
t,
|
||||
script,
|
||||
[]*Envelope{
|
||||
{
|
||||
Inscription: Inscription{},
|
||||
Stutter: false,
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
}
|
||||
13
modules/brc20/internal/ordinals/height.go
Normal file
13
modules/brc20/internal/ordinals/height.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package ordinals
|
||||
|
||||
import "github.com/gaze-network/indexer-network/common"
|
||||
|
||||
func GetJubileeHeight(network common.Network) uint64 {
|
||||
switch network {
|
||||
case common.NetworkMainnet:
|
||||
return 824544
|
||||
case common.NetworkTestnet:
|
||||
return 2544192
|
||||
}
|
||||
panic("unsupported network")
|
||||
}
|
||||
27
modules/brc20/internal/ordinals/inscription.go
Normal file
27
modules/brc20/internal/ordinals/inscription.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package ordinals
|
||||
|
||||
import "time"
|
||||
|
||||
// Inscription holds the fields parsed from a single inscription envelope.
type Inscription struct {
	Content         []byte         // raw body bytes; nil when the envelope had no body field
	ContentEncoding string         // raw content-encoding field value (e.g. "br" — see tests)
	ContentType     string         // raw content-type field value (e.g. "text/plain;charset=utf-8")
	Delegate        *InscriptionId // delegate field, set only when it parses as a valid inscription id
	Metadata        []byte         // raw metadata field bytes, chunks concatenated in order
	Metaprotocol    string         // raw metaprotocol field value
	Parent          *InscriptionId // in 0.14, inscription has only one parent
	Pointer         *uint64        // pointer field decoded as little-endian uint64, if present and it fits
}

// InscriptionEntry couples an inscription's parsed payload with its
// chain-level bookkeeping (identity, numbering, curse status and transfers).
//
// TODO: refactor ordinals.InscriptionEntry to entity.InscriptionEntry
type InscriptionEntry struct {
	Id              InscriptionId // inscription id (creating txid + envelope index)
	Number          int64         // inscription number
	SequenceNumber  uint64        // sequence number — presumably assigned in indexing order; confirm
	Cursed          bool          // whether the inscription is cursed
	CursedForBRC20  bool          // whether the inscription counts as cursed for BRC-20 purposes
	CreatedAt       time.Time     // creation timestamp — presumably the creating block's time; confirm
	CreatedAtHeight uint64        // block height at which the inscription was created
	Inscription     Inscription   // parsed envelope payload
	TransferCount   uint32        // number of transfers observed for this inscription
}
|
||||
67
modules/brc20/internal/ordinals/inscription_id.go
Normal file
67
modules/brc20/internal/ordinals/inscription_id.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package ordinals
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/cockroachdb/errors"
|
||||
)
|
||||
|
||||
// InscriptionId uniquely identifies an inscription by the transaction that
// created it and the index of its envelope within that transaction.
type InscriptionId struct {
	TxHash chainhash.Hash
	Index  uint32
}

// String renders the id in the canonical "<txid>i<index>" form.
func (i InscriptionId) String() string {
	return fmt.Sprintf("%si%d", i.TxHash.String(), i.Index)
}

// NewInscriptionId constructs an InscriptionId from its parts.
func NewInscriptionId(txHash chainhash.Hash, index uint32) InscriptionId {
	return InscriptionId{
		TxHash: txHash,
		Index:  index,
	}
}

// ErrInscriptionIdInvalidSeparator is returned by NewInscriptionIdFromString
// when the input does not contain the "i" separator.
var ErrInscriptionIdInvalidSeparator = fmt.Errorf("invalid inscription id: must contain exactly one separator")
|
||||
|
||||
func NewInscriptionIdFromString(s string) (InscriptionId, error) {
|
||||
parts := strings.SplitN(s, "i", 2)
|
||||
if len(parts) != 2 {
|
||||
return InscriptionId{}, errors.WithStack(ErrInscriptionIdInvalidSeparator)
|
||||
}
|
||||
txHash, err := chainhash.NewHashFromStr(parts[0])
|
||||
if err != nil {
|
||||
return InscriptionId{}, errors.Wrap(err, "invalid inscription id: cannot parse txHash")
|
||||
}
|
||||
index, err := strconv.ParseUint(parts[1], 10, 32)
|
||||
if err != nil {
|
||||
return InscriptionId{}, errors.Wrap(err, "invalid inscription id: cannot parse index")
|
||||
}
|
||||
return InscriptionId{
|
||||
TxHash: *txHash,
|
||||
Index: uint32(index),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler
|
||||
func (r InscriptionId) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"` + r.String() + `"`), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler
|
||||
func (r *InscriptionId) UnmarshalJSON(data []byte) error {
|
||||
// data must be quoted
|
||||
if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
|
||||
return errors.New("must be string")
|
||||
}
|
||||
data = data[1 : len(data)-1]
|
||||
parsed, err := NewInscriptionIdFromString(string(data))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
*r = parsed
|
||||
return nil
|
||||
}
|
||||
109
modules/brc20/internal/ordinals/inscription_id_test.go
Normal file
109
modules/brc20/internal/ordinals/inscription_id_test.go
Normal file
@@ -0,0 +1,109 @@
|
||||
package ordinals
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewInscriptionIdFromString(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected InscriptionId
|
||||
shouldError bool
|
||||
}{
|
||||
{
|
||||
name: "valid inscription id 1",
|
||||
input: "1111111111111111111111111111111111111111111111111111111111111111i0",
|
||||
expected: InscriptionId{
|
||||
TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
|
||||
Index: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid inscription id 2",
|
||||
input: "1111111111111111111111111111111111111111111111111111111111111111i1",
|
||||
expected: InscriptionId{
|
||||
TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
|
||||
Index: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid inscription id 3",
|
||||
input: "1111111111111111111111111111111111111111111111111111111111111111i4294967295",
|
||||
expected: InscriptionId{
|
||||
TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
|
||||
Index: 4294967295,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "error no separator",
|
||||
input: "abc",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "error invalid index",
|
||||
input: "xyzixyz",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "error invalid index",
|
||||
input: "abcixyz",
|
||||
shouldError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
actual, err := NewInscriptionIdFromString(tt.input)
|
||||
if tt.shouldError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expected, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInscriptionIdString(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
expected string
|
||||
input InscriptionId
|
||||
}{
|
||||
{
|
||||
name: "valid inscription id 1",
|
||||
expected: "1111111111111111111111111111111111111111111111111111111111111111i0",
|
||||
input: InscriptionId{
|
||||
TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
|
||||
Index: 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid inscription id 2",
|
||||
expected: "1111111111111111111111111111111111111111111111111111111111111111i1",
|
||||
input: InscriptionId{
|
||||
TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
|
||||
Index: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid inscription id 3",
|
||||
expected: "1111111111111111111111111111111111111111111111111111111111111111i4294967295",
|
||||
input: InscriptionId{
|
||||
TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
|
||||
Index: 4294967295,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.expected, tt.input.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
68
modules/brc20/internal/ordinals/sat_point.go
Normal file
68
modules/brc20/internal/ordinals/sat_point.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package ordinals
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/cockroachdb/errors"
|
||||
)
|
||||
|
||||
// SatPoint addresses a position as an offset within a transaction output.
type SatPoint struct {
	OutPoint wire.OutPoint
	Offset   uint64
}

// String renders the sat point in the canonical "<txid>:<vout>:<offset>"
// form.
func (s SatPoint) String() string {
	return fmt.Sprintf("%s:%d", s.OutPoint.String(), s.Offset)
}

// ErrSatPointInvalidSeparator is returned by NewSatPointFromString when the
// input does not contain two ":" separators.
var ErrSatPointInvalidSeparator = fmt.Errorf("invalid sat point: must contain exactly two separators")
|
||||
|
||||
func NewSatPointFromString(s string) (SatPoint, error) {
|
||||
parts := strings.SplitN(s, ":", 3)
|
||||
if len(parts) != 3 {
|
||||
return SatPoint{}, errors.WithStack(ErrSatPointInvalidSeparator)
|
||||
}
|
||||
txHash, err := chainhash.NewHashFromStr(parts[0])
|
||||
if err != nil {
|
||||
return SatPoint{}, errors.Wrap(err, "invalid inscription id: cannot parse txHash")
|
||||
}
|
||||
index, err := strconv.ParseUint(parts[1], 10, 32)
|
||||
if err != nil {
|
||||
return SatPoint{}, errors.Wrap(err, "invalid inscription id: cannot parse index")
|
||||
}
|
||||
offset, err := strconv.ParseUint(parts[2], 10, 64)
|
||||
if err != nil {
|
||||
return SatPoint{}, errors.Wrap(err, "invalid sat point: cannot parse offset")
|
||||
}
|
||||
return SatPoint{
|
||||
OutPoint: wire.OutPoint{
|
||||
Hash: *txHash,
|
||||
Index: uint32(index),
|
||||
},
|
||||
Offset: offset,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler
|
||||
func (r SatPoint) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"` + r.String() + `"`), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler
|
||||
func (r *SatPoint) UnmarshalJSON(data []byte) error {
|
||||
// data must be quoted
|
||||
if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
|
||||
return errors.New("must be string")
|
||||
}
|
||||
data = data[1 : len(data)-1]
|
||||
parsed, err := NewSatPointFromString(string(data))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
*r = parsed
|
||||
return nil
|
||||
}
|
||||
89
modules/brc20/internal/ordinals/sat_point_test.go
Normal file
89
modules/brc20/internal/ordinals/sat_point_test.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package ordinals
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Cleverse/go-utilities/utils"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewSatPointFromString(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected SatPoint
|
||||
shouldError bool
|
||||
}{
|
||||
{
|
||||
name: "valid sat point",
|
||||
input: "1111111111111111111111111111111111111111111111111111111111111111:1:2",
|
||||
expected: SatPoint{
|
||||
OutPoint: wire.OutPoint{
|
||||
Hash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
|
||||
Index: 1,
|
||||
},
|
||||
Offset: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "error no separator",
|
||||
input: "abc",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "error invalid output index",
|
||||
input: "abc:xyz",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "error no offset",
|
||||
input: "1111111111111111111111111111111111111111111111111111111111111111:1",
|
||||
shouldError: true,
|
||||
},
|
||||
{
|
||||
name: "error invalid offset",
|
||||
input: "1111111111111111111111111111111111111111111111111111111111111111:1:foo",
|
||||
shouldError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
actual, err := NewSatPointFromString(tt.input)
|
||||
if tt.shouldError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expected, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSatPointString(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input SatPoint
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "valid sat point",
|
||||
input: SatPoint{
|
||||
OutPoint: wire.OutPoint{
|
||||
Hash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
|
||||
Index: 1,
|
||||
},
|
||||
Offset: 2,
|
||||
},
|
||||
expected: "1111111111111111111111111111111111111111111111111111111111111111:1:2",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.expected, tt.input.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
170
modules/brc20/internal/ordinals/script_builder.go
Normal file
170
modules/brc20/internal/ordinals/script_builder.go
Normal file
@@ -0,0 +1,170 @@
|
||||
package ordinals
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
)
|
||||
|
||||
// PushScriptBuilder is a helper to build scripts that requires data pushes to use OP_PUSHDATA* or OP_DATA_* opcodes only.
// Empty data pushes are still encoded as OP_0.
type PushScriptBuilder struct {
	script []byte // accumulated script bytes
	err    error  // first error encountered; once set, further builder calls are no-ops
}

// NewPushScriptBuilder returns an empty PushScriptBuilder ready for use.
func NewPushScriptBuilder() *PushScriptBuilder {
	return &PushScriptBuilder{}
}
|
||||
|
||||
// canonicalDataSize returns the number of bytes the canonical encoding of the
|
||||
// data will take.
|
||||
func canonicalDataSize(data []byte) int {
|
||||
dataLen := len(data)
|
||||
|
||||
// When the data consists of a single number that can be represented
|
||||
// by one of the "small integer" opcodes, that opcode will be instead
|
||||
// of a data push opcode followed by the number.
|
||||
if dataLen == 0 {
|
||||
return 1
|
||||
}
|
||||
|
||||
if dataLen < txscript.OP_PUSHDATA1 {
|
||||
return 1 + dataLen
|
||||
} else if dataLen <= 0xff {
|
||||
return 2 + dataLen
|
||||
} else if dataLen <= 0xffff {
|
||||
return 3 + dataLen
|
||||
}
|
||||
|
||||
return 5 + dataLen
|
||||
}
|
||||
|
||||
func pushDataToBytes(data []byte) []byte {
|
||||
if len(data) == 0 {
|
||||
return []byte{txscript.OP_0}
|
||||
}
|
||||
script := make([]byte, 0)
|
||||
dataLen := len(data)
|
||||
if dataLen < txscript.OP_PUSHDATA1 {
|
||||
script = append(script, byte(txscript.OP_DATA_1-1+dataLen))
|
||||
} else if dataLen <= 0xff {
|
||||
script = append(script, txscript.OP_PUSHDATA1, byte(dataLen))
|
||||
} else if dataLen <= 0xffff {
|
||||
buf := make([]byte, 2)
|
||||
binary.LittleEndian.PutUint16(buf, uint16(dataLen))
|
||||
script = append(script, txscript.OP_PUSHDATA2)
|
||||
script = append(script, buf...)
|
||||
} else {
|
||||
buf := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(buf, uint32(dataLen))
|
||||
script = append(script, txscript.OP_PUSHDATA4)
|
||||
script = append(script, buf...)
|
||||
}
|
||||
// Append the actual data.
|
||||
script = append(script, data...)
|
||||
return script
|
||||
}
|
||||
|
||||
// AddData pushes the passed data to the end of the script. It automatically
|
||||
// chooses canonical opcodes depending on the length of the data. A zero length
|
||||
// buffer will lead to a push of empty data onto the stack (OP_0) and any push
|
||||
// of data greater than MaxScriptElementSize will not modify the script since
|
||||
// that is not allowed by the script engine. Also, the script will not be
|
||||
// modified if pushing the data would cause the script to exceed the maximum
|
||||
// allowed script engine size.
|
||||
func (b *PushScriptBuilder) AddData(data []byte) *PushScriptBuilder {
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
// Pushes that would cause the script to exceed the largest allowed
|
||||
// script size would result in a non-canonical script.
|
||||
dataSize := canonicalDataSize(data)
|
||||
if len(b.script)+dataSize > txscript.MaxScriptSize {
|
||||
str := fmt.Sprintf("adding %d bytes of data would exceed the "+
|
||||
"maximum allowed canonical script length of %d",
|
||||
dataSize, txscript.MaxScriptSize)
|
||||
b.err = txscript.ErrScriptNotCanonical(str)
|
||||
return b
|
||||
}
|
||||
|
||||
// Pushes larger than the max script element size would result in a
|
||||
// script that is not canonical.
|
||||
dataLen := len(data)
|
||||
if dataLen > txscript.MaxScriptElementSize {
|
||||
str := fmt.Sprintf("adding a data element of %d bytes would "+
|
||||
"exceed the maximum allowed script element size of %d",
|
||||
dataLen, txscript.MaxScriptElementSize)
|
||||
b.err = txscript.ErrScriptNotCanonical(str)
|
||||
return b
|
||||
}
|
||||
|
||||
b.script = append(b.script, pushDataToBytes(data)...)
|
||||
return b
|
||||
}
|
||||
|
||||
// AddFullData should not typically be used by ordinary users as it does not
|
||||
// include the checks which prevent data pushes larger than the maximum allowed
|
||||
// sizes which leads to scripts that can't be executed. This is provided for
|
||||
// testing purposes such as regression tests where sizes are intentionally made
|
||||
// larger than allowed.
|
||||
//
|
||||
// Use AddData instead.
|
||||
func (b *PushScriptBuilder) AddFullData(data []byte) *PushScriptBuilder {
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
b.script = append(b.script, pushDataToBytes(data)...)
|
||||
return b
|
||||
}
|
||||
|
||||
// AddOp pushes the passed opcode to the end of the script. The script will not
|
||||
// be modified if pushing the opcode would cause the script to exceed the
|
||||
// maximum allowed script engine size.
|
||||
func (b *PushScriptBuilder) AddOp(opcode byte) *PushScriptBuilder {
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
// Pushes that would cause the script to exceed the largest allowed
|
||||
// script size would result in a non-canonical script.
|
||||
if len(b.script)+1 > txscript.MaxScriptSize {
|
||||
str := fmt.Sprintf("adding an opcode would exceed the maximum "+
|
||||
"allowed canonical script length of %d", txscript.MaxScriptSize)
|
||||
b.err = txscript.ErrScriptNotCanonical(str)
|
||||
return b
|
||||
}
|
||||
|
||||
b.script = append(b.script, opcode)
|
||||
return b
|
||||
}
|
||||
|
||||
// AddOps pushes the passed opcodes to the end of the script. The script will
|
||||
// not be modified if pushing the opcodes would cause the script to exceed the
|
||||
// maximum allowed script engine size.
|
||||
func (b *PushScriptBuilder) AddOps(opcodes []byte) *PushScriptBuilder {
|
||||
if b.err != nil {
|
||||
return b
|
||||
}
|
||||
|
||||
// Pushes that would cause the script to exceed the largest allowed
|
||||
// script size would result in a non-canonical script.
|
||||
if len(b.script)+len(opcodes) > txscript.MaxScriptSize {
|
||||
str := fmt.Sprintf("adding opcodes would exceed the maximum "+
|
||||
"allowed canonical script length of %d", txscript.MaxScriptSize)
|
||||
b.err = txscript.ErrScriptNotCanonical(str)
|
||||
return b
|
||||
}
|
||||
|
||||
b.script = append(b.script, opcodes...)
|
||||
return b
|
||||
}
|
||||
|
||||
// Script returns the currently built script. When any error occurred while
// building the script, the script is returned up to the point of the first
// error, along with that error.
func (b *PushScriptBuilder) Script() ([]byte, error) {
	return b.script, b.err
}
|
||||
81
modules/brc20/internal/ordinals/tag.go
Normal file
81
modules/brc20/internal/ordinals/tag.go
Normal file
@@ -0,0 +1,81 @@
|
||||
package ordinals
|
||||
|
||||
// Tag represents a data field in an inscription envelope.
//
// NOTE(review): the previous comment referenced runestones/cenotaphs, which
// is runes-protocol terminology; the tag values below (content type, parent,
// metadata, ...) are inscription envelope fields — confirm intended wording.
type Tag uint8

// Tag values are fixed protocol field numbers, so they are declared as
// constants rather than mutable package-level variables.
const (
	TagBody    = Tag(0)
	TagPointer = Tag(2)
	// TagUnbound is unrecognized
	TagUnbound = Tag(66)

	TagContentType     = Tag(1)
	TagParent          = Tag(3)
	TagMetadata        = Tag(5)
	TagMetaprotocol    = Tag(7)
	TagContentEncoding = Tag(9)
	TagDelegate        = Tag(11)
	// TagNop is unrecognized
	TagNop = Tag(255)
)
|
||||
|
||||
var allTags = map[Tag]struct{}{
|
||||
TagPointer: {},
|
||||
|
||||
TagContentType: {},
|
||||
TagParent: {},
|
||||
TagMetadata: {},
|
||||
TagMetaprotocol: {},
|
||||
TagContentEncoding: {},
|
||||
TagDelegate: {},
|
||||
}
|
||||
|
||||
func (t Tag) IsValid() bool {
|
||||
_, ok := allTags[t]
|
||||
return ok
|
||||
}
|
||||
|
||||
var chunkedTags = map[Tag]struct{}{
|
||||
TagMetadata: {},
|
||||
}
|
||||
|
||||
func (t Tag) IsChunked() bool {
|
||||
_, ok := chunkedTags[t]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (t Tag) Bytes() []byte {
|
||||
if t == TagBody {
|
||||
return []byte{} // body tag is empty data push
|
||||
}
|
||||
return []byte{byte(t)}
|
||||
}
|
||||
|
||||
func ParseTag(input interface{}) (Tag, error) {
|
||||
switch input := input.(type) {
|
||||
case Tag:
|
||||
return input, nil
|
||||
case int:
|
||||
return Tag(input), nil
|
||||
case int8:
|
||||
return Tag(input), nil
|
||||
case int16:
|
||||
return Tag(input), nil
|
||||
case int32:
|
||||
return Tag(input), nil
|
||||
case int64:
|
||||
return Tag(input), nil
|
||||
case uint:
|
||||
return Tag(input), nil
|
||||
case uint8:
|
||||
return Tag(input), nil
|
||||
case uint16:
|
||||
return Tag(input), nil
|
||||
case uint32:
|
||||
return Tag(input), nil
|
||||
case uint64:
|
||||
return Tag(input), nil
|
||||
default:
|
||||
panic("invalid tag input type")
|
||||
}
|
||||
}
|
||||
542
modules/brc20/internal/repository/postgres/brc20.go
Normal file
542
modules/brc20/internal/repository/postgres/brc20.go
Normal file
@@ -0,0 +1,542 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres/gen"
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// Compile-time check that Repository satisfies the BRC-20 data gateway.
var _ datagateway.BRC20DataGateway = (*Repository)(nil)

// warning: GetLatestBlock currently returns a types.BlockHeader with only Height and Hash fields populated.
// This is because it is known that all usage of this function only requires these fields. In the future, we may want to populate all fields for type safety.
func (r *Repository) GetLatestBlock(ctx context.Context) (types.BlockHeader, error) {
	block, err := r.queries.GetLatestIndexedBlock(ctx)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			// Nothing indexed yet: surface as a typed not-found error.
			return types.BlockHeader{}, errors.WithStack(errs.NotFound)
		}
		return types.BlockHeader{}, errors.Wrap(err, "error during query")
	}
	hash, err := chainhash.NewHashFromStr(block.Hash)
	if err != nil {
		return types.BlockHeader{}, errors.Wrap(err, "failed to parse block hash")
	}
	return types.BlockHeader{
		Height: int64(block.Height),
		Hash:   *hash,
	}, nil
}
|
||||
|
||||
// GetIndexedBlockByHeight implements datagateway.BRC20DataGateway.
// It returns a wrapped errs.NotFound when no block is indexed at height.
func (r *Repository) GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error) {
	model, err := r.queries.GetIndexedBlockByHeight(ctx, int32(height))
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil, errors.WithStack(errs.NotFound)
		}
		return nil, errors.Wrap(err, "error during query")
	}

	indexedBlock, err := mapIndexedBlockModelToType(model)
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse indexed block model")
	}
	return &indexedBlock, nil
}
|
||||
|
||||
// GetProcessorStats returns the most recently recorded processor stats row,
// or a wrapped errs.NotFound when none exists yet.
func (r *Repository) GetProcessorStats(ctx context.Context) (*entity.ProcessorStats, error) {
	model, err := r.queries.GetLatestProcessorStats(ctx)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil, errors.WithStack(errs.NotFound)
		}
		return nil, errors.WithStack(err)
	}
	stats := mapProcessorStatsModelToType(model)
	return &stats, nil
}
|
||||
|
||||
// GetInscriptionTransfersInOutPoints returns the inscription transfers whose
// new location lies in any of the given outpoints, grouped by the transfer's
// new sat point. Outpoints with no matching transfers are absent from the
// result.
func (r *Repository) GetInscriptionTransfersInOutPoints(ctx context.Context, outPoints []wire.OutPoint) (map[ordinals.SatPoint][]*entity.InscriptionTransfer, error) {
	// The generated query takes parallel arrays of tx hashes and output
	// indexes.
	txHashArr := lo.Map(outPoints, func(outPoint wire.OutPoint, _ int) string {
		return outPoint.Hash.String()
	})
	txOutIdxArr := lo.Map(outPoints, func(outPoint wire.OutPoint, _ int) int32 {
		return int32(outPoint.Index)
	})
	models, err := r.queries.GetInscriptionTransfersInOutPoints(ctx, gen.GetInscriptionTransfersInOutPointsParams{
		TxHashArr:   txHashArr,
		TxOutIdxArr: txOutIdxArr,
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	results := make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
	for _, model := range models {
		inscriptionTransfer, err := mapInscriptionTransferModelToType(model)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		results[inscriptionTransfer.NewSatPoint] = append(results[inscriptionTransfer.NewSatPoint], &inscriptionTransfer)
	}
	return results, nil
}
|
||||
|
||||
// GetInscriptionEntriesByIds returns inscription entries keyed by inscription
// id. Ids with no stored entry are absent from the result.
func (r *Repository) GetInscriptionEntriesByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*ordinals.InscriptionEntry, error) {
	idStrs := lo.Map(ids, func(id ordinals.InscriptionId, _ int) string { return id.String() })
	models, err := r.queries.GetInscriptionEntriesByIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	result := make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
	for _, model := range models {
		inscriptionEntry, err := mapInscriptionEntryModelToType(model)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse inscription entry model")
		}
		result[inscriptionEntry.Id] = &inscriptionEntry
	}
	return result, nil
}
|
||||
|
||||
// GetInscriptionNumbersByIds returns inscription numbers keyed by inscription
// id. Ids with no stored number are absent from the result.
func (r *Repository) GetInscriptionNumbersByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]int64, error) {
	idStrs := lo.Map(ids, func(id ordinals.InscriptionId, _ int) string { return id.String() })
	models, err := r.queries.GetInscriptionNumbersByIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	result := make(map[ordinals.InscriptionId]int64)
	for _, model := range models {
		inscriptionId, err := ordinals.NewInscriptionIdFromString(model.Id)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse inscription id")
		}
		result[inscriptionId] = model.Number
	}
	return result, nil
}
|
||||
|
||||
// GetInscriptionParentsByIds returns, for each inscription id, its parent
// inscription id. Inscriptions with no parent — or, as a sanity check, with
// more than one parent — are skipped.
func (r *Repository) GetInscriptionParentsByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]ordinals.InscriptionId, error) {
	idStrs := lo.Map(ids, func(id ordinals.InscriptionId, _ int) string { return id.String() })
	models, err := r.queries.GetInscriptionParentsByIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	result := make(map[ordinals.InscriptionId]ordinals.InscriptionId)
	for _, model := range models {
		if len(model.Parents) == 0 {
			// no parent
			continue
		}
		if len(model.Parents) > 1 {
			// sanity check, should not happen since 0.14 ord supports only 1 parent
			continue
		}
		inscriptionId, err := ordinals.NewInscriptionIdFromString(model.Id)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse inscription id")
		}
		parentId, err := ordinals.NewInscriptionIdFromString(model.Parents[0])
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse parent id")
		}
		result[inscriptionId] = parentId
	}
	return result, nil
}
|
||||
|
||||
func (r *Repository) GetLatestEventId(ctx context.Context) (int64, error) {
|
||||
row, err := r.queries.GetLatestEventIds(ctx)
|
||||
if err != nil {
|
||||
return 0, errors.WithStack(err)
|
||||
}
|
||||
return max(row.EventDeployID.(int64), row.EventMintID.(int64), row.EventInscribeTransferID.(int64), row.EventTransferTransferID.(int64)), nil
|
||||
}
|
||||
|
||||
func (r *Repository) GetBalancesBatchAtHeight(ctx context.Context, blockHeight uint64, queries []datagateway.GetBalancesBatchAtHeightQuery) (map[string]map[string]*entity.Balance, error) {
|
||||
pkScripts := make([]string, 0)
|
||||
ticks := make([]string, 0)
|
||||
for _, query := range queries {
|
||||
pkScripts = append(pkScripts, query.PkScriptHex)
|
||||
ticks = append(ticks, query.Tick)
|
||||
}
|
||||
models, err := r.queries.GetBalancesBatchAtHeight(ctx, gen.GetBalancesBatchAtHeightParams{
|
||||
PkscriptArr: pkScripts,
|
||||
TickArr: ticks,
|
||||
BlockHeight: int32(blockHeight),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
result := make(map[string]map[string]*entity.Balance)
|
||||
for _, model := range models {
|
||||
balance, err := mapBalanceModelToType(model)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse balance model")
|
||||
}
|
||||
if _, ok := result[model.Pkscript]; !ok {
|
||||
result[model.Pkscript] = make(map[string]*entity.Balance)
|
||||
}
|
||||
result[model.Pkscript][model.Tick] = &balance
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetEventInscribeTransfersByInscriptionIds returns inscribe-transfer events
// keyed by inscription id. Ids with no matching event are absent from the
// result.
func (r *Repository) GetEventInscribeTransfersByInscriptionIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*entity.EventInscribeTransfer, error) {
	idStrs := lo.Map(ids, func(id ordinals.InscriptionId, _ int) string { return id.String() })
	models, err := r.queries.GetEventInscribeTransfersByInscriptionIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	result := make(map[ordinals.InscriptionId]*entity.EventInscribeTransfer)
	for _, model := range models {
		event, err := mapEventInscribeTransferModelToType(model)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse event inscribe transfer model")
		}
		result[event.InscriptionId] = &event
	}
	return result, nil
}
|
||||
|
||||
// GetTickEntriesByTicks returns tick entries keyed by tick. Ticks with no
// stored entry are absent from the result.
func (r *Repository) GetTickEntriesByTicks(ctx context.Context, ticks []string) (map[string]*entity.TickEntry, error) {
	models, err := r.queries.GetTickEntriesByTicks(ctx, ticks)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	result := make(map[string]*entity.TickEntry)
	for _, model := range models {
		tickEntry, err := mapTickEntryModelToType(model)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse tick entry model")
		}
		result[tickEntry.Tick] = &tickEntry
	}
	return result, nil
}
|
||||
|
||||
// CreateIndexedBlock inserts one indexed-block row.
func (r *Repository) CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error {
	params := mapIndexedBlockTypeToParams(*block)
	if err := r.queries.CreateIndexedBlock(ctx, params); err != nil {
		return errors.WithStack(err)
	}
	return nil
}

// CreateProcessorStats inserts one processor-stats row.
func (r *Repository) CreateProcessorStats(ctx context.Context, stats *entity.ProcessorStats) error {
	params := mapProcessorStatsTypeToParams(*stats)
	if err := r.queries.CreateProcessorStats(ctx, params); err != nil {
		return errors.WithStack(err)
	}
	return nil
}
|
||||
|
||||
// CreateTickEntries batch-inserts tick entry rows for the given block height.
func (r *Repository) CreateTickEntries(ctx context.Context, blockHeight uint64, entries []*entity.TickEntry) error {
	entryParams := make([]gen.CreateTickEntriesParams, 0)
	for _, entry := range entries {
		// mapTickEntryTypeToParams yields both entry and entry-state params;
		// only the entry params are used here.
		params, _, err := mapTickEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map tick entry to create params")
		}
		entryParams = append(entryParams, params)
	}
	results := r.queries.CreateTickEntries(ctx, entryParams)
	// Collect per-statement errors from the pgx batch and report them together.
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}

// CreateTickEntryStates batch-inserts tick entry state rows for the given
// block height.
func (r *Repository) CreateTickEntryStates(ctx context.Context, blockHeight uint64, entryStates []*entity.TickEntry) error {
	entryParams := make([]gen.CreateTickEntryStatesParams, 0)
	for _, entry := range entryStates {
		_, params, err := mapTickEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map tick entry to create params")
		}
		entryParams = append(entryParams, params)
	}
	results := r.queries.CreateTickEntryStates(ctx, entryParams)
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}
|
||||
|
||||
// CreateInscriptionEntries batch-inserts inscription entry rows for the given
// block height.
func (r *Repository) CreateInscriptionEntries(ctx context.Context, blockHeight uint64, entries []*ordinals.InscriptionEntry) error {
	inscriptionEntryParams := make([]gen.CreateInscriptionEntriesParams, 0)
	for _, entry := range entries {
		// mapInscriptionEntryTypeToParams yields both entry and entry-state
		// params; only the entry params are used here.
		params, _, err := mapInscriptionEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map inscription entry to create params")
		}
		inscriptionEntryParams = append(inscriptionEntryParams, params)
	}
	results := r.queries.CreateInscriptionEntries(ctx, inscriptionEntryParams)
	// Collect per-statement errors from the pgx batch and report them together.
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}

// CreateInscriptionEntryStates batch-inserts inscription entry state rows for
// the given block height.
func (r *Repository) CreateInscriptionEntryStates(ctx context.Context, blockHeight uint64, entryStates []*ordinals.InscriptionEntry) error {
	inscriptionEntryStatesParams := make([]gen.CreateInscriptionEntryStatesParams, 0)
	for _, entry := range entryStates {
		_, params, err := mapInscriptionEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map inscription entry to create params")
		}
		inscriptionEntryStatesParams = append(inscriptionEntryStatesParams, params)
	}
	results := r.queries.CreateInscriptionEntryStates(ctx, inscriptionEntryStatesParams)
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}
|
||||
|
||||
// CreateInscriptionTransfers batch-inserts inscription transfer rows.
func (r *Repository) CreateInscriptionTransfers(ctx context.Context, transfers []*entity.InscriptionTransfer) error {
	params := lo.Map(transfers, func(transfer *entity.InscriptionTransfer, _ int) gen.CreateInscriptionTransfersParams {
		return mapInscriptionTransferTypeToParams(*transfer)
	})
	results := r.queries.CreateInscriptionTransfers(ctx, params)
	// Collect per-statement errors from the pgx batch and report them together.
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}
|
||||
|
||||
// CreateEventDeploys batch-inserts BRC-20 deploy event rows.
func (r *Repository) CreateEventDeploys(ctx context.Context, events []*entity.EventDeploy) error {
	params := make([]gen.CreateEventDeploysParams, 0)
	for _, event := range events {
		param, err := mapEventDeployTypeToParams(*event)
		if err != nil {
			return errors.Wrap(err, "cannot map event deploy to create params")
		}
		params = append(params, param)
	}
	results := r.queries.CreateEventDeploys(ctx, params)
	// Collect per-statement errors from the pgx batch and report them together.
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}

// CreateEventMints batch-inserts BRC-20 mint event rows.
func (r *Repository) CreateEventMints(ctx context.Context, events []*entity.EventMint) error {
	params := make([]gen.CreateEventMintsParams, 0)
	for _, event := range events {
		param, err := mapEventMintTypeToParams(*event)
		if err != nil {
			return errors.Wrap(err, "cannot map event mint to create params")
		}
		params = append(params, param)
	}
	results := r.queries.CreateEventMints(ctx, params)
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}

// CreateEventInscribeTransfers batch-inserts BRC-20 inscribe-transfer event
// rows.
func (r *Repository) CreateEventInscribeTransfers(ctx context.Context, events []*entity.EventInscribeTransfer) error {
	params := make([]gen.CreateEventInscribeTransfersParams, 0)
	for _, event := range events {
		param, err := mapEventInscribeTransferTypeToParams(*event)
		if err != nil {
			return errors.Wrap(err, "cannot map event transfer to create params")
		}
		params = append(params, param)
	}
	results := r.queries.CreateEventInscribeTransfers(ctx, params)
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}

// CreateEventTransferTransfers batch-inserts BRC-20 transfer-transfer event
// rows.
func (r *Repository) CreateEventTransferTransfers(ctx context.Context, events []*entity.EventTransferTransfer) error {
	params := make([]gen.CreateEventTransferTransfersParams, 0)
	for _, event := range events {
		param, err := mapEventTransferTransferTypeToParams(*event)
		if err != nil {
			return errors.Wrap(err, "cannot map event transfer to create params")
		}
		params = append(params, param)
	}
	results := r.queries.CreateEventTransferTransfers(ctx, params)
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}
|
||||
|
||||
// CreateBalances batch-inserts BRC-20 balance rows.
func (r *Repository) CreateBalances(ctx context.Context, balances []*entity.Balance) error {
	params := lo.Map(balances, func(balance *entity.Balance, _ int) gen.CreateBalancesParams {
		return mapBalanceTypeToParams(*balance)
	})
	results := r.queries.CreateBalances(ctx, params)
	// Collect per-statement errors from the pgx batch and report them together.
	var execErrors []error
	results.Exec(func(i int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}
|
||||
|
||||
// The Delete*SinceHeight methods below remove rows written since the given
// block height; together they roll the BRC-20 state back during a chain
// reorg. NOTE(review): whether "since" is inclusive of height depends on the
// generated SQL — confirm there before relying on the boundary.

// DeleteIndexedBlocksSinceHeight removes indexed-block rows since height.
func (r *Repository) DeleteIndexedBlocksSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteIndexedBlocksSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

// DeleteProcessorStatsSinceHeight removes processor-stats rows since height.
func (r *Repository) DeleteProcessorStatsSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteProcessorStatsSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

// DeleteTickEntriesSinceHeight removes tick entry rows since height.
func (r *Repository) DeleteTickEntriesSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteTickEntriesSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

// DeleteTickEntryStatesSinceHeight removes tick entry state rows since height.
func (r *Repository) DeleteTickEntryStatesSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteTickEntryStatesSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

// DeleteEventDeploysSinceHeight removes deploy event rows since height.
func (r *Repository) DeleteEventDeploysSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteEventDeploysSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

// DeleteEventMintsSinceHeight removes mint event rows since height.
func (r *Repository) DeleteEventMintsSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteEventMintsSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

// DeleteEventInscribeTransfersSinceHeight removes inscribe-transfer event
// rows since height.
func (r *Repository) DeleteEventInscribeTransfersSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteEventInscribeTransfersSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

// DeleteEventTransferTransfersSinceHeight removes transfer-transfer event
// rows since height.
func (r *Repository) DeleteEventTransferTransfersSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteEventTransferTransfersSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

// DeleteBalancesSinceHeight removes balance rows since height.
func (r *Repository) DeleteBalancesSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteBalancesSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

// DeleteInscriptionEntriesSinceHeight removes inscription entry rows since
// height.
func (r *Repository) DeleteInscriptionEntriesSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteInscriptionEntriesSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

// DeleteInscriptionEntryStatesSinceHeight removes inscription entry state
// rows since height.
func (r *Repository) DeleteInscriptionEntryStatesSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteInscriptionEntryStatesSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

// DeleteInscriptionTransfersSinceHeight removes inscription transfer rows
// since height.
func (r *Repository) DeleteInscriptionTransfersSinceHeight(ctx context.Context, height uint64) error {
	if err := r.queries.DeleteInscriptionTransfersSinceHeight(ctx, int32(height)); err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}
|
||||
684
modules/brc20/internal/repository/postgres/gen/batch.go
Normal file
684
modules/brc20/internal/repository/postgres/gen/batch.go
Normal file
@@ -0,0 +1,684 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
// source: batch.go
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrBatchAlreadyClosed = errors.New("batch already closed")
|
||||
)
|
||||
|
||||
const createBalances = `-- name: CreateBalances :batchexec
INSERT INTO "brc20_balances" ("pkscript", "block_height", "tick", "overall_balance", "available_balance") VALUES ($1, $2, $3, $4, $5)
`

// NOTE(review): this file is generated by sqlc (see file header) — do not
// hand-edit; any change, including comments, is lost on regeneration.
type CreateBalancesBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

type CreateBalancesParams struct {
	Pkscript         string
	BlockHeight      int32
	Tick             string
	OverallBalance   pgtype.Numeric
	AvailableBalance pgtype.Numeric
}

func (q *Queries) CreateBalances(ctx context.Context, arg []CreateBalancesParams) *CreateBalancesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Pkscript,
			a.BlockHeight,
			a.Tick,
			a.OverallBalance,
			a.AvailableBalance,
		}
		batch.Queue(createBalances, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateBalancesBatchResults{br, len(arg), false}
}

func (b *CreateBalancesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

func (b *CreateBalancesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
|
||||
|
||||
const createEventDeploys = `-- name: CreateEventDeploys :batchexec
INSERT INTO "brc20_event_deploys" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "total_supply", "decimals", "limit_per_mint", "is_self_mint") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
`

// CreateEventDeploysBatchResults tracks an in-flight pgx batch created by
// CreateEventDeploys.
type CreateEventDeploysBatchResults struct {
	br     pgx.BatchResults // underlying pgx batch handle
	tot    int              // number of queued statements
	closed bool             // set by Close; Exec then reports ErrBatchAlreadyClosed
}

// CreateEventDeploysParams holds the column values for one queued INSERT into
// brc20_event_deploys.
type CreateEventDeploysParams struct {
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	TotalSupply       pgtype.Numeric
	Decimals          int16
	LimitPerMint      pgtype.Numeric
	IsSelfMint        bool
}

// CreateEventDeploys queues one INSERT per element of arg and sends them all
// in a single pgx batch. Drive the batch by calling Exec on the result.
func (q *Queries) CreateEventDeploys(ctx context.Context, arg []CreateEventDeploysParams) *CreateEventDeploysBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.Pkscript,
			a.Satpoint,
			a.TotalSupply,
			a.Decimals,
			a.LimitPerMint,
			a.IsSelfMint,
		}
		batch.Queue(createEventDeploys, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventDeploysBatchResults{br, len(arg), false}
}

// Exec runs every queued statement, invoking f (if non-nil) with each
// statement's index and error. If the batch was closed beforehand, f receives
// ErrBatchAlreadyClosed instead. The batch is always closed before returning.
func (b *CreateEventDeploysBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying pgx resources.
func (b *CreateEventDeploysBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
|
||||
|
||||
const createEventInscribeTransfers = `-- name: CreateEventInscribeTransfers :batchexec
INSERT INTO "brc20_event_inscribe_transfers" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "output_index", "sats_amount", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
`

// CreateEventInscribeTransfersBatchResults tracks an in-flight pgx batch
// created by CreateEventInscribeTransfers.
type CreateEventInscribeTransfersBatchResults struct {
	br     pgx.BatchResults // underlying pgx batch handle
	tot    int              // number of queued statements
	closed bool             // set by Close; Exec then reports ErrBatchAlreadyClosed
}

// CreateEventInscribeTransfersParams holds the column values for one queued
// INSERT into brc20_event_inscribe_transfers.
type CreateEventInscribeTransfersParams struct {
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	OutputIndex       int32
	SatsAmount        int64
	Amount            pgtype.Numeric
}

// CreateEventInscribeTransfers queues one INSERT per element of arg and sends
// them all in a single pgx batch. Drive the batch by calling Exec.
func (q *Queries) CreateEventInscribeTransfers(ctx context.Context, arg []CreateEventInscribeTransfersParams) *CreateEventInscribeTransfersBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.Pkscript,
			a.Satpoint,
			a.OutputIndex,
			a.SatsAmount,
			a.Amount,
		}
		batch.Queue(createEventInscribeTransfers, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventInscribeTransfersBatchResults{br, len(arg), false}
}

// Exec runs every queued statement, invoking f (if non-nil) with each
// statement's index and error. If the batch was closed beforehand, f receives
// ErrBatchAlreadyClosed instead. The batch is always closed before returning.
func (b *CreateEventInscribeTransfersBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying pgx resources.
func (b *CreateEventInscribeTransfersBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
|
||||
|
||||
const createEventMints = `-- name: CreateEventMints :batchexec
INSERT INTO "brc20_event_mints" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "amount", "parent_id") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
`

// CreateEventMintsBatchResults tracks an in-flight pgx batch created by
// CreateEventMints.
type CreateEventMintsBatchResults struct {
	br     pgx.BatchResults // underlying pgx batch handle
	tot    int              // number of queued statements
	closed bool             // set by Close; Exec then reports ErrBatchAlreadyClosed
}

// CreateEventMintsParams holds the column values for one queued INSERT into
// brc20_event_mints.
type CreateEventMintsParams struct {
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	Amount            pgtype.Numeric
	ParentID          pgtype.Text
}

// CreateEventMints queues one INSERT per element of arg and sends them all in
// a single pgx batch. Drive the batch by calling Exec on the returned results.
func (q *Queries) CreateEventMints(ctx context.Context, arg []CreateEventMintsParams) *CreateEventMintsBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.Pkscript,
			a.Satpoint,
			a.Amount,
			a.ParentID,
		}
		batch.Queue(createEventMints, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventMintsBatchResults{br, len(arg), false}
}

// Exec runs every queued statement, invoking f (if non-nil) with each
// statement's index and error. If the batch was closed beforehand, f receives
// ErrBatchAlreadyClosed instead. The batch is always closed before returning.
func (b *CreateEventMintsBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying pgx resources.
func (b *CreateEventMintsBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
|
||||
|
||||
const createEventTransferTransfers = `-- name: CreateEventTransferTransfers :batchexec
INSERT INTO "brc20_event_transfer_transfers" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "from_pkscript", "from_satpoint", "from_input_index", "to_pkscript", "to_satpoint", "to_output_index", "spent_as_fee", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
`

// CreateEventTransferTransfersBatchResults tracks an in-flight pgx batch
// created by CreateEventTransferTransfers.
type CreateEventTransferTransfersBatchResults struct {
	br     pgx.BatchResults // underlying pgx batch handle
	tot    int              // number of queued statements
	closed bool             // set by Close; Exec then reports ErrBatchAlreadyClosed
}

// CreateEventTransferTransfersParams holds the column values for one queued
// INSERT into brc20_event_transfer_transfers.
type CreateEventTransferTransfersParams struct {
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	FromPkscript      string
	FromSatpoint      string
	FromInputIndex    int32
	ToPkscript        string
	ToSatpoint        string
	ToOutputIndex     int32
	SpentAsFee        bool
	Amount            pgtype.Numeric
}

// CreateEventTransferTransfers queues one INSERT per element of arg and sends
// them all in a single pgx batch. Drive the batch by calling Exec.
func (q *Queries) CreateEventTransferTransfers(ctx context.Context, arg []CreateEventTransferTransfersParams) *CreateEventTransferTransfersBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.FromPkscript,
			a.FromSatpoint,
			a.FromInputIndex,
			a.ToPkscript,
			a.ToSatpoint,
			a.ToOutputIndex,
			a.SpentAsFee,
			a.Amount,
		}
		batch.Queue(createEventTransferTransfers, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventTransferTransfersBatchResults{br, len(arg), false}
}

// Exec runs every queued statement, invoking f (if non-nil) with each
// statement's index and error. If the batch was closed beforehand, f receives
// ErrBatchAlreadyClosed instead. The batch is always closed before returning.
func (b *CreateEventTransferTransfersBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying pgx resources.
func (b *CreateEventTransferTransfersBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
|
||||
|
||||
const createInscriptionEntries = `-- name: CreateInscriptionEntries :batchexec
INSERT INTO "brc20_inscription_entries" ("id", "number", "sequence_number", "delegate", "metadata", "metaprotocol", "parents", "pointer", "content", "content_encoding", "content_type", "cursed", "cursed_for_brc20", "created_at", "created_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
`

// CreateInscriptionEntriesBatchResults tracks an in-flight pgx batch created
// by CreateInscriptionEntries.
type CreateInscriptionEntriesBatchResults struct {
	br     pgx.BatchResults // underlying pgx batch handle
	tot    int              // number of queued statements
	closed bool             // set by Close; Exec then reports ErrBatchAlreadyClosed
}

// CreateInscriptionEntriesParams holds the column values for one queued
// INSERT into brc20_inscription_entries.
type CreateInscriptionEntriesParams struct {
	Id              string
	Number          int64
	SequenceNumber  int64
	Delegate        pgtype.Text
	Metadata        []byte
	Metaprotocol    pgtype.Text
	Parents         []string
	Pointer         pgtype.Int8
	Content         []byte
	ContentEncoding pgtype.Text
	ContentType     pgtype.Text
	Cursed          bool
	CursedForBrc20  bool
	CreatedAt       pgtype.Timestamp
	CreatedAtHeight int32
}

// CreateInscriptionEntries queues one INSERT per element of arg and sends
// them all in a single pgx batch. Drive the batch by calling Exec.
func (q *Queries) CreateInscriptionEntries(ctx context.Context, arg []CreateInscriptionEntriesParams) *CreateInscriptionEntriesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Id,
			a.Number,
			a.SequenceNumber,
			a.Delegate,
			a.Metadata,
			a.Metaprotocol,
			a.Parents,
			a.Pointer,
			a.Content,
			a.ContentEncoding,
			a.ContentType,
			a.Cursed,
			a.CursedForBrc20,
			a.CreatedAt,
			a.CreatedAtHeight,
		}
		batch.Queue(createInscriptionEntries, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateInscriptionEntriesBatchResults{br, len(arg), false}
}

// Exec runs every queued statement, invoking f (if non-nil) with each
// statement's index and error. If the batch was closed beforehand, f receives
// ErrBatchAlreadyClosed instead. The batch is always closed before returning.
func (b *CreateInscriptionEntriesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying pgx resources.
func (b *CreateInscriptionEntriesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
|
||||
|
||||
const createInscriptionEntryStates = `-- name: CreateInscriptionEntryStates :batchexec
INSERT INTO "brc20_inscription_entry_states" ("id", "block_height", "transfer_count") VALUES ($1, $2, $3)
`

// CreateInscriptionEntryStatesBatchResults tracks an in-flight pgx batch
// created by CreateInscriptionEntryStates.
type CreateInscriptionEntryStatesBatchResults struct {
	br     pgx.BatchResults // underlying pgx batch handle
	tot    int              // number of queued statements
	closed bool             // set by Close; Exec then reports ErrBatchAlreadyClosed
}

// CreateInscriptionEntryStatesParams holds the column values for one queued
// INSERT into brc20_inscription_entry_states.
type CreateInscriptionEntryStatesParams struct {
	Id            string
	BlockHeight   int32
	TransferCount int32
}

// CreateInscriptionEntryStates queues one INSERT per element of arg and sends
// them all in a single pgx batch. Drive the batch by calling Exec.
func (q *Queries) CreateInscriptionEntryStates(ctx context.Context, arg []CreateInscriptionEntryStatesParams) *CreateInscriptionEntryStatesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Id,
			a.BlockHeight,
			a.TransferCount,
		}
		batch.Queue(createInscriptionEntryStates, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateInscriptionEntryStatesBatchResults{br, len(arg), false}
}

// Exec runs every queued statement, invoking f (if non-nil) with each
// statement's index and error. If the batch was closed beforehand, f receives
// ErrBatchAlreadyClosed instead. The batch is always closed before returning.
func (b *CreateInscriptionEntryStatesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying pgx resources.
func (b *CreateInscriptionEntryStatesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
|
||||
|
||||
const createInscriptionTransfers = `-- name: CreateInscriptionTransfers :batchexec
INSERT INTO "brc20_inscription_transfers" ("inscription_id", "block_height", "tx_index", "tx_hash", "from_input_index", "old_satpoint_tx_hash", "old_satpoint_out_idx", "old_satpoint_offset", "new_satpoint_tx_hash", "new_satpoint_out_idx", "new_satpoint_offset", "new_pkscript", "new_output_value", "sent_as_fee", "transfer_count") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
`

// CreateInscriptionTransfersBatchResults tracks an in-flight pgx batch
// created by CreateInscriptionTransfers.
type CreateInscriptionTransfersBatchResults struct {
	br     pgx.BatchResults // underlying pgx batch handle
	tot    int              // number of queued statements
	closed bool             // set by Close; Exec then reports ErrBatchAlreadyClosed
}

// CreateInscriptionTransfersParams holds the column values for one queued
// INSERT into brc20_inscription_transfers.
type CreateInscriptionTransfersParams struct {
	InscriptionID     string
	BlockHeight       int32
	TxIndex           int32
	TxHash            string
	FromInputIndex    int32
	OldSatpointTxHash pgtype.Text
	OldSatpointOutIdx pgtype.Int4
	OldSatpointOffset pgtype.Int8
	NewSatpointTxHash pgtype.Text
	NewSatpointOutIdx pgtype.Int4
	NewSatpointOffset pgtype.Int8
	NewPkscript       string
	NewOutputValue    int64
	SentAsFee         bool
	TransferCount     int32
}

// CreateInscriptionTransfers queues one INSERT per element of arg and sends
// them all in a single pgx batch. Drive the batch by calling Exec.
func (q *Queries) CreateInscriptionTransfers(ctx context.Context, arg []CreateInscriptionTransfersParams) *CreateInscriptionTransfersBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.InscriptionID,
			a.BlockHeight,
			a.TxIndex,
			a.TxHash,
			a.FromInputIndex,
			a.OldSatpointTxHash,
			a.OldSatpointOutIdx,
			a.OldSatpointOffset,
			a.NewSatpointTxHash,
			a.NewSatpointOutIdx,
			a.NewSatpointOffset,
			a.NewPkscript,
			a.NewOutputValue,
			a.SentAsFee,
			a.TransferCount,
		}
		batch.Queue(createInscriptionTransfers, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateInscriptionTransfersBatchResults{br, len(arg), false}
}

// Exec runs every queued statement, invoking f (if non-nil) with each
// statement's index and error. If the batch was closed beforehand, f receives
// ErrBatchAlreadyClosed instead. The batch is always closed before returning.
func (b *CreateInscriptionTransfersBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying pgx resources.
func (b *CreateInscriptionTransfersBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
|
||||
|
||||
const createTickEntries = `-- name: CreateTickEntries :batchexec
INSERT INTO "brc20_tick_entries" ("tick", "original_tick", "total_supply", "decimals", "limit_per_mint", "is_self_mint", "deploy_inscription_id", "deployed_at", "deployed_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
`

// CreateTickEntriesBatchResults tracks an in-flight pgx batch created by
// CreateTickEntries.
type CreateTickEntriesBatchResults struct {
	br     pgx.BatchResults // underlying pgx batch handle
	tot    int              // number of queued statements
	closed bool             // set by Close; Exec then reports ErrBatchAlreadyClosed
}

// CreateTickEntriesParams holds the column values for one queued INSERT into
// brc20_tick_entries.
type CreateTickEntriesParams struct {
	Tick                string
	OriginalTick        string
	TotalSupply         pgtype.Numeric
	Decimals            int16
	LimitPerMint        pgtype.Numeric
	IsSelfMint          bool
	DeployInscriptionID string
	DeployedAt          pgtype.Timestamp
	DeployedAtHeight    int32
}

// CreateTickEntries queues one INSERT per element of arg and sends them all
// in a single pgx batch. Drive the batch by calling Exec on the result.
func (q *Queries) CreateTickEntries(ctx context.Context, arg []CreateTickEntriesParams) *CreateTickEntriesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Tick,
			a.OriginalTick,
			a.TotalSupply,
			a.Decimals,
			a.LimitPerMint,
			a.IsSelfMint,
			a.DeployInscriptionID,
			a.DeployedAt,
			a.DeployedAtHeight,
		}
		batch.Queue(createTickEntries, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateTickEntriesBatchResults{br, len(arg), false}
}

// Exec runs every queued statement, invoking f (if non-nil) with each
// statement's index and error. If the batch was closed beforehand, f receives
// ErrBatchAlreadyClosed instead. The batch is always closed before returning.
func (b *CreateTickEntriesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying pgx resources.
func (b *CreateTickEntriesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
|
||||
|
||||
const createTickEntryStates = `-- name: CreateTickEntryStates :batchexec
INSERT INTO "brc20_tick_entry_states" ("tick", "block_height", "minted_amount", "burned_amount", "completed_at", "completed_at_height") VALUES ($1, $2, $3, $4, $5, $6)
`

// CreateTickEntryStatesBatchResults tracks an in-flight pgx batch created by
// CreateTickEntryStates.
type CreateTickEntryStatesBatchResults struct {
	br     pgx.BatchResults // underlying pgx batch handle
	tot    int              // number of queued statements
	closed bool             // set by Close; Exec then reports ErrBatchAlreadyClosed
}

// CreateTickEntryStatesParams holds the column values for one queued INSERT
// into brc20_tick_entry_states.
type CreateTickEntryStatesParams struct {
	Tick              string
	BlockHeight       int32
	MintedAmount      pgtype.Numeric
	BurnedAmount      pgtype.Numeric
	CompletedAt       pgtype.Timestamp
	CompletedAtHeight pgtype.Int4
}

// CreateTickEntryStates queues one INSERT per element of arg and sends them
// all in a single pgx batch. Drive the batch by calling Exec on the result.
func (q *Queries) CreateTickEntryStates(ctx context.Context, arg []CreateTickEntryStatesParams) *CreateTickEntryStatesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Tick,
			a.BlockHeight,
			a.MintedAmount,
			a.BurnedAmount,
			a.CompletedAt,
			a.CompletedAtHeight,
		}
		batch.Queue(createTickEntryStates, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateTickEntryStatesBatchResults{br, len(arg), false}
}

// Exec runs every queued statement, invoking f (if non-nil) with each
// statement's index and error. If the batch was closed beforehand, f receives
// ErrBatchAlreadyClosed instead. The batch is always closed before returning.
func (b *CreateTickEntryStatesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch closed and releases the underlying pgx resources.
func (b *CreateTickEntryStatesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
|
||||
593
modules/brc20/internal/repository/postgres/gen/data.sql.go
Normal file
593
modules/brc20/internal/repository/postgres/gen/data.sql.go
Normal file
@@ -0,0 +1,593 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
// source: data.sql
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
const createIndexedBlock = `-- name: CreateIndexedBlock :exec
INSERT INTO "brc20_indexed_blocks" ("height", "hash", "event_hash", "cumulative_event_hash") VALUES ($1, $2, $3, $4)
`

// CreateIndexedBlockParams holds the column values for one row of
// brc20_indexed_blocks.
type CreateIndexedBlockParams struct {
	Height              int32
	Hash                string
	EventHash           string
	CumulativeEventHash string
}

// CreateIndexedBlock inserts one indexed-block row and returns any execution
// error.
func (q *Queries) CreateIndexedBlock(ctx context.Context, arg CreateIndexedBlockParams) error {
	_, err := q.db.Exec(ctx, createIndexedBlock,
		arg.Height,
		arg.Hash,
		arg.EventHash,
		arg.CumulativeEventHash,
	)
	return err
}
|
||||
|
||||
const createProcessorStats = `-- name: CreateProcessorStats :exec
INSERT INTO "brc20_processor_stats" ("block_height", "cursed_inscription_count", "blessed_inscription_count", "lost_sats") VALUES ($1, $2, $3, $4)
`

// CreateProcessorStatsParams holds the column values for one row of
// brc20_processor_stats.
type CreateProcessorStatsParams struct {
	BlockHeight             int32
	CursedInscriptionCount  int32
	BlessedInscriptionCount int32
	LostSats                int64
}

// CreateProcessorStats inserts one per-block processor stats row and returns
// any execution error.
func (q *Queries) CreateProcessorStats(ctx context.Context, arg CreateProcessorStatsParams) error {
	_, err := q.db.Exec(ctx, createProcessorStats,
		arg.BlockHeight,
		arg.CursedInscriptionCount,
		arg.BlessedInscriptionCount,
		arg.LostSats,
	)
	return err
}
|
||||
|
||||
const deleteBalancesSinceHeight = `-- name: DeleteBalancesSinceHeight :exec
DELETE FROM "brc20_balances" WHERE "block_height" >= $1
`

// DeleteBalancesSinceHeight removes all balance rows at or above blockHeight
// (used when rolling back a reorg).
func (q *Queries) DeleteBalancesSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteBalancesSinceHeight, blockHeight)
	return err
}
|
||||
|
||||
const deleteEventDeploysSinceHeight = `-- name: DeleteEventDeploysSinceHeight :exec
DELETE FROM "brc20_event_deploys" WHERE "block_height" >= $1
`

// DeleteEventDeploysSinceHeight removes all deploy events at or above
// blockHeight.
func (q *Queries) DeleteEventDeploysSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteEventDeploysSinceHeight, blockHeight)
	return err
}
|
||||
|
||||
const deleteEventInscribeTransfersSinceHeight = `-- name: DeleteEventInscribeTransfersSinceHeight :exec
DELETE FROM "brc20_event_inscribe_transfers" WHERE "block_height" >= $1
`

// DeleteEventInscribeTransfersSinceHeight removes all inscribe-transfer
// events at or above blockHeight.
func (q *Queries) DeleteEventInscribeTransfersSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteEventInscribeTransfersSinceHeight, blockHeight)
	return err
}
|
||||
|
||||
const deleteEventMintsSinceHeight = `-- name: DeleteEventMintsSinceHeight :exec
DELETE FROM "brc20_event_mints" WHERE "block_height" >= $1
`

// DeleteEventMintsSinceHeight removes all mint events at or above
// blockHeight.
func (q *Queries) DeleteEventMintsSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteEventMintsSinceHeight, blockHeight)
	return err
}
|
||||
|
||||
const deleteEventTransferTransfersSinceHeight = `-- name: DeleteEventTransferTransfersSinceHeight :exec
DELETE FROM "brc20_event_transfer_transfers" WHERE "block_height" >= $1
`

// DeleteEventTransferTransfersSinceHeight removes all transfer-transfer
// events at or above blockHeight.
func (q *Queries) DeleteEventTransferTransfersSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteEventTransferTransfersSinceHeight, blockHeight)
	return err
}
|
||||
|
||||
const deleteIndexedBlocksSinceHeight = `-- name: DeleteIndexedBlocksSinceHeight :exec
DELETE FROM "brc20_indexed_blocks" WHERE "height" >= $1
`

// DeleteIndexedBlocksSinceHeight removes all indexed-block rows at or above
// height.
func (q *Queries) DeleteIndexedBlocksSinceHeight(ctx context.Context, height int32) error {
	_, err := q.db.Exec(ctx, deleteIndexedBlocksSinceHeight, height)
	return err
}
|
||||
|
||||
const deleteInscriptionEntriesSinceHeight = `-- name: DeleteInscriptionEntriesSinceHeight :exec
DELETE FROM "brc20_inscription_entries" WHERE "created_at_height" >= $1
`

// DeleteInscriptionEntriesSinceHeight removes all inscription entries whose
// creation height is at or above createdAtHeight.
func (q *Queries) DeleteInscriptionEntriesSinceHeight(ctx context.Context, createdAtHeight int32) error {
	_, err := q.db.Exec(ctx, deleteInscriptionEntriesSinceHeight, createdAtHeight)
	return err
}
|
||||
|
||||
const deleteInscriptionEntryStatesSinceHeight = `-- name: DeleteInscriptionEntryStatesSinceHeight :exec
DELETE FROM "brc20_inscription_entry_states" WHERE "block_height" >= $1
`

// DeleteInscriptionEntryStatesSinceHeight removes all inscription entry
// states at or above blockHeight.
func (q *Queries) DeleteInscriptionEntryStatesSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteInscriptionEntryStatesSinceHeight, blockHeight)
	return err
}
|
||||
|
||||
const deleteInscriptionTransfersSinceHeight = `-- name: DeleteInscriptionTransfersSinceHeight :exec
DELETE FROM "brc20_inscription_transfers" WHERE "block_height" >= $1
`

// DeleteInscriptionTransfersSinceHeight removes all inscription transfers at
// or above blockHeight.
func (q *Queries) DeleteInscriptionTransfersSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteInscriptionTransfersSinceHeight, blockHeight)
	return err
}
|
||||
|
||||
const deleteProcessorStatsSinceHeight = `-- name: DeleteProcessorStatsSinceHeight :exec
DELETE FROM "brc20_processor_stats" WHERE "block_height" >= $1
`

// DeleteProcessorStatsSinceHeight removes all processor stats rows at or
// above blockHeight.
func (q *Queries) DeleteProcessorStatsSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteProcessorStatsSinceHeight, blockHeight)
	return err
}
|
||||
|
||||
const deleteTickEntriesSinceHeight = `-- name: DeleteTickEntriesSinceHeight :exec
DELETE FROM "brc20_tick_entries" WHERE "deployed_at_height" >= $1
`

// DeleteTickEntriesSinceHeight removes all tick entries whose deploy height
// is at or above deployedAtHeight.
func (q *Queries) DeleteTickEntriesSinceHeight(ctx context.Context, deployedAtHeight int32) error {
	_, err := q.db.Exec(ctx, deleteTickEntriesSinceHeight, deployedAtHeight)
	return err
}
|
||||
|
||||
const deleteTickEntryStatesSinceHeight = `-- name: DeleteTickEntryStatesSinceHeight :exec
DELETE FROM "brc20_tick_entry_states" WHERE "block_height" >= $1
`

// DeleteTickEntryStatesSinceHeight removes all tick entry states at or above
// blockHeight.
func (q *Queries) DeleteTickEntryStatesSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteTickEntryStatesSinceHeight, blockHeight)
	return err
}
|
||||
|
||||
const getBalancesBatchAtHeight = `-- name: GetBalancesBatchAtHeight :many
SELECT DISTINCT ON ("brc20_balances"."pkscript", "brc20_balances"."tick") brc20_balances.pkscript, brc20_balances.block_height, brc20_balances.tick, brc20_balances.overall_balance, brc20_balances.available_balance FROM "brc20_balances"
	INNER JOIN (
		SELECT
			unnest($1::text[]) AS "pkscript",
			unnest($2::text[]) AS "tick"
	) "queries" ON "brc20_balances"."pkscript" = "queries"."pkscript" AND "brc20_balances"."tick" = "queries"."tick" AND "brc20_balances"."block_height" <= $3
ORDER BY "brc20_balances"."pkscript", "brc20_balances"."tick", "block_height" DESC
`

// GetBalancesBatchAtHeightParams pairs parallel pkscript/tick arrays (zipped
// by unnest in the query) with the height cutoff.
type GetBalancesBatchAtHeightParams struct {
	PkscriptArr []string
	TickArr     []string
	BlockHeight int32
}

// GetBalancesBatchAtHeight returns, for each requested (pkscript, tick) pair,
// the latest balance row with block_height <= arg.BlockHeight. Pairs with no
// matching row are simply absent from the result.
func (q *Queries) GetBalancesBatchAtHeight(ctx context.Context, arg GetBalancesBatchAtHeightParams) ([]Brc20Balance, error) {
	rows, err := q.db.Query(ctx, getBalancesBatchAtHeight, arg.PkscriptArr, arg.TickArr, arg.BlockHeight)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Brc20Balance
	for rows.Next() {
		var i Brc20Balance
		if err := rows.Scan(
			&i.Pkscript,
			&i.BlockHeight,
			&i.Tick,
			&i.OverallBalance,
			&i.AvailableBalance,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
|
||||
const getEventInscribeTransfersByInscriptionIds = `-- name: GetEventInscribeTransfersByInscriptionIds :many
SELECT id, inscription_id, inscription_number, tick, original_tick, tx_hash, block_height, tx_index, timestamp, pkscript, satpoint, output_index, sats_amount, amount FROM "brc20_event_inscribe_transfers" WHERE "inscription_id" = ANY($1::text[])
`

// GetEventInscribeTransfersByInscriptionIds returns every inscribe-transfer
// event whose inscription_id is in inscriptionIds.
func (q *Queries) GetEventInscribeTransfersByInscriptionIds(ctx context.Context, inscriptionIds []string) ([]Brc20EventInscribeTransfer, error) {
	rows, err := q.db.Query(ctx, getEventInscribeTransfersByInscriptionIds, inscriptionIds)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Brc20EventInscribeTransfer
	for rows.Next() {
		var i Brc20EventInscribeTransfer
		if err := rows.Scan(
			&i.Id,
			&i.InscriptionID,
			&i.InscriptionNumber,
			&i.Tick,
			&i.OriginalTick,
			&i.TxHash,
			&i.BlockHeight,
			&i.TxIndex,
			&i.Timestamp,
			&i.Pkscript,
			&i.Satpoint,
			&i.OutputIndex,
			&i.SatsAmount,
			&i.Amount,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
|
||||
const getIndexedBlockByHeight = `-- name: GetIndexedBlockByHeight :one
SELECT height, hash, event_hash, cumulative_event_hash FROM "brc20_indexed_blocks" WHERE "height" = $1
`

// GetIndexedBlockByHeight returns the single indexed-block row at the given
// height; the error is pgx.ErrNoRows semantics from row.Scan when absent.
func (q *Queries) GetIndexedBlockByHeight(ctx context.Context, height int32) (Brc20IndexedBlock, error) {
	row := q.db.QueryRow(ctx, getIndexedBlockByHeight, height)
	var i Brc20IndexedBlock
	err := row.Scan(
		&i.Height,
		&i.Hash,
		&i.EventHash,
		&i.CumulativeEventHash,
	)
	return i, err
}
|
||||
|
||||
const getInscriptionEntriesByIds = `-- name: GetInscriptionEntriesByIds :many
WITH "states" AS (
	-- select latest state
	SELECT DISTINCT ON ("id") id, block_height, transfer_count FROM "brc20_inscription_entry_states" WHERE "id" = ANY($1::text[]) ORDER BY "id", "block_height" DESC
)
SELECT brc20_inscription_entries.id, number, sequence_number, delegate, metadata, metaprotocol, parents, pointer, content, content_encoding, content_type, cursed, cursed_for_brc20, created_at, created_at_height, states.id, block_height, transfer_count FROM "brc20_inscription_entries"
	LEFT JOIN "states" ON "brc20_inscription_entries"."id" = "states"."id"
	WHERE "brc20_inscription_entries"."id" = ANY($1::text[])
`

// GetInscriptionEntriesByIdsRow is an inscription entry joined with its
// latest state; the state columns (Id_2, BlockHeight, TransferCount) are
// nullable because the join is a LEFT JOIN.
type GetInscriptionEntriesByIdsRow struct {
	Id              string
	Number          int64
	SequenceNumber  int64
	Delegate        pgtype.Text
	Metadata        []byte
	Metaprotocol    pgtype.Text
	Parents         []string
	Pointer         pgtype.Int8
	Content         []byte
	ContentEncoding pgtype.Text
	ContentType     pgtype.Text
	Cursed          bool
	CursedForBrc20  bool
	CreatedAt       pgtype.Timestamp
	CreatedAtHeight int32
	Id_2            pgtype.Text
	BlockHeight     pgtype.Int4
	TransferCount   pgtype.Int4
}

// GetInscriptionEntriesByIds returns the inscription entries for the given
// ids, each left-joined with its most recent state row (highest
// block_height).
func (q *Queries) GetInscriptionEntriesByIds(ctx context.Context, inscriptionIds []string) ([]GetInscriptionEntriesByIdsRow, error) {
	rows, err := q.db.Query(ctx, getInscriptionEntriesByIds, inscriptionIds)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetInscriptionEntriesByIdsRow
	for rows.Next() {
		var i GetInscriptionEntriesByIdsRow
		if err := rows.Scan(
			&i.Id,
			&i.Number,
			&i.SequenceNumber,
			&i.Delegate,
			&i.Metadata,
			&i.Metaprotocol,
			&i.Parents,
			&i.Pointer,
			&i.Content,
			&i.ContentEncoding,
			&i.ContentType,
			&i.Cursed,
			&i.CursedForBrc20,
			&i.CreatedAt,
			&i.CreatedAtHeight,
			&i.Id_2,
			&i.BlockHeight,
			&i.TransferCount,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
|
||||
const getInscriptionNumbersByIds = `-- name: GetInscriptionNumbersByIds :many
SELECT id, number FROM "brc20_inscription_entries" WHERE "id" = ANY($1::text[])
`

// GetInscriptionNumbersByIdsRow maps an inscription id to its number.
type GetInscriptionNumbersByIdsRow struct {
	Id     string
	Number int64
}

// GetInscriptionNumbersByIds returns (id, number) pairs for each matching
// inscription entry; ids with no entry are absent from the result.
func (q *Queries) GetInscriptionNumbersByIds(ctx context.Context, inscriptionIds []string) ([]GetInscriptionNumbersByIdsRow, error) {
	rows, err := q.db.Query(ctx, getInscriptionNumbersByIds, inscriptionIds)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetInscriptionNumbersByIdsRow
	for rows.Next() {
		var i GetInscriptionNumbersByIdsRow
		if err := rows.Scan(&i.Id, &i.Number); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
||||
|
||||
const getInscriptionParentsByIds = `-- name: GetInscriptionParentsByIds :many
|
||||
SELECT id, parents FROM "brc20_inscription_entries" WHERE "id" = ANY($1::text[])
|
||||
`
|
||||
|
||||
type GetInscriptionParentsByIdsRow struct {
|
||||
Id string
|
||||
Parents []string
|
||||
}
|
||||
|
||||
func (q *Queries) GetInscriptionParentsByIds(ctx context.Context, inscriptionIds []string) ([]GetInscriptionParentsByIdsRow, error) {
|
||||
rows, err := q.db.Query(ctx, getInscriptionParentsByIds, inscriptionIds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetInscriptionParentsByIdsRow
|
||||
for rows.Next() {
|
||||
var i GetInscriptionParentsByIdsRow
|
||||
if err := rows.Scan(&i.Id, &i.Parents); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getInscriptionTransfersInOutPoints = `-- name: GetInscriptionTransfersInOutPoints :many
|
||||
SELECT it.inscription_id, it.block_height, it.tx_index, it.tx_hash, it.from_input_index, it.old_satpoint_tx_hash, it.old_satpoint_out_idx, it.old_satpoint_offset, it.new_satpoint_tx_hash, it.new_satpoint_out_idx, it.new_satpoint_offset, it.new_pkscript, it.new_output_value, it.sent_as_fee, it.transfer_count, "ie"."content" FROM (
|
||||
SELECT
|
||||
unnest($1::text[]) AS "tx_hash",
|
||||
unnest($2::int[]) AS "tx_out_idx"
|
||||
) "inputs"
|
||||
INNER JOIN "brc20_inscription_transfers" it ON "inputs"."tx_hash" = "it"."new_satpoint_tx_hash" AND "inputs"."tx_out_idx" = "it"."new_satpoint_out_idx"
|
||||
LEFT JOIN "brc20_inscription_entries" ie ON "it"."inscription_id" = "ie"."id"
|
||||
`
|
||||
|
||||
type GetInscriptionTransfersInOutPointsParams struct {
|
||||
TxHashArr []string
|
||||
TxOutIdxArr []int32
|
||||
}
|
||||
|
||||
type GetInscriptionTransfersInOutPointsRow struct {
|
||||
InscriptionID string
|
||||
BlockHeight int32
|
||||
TxIndex int32
|
||||
TxHash string
|
||||
FromInputIndex int32
|
||||
OldSatpointTxHash pgtype.Text
|
||||
OldSatpointOutIdx pgtype.Int4
|
||||
OldSatpointOffset pgtype.Int8
|
||||
NewSatpointTxHash pgtype.Text
|
||||
NewSatpointOutIdx pgtype.Int4
|
||||
NewSatpointOffset pgtype.Int8
|
||||
NewPkscript string
|
||||
NewOutputValue int64
|
||||
SentAsFee bool
|
||||
TransferCount int32
|
||||
Content []byte
|
||||
}
|
||||
|
||||
func (q *Queries) GetInscriptionTransfersInOutPoints(ctx context.Context, arg GetInscriptionTransfersInOutPointsParams) ([]GetInscriptionTransfersInOutPointsRow, error) {
|
||||
rows, err := q.db.Query(ctx, getInscriptionTransfersInOutPoints, arg.TxHashArr, arg.TxOutIdxArr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetInscriptionTransfersInOutPointsRow
|
||||
for rows.Next() {
|
||||
var i GetInscriptionTransfersInOutPointsRow
|
||||
if err := rows.Scan(
|
||||
&i.InscriptionID,
|
||||
&i.BlockHeight,
|
||||
&i.TxIndex,
|
||||
&i.TxHash,
|
||||
&i.FromInputIndex,
|
||||
&i.OldSatpointTxHash,
|
||||
&i.OldSatpointOutIdx,
|
||||
&i.OldSatpointOffset,
|
||||
&i.NewSatpointTxHash,
|
||||
&i.NewSatpointOutIdx,
|
||||
&i.NewSatpointOffset,
|
||||
&i.NewPkscript,
|
||||
&i.NewOutputValue,
|
||||
&i.SentAsFee,
|
||||
&i.TransferCount,
|
||||
&i.Content,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getLatestEventIds = `-- name: GetLatestEventIds :one
|
||||
WITH "latest_deploy_id" AS (
|
||||
SELECT "id" FROM "brc20_event_deploys" ORDER BY "id" DESC LIMIT 1
|
||||
),
|
||||
"latest_mint_id" AS (
|
||||
SELECT "id" FROM "brc20_event_mints" ORDER BY "id" DESC LIMIT 1
|
||||
),
|
||||
"latest_inscribe_transfer_id" AS (
|
||||
SELECT "id" FROM "brc20_event_inscribe_transfers" ORDER BY "id" DESC LIMIT 1
|
||||
),
|
||||
"latest_transfer_transfer_id" AS (
|
||||
SELECT "id" FROM "brc20_event_transfer_transfers" ORDER BY "id" DESC LIMIT 1
|
||||
)
|
||||
SELECT
|
||||
COALESCE((SELECT "id" FROM "latest_deploy_id"), -1) AS "event_deploy_id",
|
||||
COALESCE((SELECT "id" FROM "latest_mint_id"), -1) AS "event_mint_id",
|
||||
COALESCE((SELECT "id" FROM "latest_inscribe_transfer_id"), -1) AS "event_inscribe_transfer_id",
|
||||
COALESCE((SELECT "id" FROM "latest_transfer_transfer_id"), -1) AS "event_transfer_transfer_id"
|
||||
`
|
||||
|
||||
type GetLatestEventIdsRow struct {
|
||||
EventDeployID interface{}
|
||||
EventMintID interface{}
|
||||
EventInscribeTransferID interface{}
|
||||
EventTransferTransferID interface{}
|
||||
}
|
||||
|
||||
func (q *Queries) GetLatestEventIds(ctx context.Context) (GetLatestEventIdsRow, error) {
|
||||
row := q.db.QueryRow(ctx, getLatestEventIds)
|
||||
var i GetLatestEventIdsRow
|
||||
err := row.Scan(
|
||||
&i.EventDeployID,
|
||||
&i.EventMintID,
|
||||
&i.EventInscribeTransferID,
|
||||
&i.EventTransferTransferID,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getLatestIndexedBlock = `-- name: GetLatestIndexedBlock :one
|
||||
SELECT height, hash, event_hash, cumulative_event_hash FROM "brc20_indexed_blocks" ORDER BY "height" DESC LIMIT 1
|
||||
`
|
||||
|
||||
func (q *Queries) GetLatestIndexedBlock(ctx context.Context) (Brc20IndexedBlock, error) {
|
||||
row := q.db.QueryRow(ctx, getLatestIndexedBlock)
|
||||
var i Brc20IndexedBlock
|
||||
err := row.Scan(
|
||||
&i.Height,
|
||||
&i.Hash,
|
||||
&i.EventHash,
|
||||
&i.CumulativeEventHash,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getLatestProcessorStats = `-- name: GetLatestProcessorStats :one
|
||||
SELECT block_height, cursed_inscription_count, blessed_inscription_count, lost_sats FROM "brc20_processor_stats" ORDER BY "block_height" DESC LIMIT 1
|
||||
`
|
||||
|
||||
func (q *Queries) GetLatestProcessorStats(ctx context.Context) (Brc20ProcessorStat, error) {
|
||||
row := q.db.QueryRow(ctx, getLatestProcessorStats)
|
||||
var i Brc20ProcessorStat
|
||||
err := row.Scan(
|
||||
&i.BlockHeight,
|
||||
&i.CursedInscriptionCount,
|
||||
&i.BlessedInscriptionCount,
|
||||
&i.LostSats,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getTickEntriesByTicks = `-- name: GetTickEntriesByTicks :many
|
||||
WITH "states" AS (
|
||||
-- select latest state
|
||||
SELECT DISTINCT ON ("tick") tick, block_height, minted_amount, burned_amount, completed_at, completed_at_height FROM "brc20_tick_entry_states" WHERE "tick" = ANY($1::text[]) ORDER BY "tick", "block_height" DESC
|
||||
)
|
||||
SELECT brc20_tick_entries.tick, original_tick, total_supply, decimals, limit_per_mint, is_self_mint, deploy_inscription_id, deployed_at, deployed_at_height, states.tick, block_height, minted_amount, burned_amount, completed_at, completed_at_height FROM "brc20_tick_entries"
|
||||
LEFT JOIN "states" ON "brc20_tick_entries"."tick" = "states"."tick"
|
||||
WHERE "brc20_tick_entries"."tick" = ANY($1::text[])
|
||||
`
|
||||
|
||||
type GetTickEntriesByTicksRow struct {
|
||||
Tick string
|
||||
OriginalTick string
|
||||
TotalSupply pgtype.Numeric
|
||||
Decimals int16
|
||||
LimitPerMint pgtype.Numeric
|
||||
IsSelfMint bool
|
||||
DeployInscriptionID string
|
||||
DeployedAt pgtype.Timestamp
|
||||
DeployedAtHeight int32
|
||||
Tick_2 pgtype.Text
|
||||
BlockHeight pgtype.Int4
|
||||
MintedAmount pgtype.Numeric
|
||||
BurnedAmount pgtype.Numeric
|
||||
CompletedAt pgtype.Timestamp
|
||||
CompletedAtHeight pgtype.Int4
|
||||
}
|
||||
|
||||
func (q *Queries) GetTickEntriesByTicks(ctx context.Context, ticks []string) ([]GetTickEntriesByTicksRow, error) {
|
||||
rows, err := q.db.Query(ctx, getTickEntriesByTicks, ticks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []GetTickEntriesByTicksRow
|
||||
for rows.Next() {
|
||||
var i GetTickEntriesByTicksRow
|
||||
if err := rows.Scan(
|
||||
&i.Tick,
|
||||
&i.OriginalTick,
|
||||
&i.TotalSupply,
|
||||
&i.Decimals,
|
||||
&i.LimitPerMint,
|
||||
&i.IsSelfMint,
|
||||
&i.DeployInscriptionID,
|
||||
&i.DeployedAt,
|
||||
&i.DeployedAtHeight,
|
||||
&i.Tick_2,
|
||||
&i.BlockHeight,
|
||||
&i.MintedAmount,
|
||||
&i.BurnedAmount,
|
||||
&i.CompletedAt,
|
||||
&i.CompletedAtHeight,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
33
modules/brc20/internal/repository/postgres/gen/db.go
Normal file
33
modules/brc20/internal/repository/postgres/gen/db.go
Normal file
@@ -0,0 +1,33 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
type DBTX interface {
|
||||
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
|
||||
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
|
||||
QueryRow(context.Context, string, ...interface{}) pgx.Row
|
||||
SendBatch(context.Context, *pgx.Batch) pgx.BatchResults
|
||||
}
|
||||
|
||||
func New(db DBTX) *Queries {
|
||||
return &Queries{db: db}
|
||||
}
|
||||
|
||||
type Queries struct {
|
||||
db DBTX
|
||||
}
|
||||
|
||||
func (q *Queries) WithTx(tx pgx.Tx) *Queries {
|
||||
return &Queries{
|
||||
db: tx,
|
||||
}
|
||||
}
|
||||
49
modules/brc20/internal/repository/postgres/gen/info.sql.go
Normal file
49
modules/brc20/internal/repository/postgres/gen/info.sql.go
Normal file
@@ -0,0 +1,49 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
// source: info.sql
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
const createIndexerState = `-- name: CreateIndexerState :exec
|
||||
INSERT INTO brc20_indexer_states (client_version, network, db_version, event_hash_version) VALUES ($1, $2, $3, $4)
|
||||
`
|
||||
|
||||
type CreateIndexerStateParams struct {
|
||||
ClientVersion string
|
||||
Network string
|
||||
DbVersion int32
|
||||
EventHashVersion int32
|
||||
}
|
||||
|
||||
func (q *Queries) CreateIndexerState(ctx context.Context, arg CreateIndexerStateParams) error {
|
||||
_, err := q.db.Exec(ctx, createIndexerState,
|
||||
arg.ClientVersion,
|
||||
arg.Network,
|
||||
arg.DbVersion,
|
||||
arg.EventHashVersion,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const getLatestIndexerState = `-- name: GetLatestIndexerState :one
|
||||
SELECT id, client_version, network, db_version, event_hash_version, created_at FROM brc20_indexer_states ORDER BY created_at DESC LIMIT 1
|
||||
`
|
||||
|
||||
func (q *Queries) GetLatestIndexerState(ctx context.Context) (Brc20IndexerState, error) {
|
||||
row := q.db.QueryRow(ctx, getLatestIndexerState)
|
||||
var i Brc20IndexerState
|
||||
err := row.Scan(
|
||||
&i.Id,
|
||||
&i.ClientVersion,
|
||||
&i.Network,
|
||||
&i.DbVersion,
|
||||
&i.EventHashVersion,
|
||||
&i.CreatedAt,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
174
modules/brc20/internal/repository/postgres/gen/models.go
Normal file
174
modules/brc20/internal/repository/postgres/gen/models.go
Normal file
@@ -0,0 +1,174 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.26.0
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
|
||||
type Brc20Balance struct {
|
||||
Pkscript string
|
||||
BlockHeight int32
|
||||
Tick string
|
||||
OverallBalance pgtype.Numeric
|
||||
AvailableBalance pgtype.Numeric
|
||||
}
|
||||
|
||||
type Brc20EventDeploy struct {
|
||||
Id int64
|
||||
InscriptionID string
|
||||
InscriptionNumber int64
|
||||
Tick string
|
||||
OriginalTick string
|
||||
TxHash string
|
||||
BlockHeight int32
|
||||
TxIndex int32
|
||||
Timestamp pgtype.Timestamp
|
||||
Pkscript string
|
||||
Satpoint string
|
||||
TotalSupply pgtype.Numeric
|
||||
Decimals int16
|
||||
LimitPerMint pgtype.Numeric
|
||||
IsSelfMint bool
|
||||
}
|
||||
|
||||
type Brc20EventInscribeTransfer struct {
|
||||
Id int64
|
||||
InscriptionID string
|
||||
InscriptionNumber int64
|
||||
Tick string
|
||||
OriginalTick string
|
||||
TxHash string
|
||||
BlockHeight int32
|
||||
TxIndex int32
|
||||
Timestamp pgtype.Timestamp
|
||||
Pkscript string
|
||||
Satpoint string
|
||||
OutputIndex int32
|
||||
SatsAmount int64
|
||||
Amount pgtype.Numeric
|
||||
}
|
||||
|
||||
type Brc20EventMint struct {
|
||||
Id int64
|
||||
InscriptionID string
|
||||
InscriptionNumber int64
|
||||
Tick string
|
||||
OriginalTick string
|
||||
TxHash string
|
||||
BlockHeight int32
|
||||
TxIndex int32
|
||||
Timestamp pgtype.Timestamp
|
||||
Pkscript string
|
||||
Satpoint string
|
||||
Amount pgtype.Numeric
|
||||
ParentID pgtype.Text
|
||||
}
|
||||
|
||||
type Brc20EventTransferTransfer struct {
|
||||
Id int64
|
||||
InscriptionID string
|
||||
InscriptionNumber int64
|
||||
Tick string
|
||||
OriginalTick string
|
||||
TxHash string
|
||||
BlockHeight int32
|
||||
TxIndex int32
|
||||
Timestamp pgtype.Timestamp
|
||||
FromPkscript string
|
||||
FromSatpoint string
|
||||
FromInputIndex int32
|
||||
ToPkscript string
|
||||
ToSatpoint string
|
||||
ToOutputIndex int32
|
||||
SpentAsFee bool
|
||||
Amount pgtype.Numeric
|
||||
}
|
||||
|
||||
type Brc20IndexedBlock struct {
|
||||
Height int32
|
||||
Hash string
|
||||
EventHash string
|
||||
CumulativeEventHash string
|
||||
}
|
||||
|
||||
type Brc20IndexerState struct {
|
||||
Id int64
|
||||
ClientVersion string
|
||||
Network string
|
||||
DbVersion int32
|
||||
EventHashVersion int32
|
||||
CreatedAt pgtype.Timestamptz
|
||||
}
|
||||
|
||||
type Brc20InscriptionEntry struct {
|
||||
Id string
|
||||
Number int64
|
||||
SequenceNumber int64
|
||||
Delegate pgtype.Text
|
||||
Metadata []byte
|
||||
Metaprotocol pgtype.Text
|
||||
Parents []string
|
||||
Pointer pgtype.Int8
|
||||
Content []byte
|
||||
ContentEncoding pgtype.Text
|
||||
ContentType pgtype.Text
|
||||
Cursed bool
|
||||
CursedForBrc20 bool
|
||||
CreatedAt pgtype.Timestamp
|
||||
CreatedAtHeight int32
|
||||
}
|
||||
|
||||
type Brc20InscriptionEntryState struct {
|
||||
Id string
|
||||
BlockHeight int32
|
||||
TransferCount int32
|
||||
}
|
||||
|
||||
type Brc20InscriptionTransfer struct {
|
||||
InscriptionID string
|
||||
BlockHeight int32
|
||||
TxIndex int32
|
||||
TxHash string
|
||||
FromInputIndex int32
|
||||
OldSatpointTxHash pgtype.Text
|
||||
OldSatpointOutIdx pgtype.Int4
|
||||
OldSatpointOffset pgtype.Int8
|
||||
NewSatpointTxHash pgtype.Text
|
||||
NewSatpointOutIdx pgtype.Int4
|
||||
NewSatpointOffset pgtype.Int8
|
||||
NewPkscript string
|
||||
NewOutputValue int64
|
||||
SentAsFee bool
|
||||
TransferCount int32
|
||||
}
|
||||
|
||||
type Brc20ProcessorStat struct {
|
||||
BlockHeight int32
|
||||
CursedInscriptionCount int32
|
||||
BlessedInscriptionCount int32
|
||||
LostSats int64
|
||||
}
|
||||
|
||||
type Brc20TickEntry struct {
|
||||
Tick string
|
||||
OriginalTick string
|
||||
TotalSupply pgtype.Numeric
|
||||
Decimals int16
|
||||
LimitPerMint pgtype.Numeric
|
||||
IsSelfMint bool
|
||||
DeployInscriptionID string
|
||||
DeployedAt pgtype.Timestamp
|
||||
DeployedAtHeight int32
|
||||
}
|
||||
|
||||
type Brc20TickEntryState struct {
|
||||
Tick string
|
||||
BlockHeight int32
|
||||
MintedAmount pgtype.Numeric
|
||||
BurnedAmount pgtype.Numeric
|
||||
CompletedAt pgtype.Timestamp
|
||||
CompletedAtHeight pgtype.Int4
|
||||
}
|
||||
33
modules/brc20/internal/repository/postgres/indexer_info.go
Normal file
33
modules/brc20/internal/repository/postgres/indexer_info.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
var _ datagateway.IndexerInfoDataGateway = (*Repository)(nil)
|
||||
|
||||
func (r *Repository) GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error) {
|
||||
model, err := r.queries.GetLatestIndexerState(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, pgx.ErrNoRows) {
|
||||
return entity.IndexerState{}, errors.WithStack(errs.NotFound)
|
||||
}
|
||||
return entity.IndexerState{}, errors.Wrap(err, "error during query")
|
||||
}
|
||||
state := mapIndexerStatesModelToType(model)
|
||||
return state, nil
|
||||
}
|
||||
|
||||
func (r *Repository) CreateIndexerState(ctx context.Context, state entity.IndexerState) error {
|
||||
params := mapIndexerStatesTypeToParams(state)
|
||||
if err := r.queries.CreateIndexerState(ctx, params); err != nil {
|
||||
return errors.Wrap(err, "error during exec")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
614
modules/brc20/internal/repository/postgres/mapper.go
Normal file
614
modules/brc20/internal/repository/postgres/mapper.go
Normal file
@@ -0,0 +1,614 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres/gen"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
"github.com/samber/lo"
|
||||
"github.com/shopspring/decimal"
|
||||
)
|
||||
|
||||
func decimalFromNumeric(src pgtype.Numeric) decimal.NullDecimal {
|
||||
if !src.Valid || src.NaN || src.InfinityModifier != pgtype.Finite {
|
||||
return decimal.NullDecimal{}
|
||||
}
|
||||
result := decimal.NewFromBigInt(src.Int, src.Exp)
|
||||
return decimal.NewNullDecimal(result)
|
||||
}
|
||||
|
||||
func numericFromDecimal(src decimal.Decimal) pgtype.Numeric {
|
||||
result := pgtype.Numeric{
|
||||
Int: src.Coefficient(),
|
||||
Exp: src.Exponent(),
|
||||
NaN: false,
|
||||
InfinityModifier: pgtype.Finite,
|
||||
Valid: true,
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func numericFromNullDecimal(src decimal.NullDecimal) pgtype.Numeric {
|
||||
if !src.Valid {
|
||||
return pgtype.Numeric{}
|
||||
}
|
||||
return numericFromDecimal(src.Decimal)
|
||||
}
|
||||
|
||||
func mapIndexerStatesModelToType(src gen.Brc20IndexerState) entity.IndexerState {
|
||||
var createdAt time.Time
|
||||
if src.CreatedAt.Valid {
|
||||
createdAt = src.CreatedAt.Time
|
||||
}
|
||||
return entity.IndexerState{
|
||||
ClientVersion: src.ClientVersion,
|
||||
Network: common.Network(src.Network),
|
||||
DBVersion: int32(src.DbVersion),
|
||||
EventHashVersion: int32(src.EventHashVersion),
|
||||
CreatedAt: createdAt,
|
||||
}
|
||||
}
|
||||
|
||||
func mapIndexerStatesTypeToParams(src entity.IndexerState) gen.CreateIndexerStateParams {
|
||||
return gen.CreateIndexerStateParams{
|
||||
ClientVersion: src.ClientVersion,
|
||||
Network: string(src.Network),
|
||||
DbVersion: int32(src.DBVersion),
|
||||
EventHashVersion: int32(src.EventHashVersion),
|
||||
}
|
||||
}
|
||||
|
||||
func mapIndexedBlockModelToType(src gen.Brc20IndexedBlock) (entity.IndexedBlock, error) {
|
||||
hash, err := chainhash.NewHashFromStr(src.Hash)
|
||||
if err != nil {
|
||||
return entity.IndexedBlock{}, errors.Wrap(err, "invalid block hash")
|
||||
}
|
||||
eventHash, err := hex.DecodeString(src.EventHash)
|
||||
if err != nil {
|
||||
return entity.IndexedBlock{}, errors.Wrap(err, "invalid event hash")
|
||||
}
|
||||
cumulativeEventHash, err := hex.DecodeString(src.CumulativeEventHash)
|
||||
if err != nil {
|
||||
return entity.IndexedBlock{}, errors.Wrap(err, "invalid cumulative event hash")
|
||||
}
|
||||
return entity.IndexedBlock{
|
||||
Height: uint64(src.Height),
|
||||
Hash: *hash,
|
||||
EventHash: eventHash,
|
||||
CumulativeEventHash: cumulativeEventHash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapIndexedBlockTypeToParams(src entity.IndexedBlock) gen.CreateIndexedBlockParams {
|
||||
return gen.CreateIndexedBlockParams{
|
||||
Height: int32(src.Height),
|
||||
Hash: src.Hash.String(),
|
||||
EventHash: hex.EncodeToString(src.EventHash),
|
||||
CumulativeEventHash: hex.EncodeToString(src.CumulativeEventHash),
|
||||
}
|
||||
}
|
||||
|
||||
func mapProcessorStatsModelToType(src gen.Brc20ProcessorStat) entity.ProcessorStats {
|
||||
return entity.ProcessorStats{
|
||||
BlockHeight: uint64(src.BlockHeight),
|
||||
CursedInscriptionCount: uint64(src.CursedInscriptionCount),
|
||||
BlessedInscriptionCount: uint64(src.BlessedInscriptionCount),
|
||||
LostSats: uint64(src.LostSats),
|
||||
}
|
||||
}
|
||||
|
||||
func mapProcessorStatsTypeToParams(src entity.ProcessorStats) gen.CreateProcessorStatsParams {
|
||||
return gen.CreateProcessorStatsParams{
|
||||
BlockHeight: int32(src.BlockHeight),
|
||||
CursedInscriptionCount: int32(src.CursedInscriptionCount),
|
||||
BlessedInscriptionCount: int32(src.BlessedInscriptionCount),
|
||||
LostSats: int64(src.LostSats),
|
||||
}
|
||||
}
|
||||
|
||||
func mapTickEntryModelToType(src gen.GetTickEntriesByTicksRow) (entity.TickEntry, error) {
|
||||
deployInscriptionId, err := ordinals.NewInscriptionIdFromString(src.DeployInscriptionID)
|
||||
if err != nil {
|
||||
return entity.TickEntry{}, errors.Wrap(err, "invalid deployInscriptionId")
|
||||
}
|
||||
var completedAt time.Time
|
||||
if src.CompletedAt.Valid {
|
||||
completedAt = src.CompletedAt.Time
|
||||
}
|
||||
return entity.TickEntry{
|
||||
Tick: src.Tick,
|
||||
OriginalTick: src.OriginalTick,
|
||||
TotalSupply: decimalFromNumeric(src.TotalSupply).Decimal,
|
||||
Decimals: uint16(src.Decimals),
|
||||
LimitPerMint: decimalFromNumeric(src.LimitPerMint).Decimal,
|
||||
IsSelfMint: src.IsSelfMint,
|
||||
DeployInscriptionId: deployInscriptionId,
|
||||
DeployedAt: src.DeployedAt.Time,
|
||||
DeployedAtHeight: uint64(src.DeployedAtHeight),
|
||||
MintedAmount: decimalFromNumeric(src.MintedAmount).Decimal,
|
||||
BurnedAmount: decimalFromNumeric(src.BurnedAmount).Decimal,
|
||||
CompletedAt: completedAt,
|
||||
CompletedAtHeight: lo.Ternary(src.CompletedAtHeight.Valid, uint64(src.CompletedAtHeight.Int32), 0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapTickEntryTypeToParams(src entity.TickEntry, blockHeight uint64) (gen.CreateTickEntriesParams, gen.CreateTickEntryStatesParams, error) {
|
||||
return gen.CreateTickEntriesParams{
|
||||
Tick: src.Tick,
|
||||
OriginalTick: src.OriginalTick,
|
||||
TotalSupply: numericFromDecimal(src.TotalSupply),
|
||||
Decimals: int16(src.Decimals),
|
||||
LimitPerMint: numericFromDecimal(src.LimitPerMint),
|
||||
IsSelfMint: src.IsSelfMint,
|
||||
DeployInscriptionID: src.DeployInscriptionId.String(),
|
||||
DeployedAt: pgtype.Timestamp{Time: src.DeployedAt, Valid: true},
|
||||
DeployedAtHeight: int32(src.DeployedAtHeight),
|
||||
}, gen.CreateTickEntryStatesParams{
|
||||
Tick: src.Tick,
|
||||
BlockHeight: int32(blockHeight),
|
||||
CompletedAt: pgtype.Timestamp{Time: src.CompletedAt, Valid: !src.CompletedAt.IsZero()},
|
||||
CompletedAtHeight: pgtype.Int4{Int32: int32(src.CompletedAtHeight), Valid: src.CompletedAtHeight != 0},
|
||||
MintedAmount: numericFromDecimal(src.MintedAmount),
|
||||
BurnedAmount: numericFromDecimal(src.BurnedAmount),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapInscriptionEntryModelToType(src gen.GetInscriptionEntriesByIdsRow) (ordinals.InscriptionEntry, error) {
|
||||
inscriptionId, err := ordinals.NewInscriptionIdFromString(src.Id)
|
||||
if err != nil {
|
||||
return ordinals.InscriptionEntry{}, errors.Wrap(err, "invalid inscription id")
|
||||
}
|
||||
|
||||
var delegate, parent *ordinals.InscriptionId
|
||||
if src.Delegate.Valid {
|
||||
delegateValue, err := ordinals.NewInscriptionIdFromString(src.Delegate.String)
|
||||
if err != nil {
|
||||
return ordinals.InscriptionEntry{}, errors.Wrap(err, "invalid delegate id")
|
||||
}
|
||||
delegate = &delegateValue
|
||||
}
|
||||
// ord 0.14.0 supports only one parent
|
||||
if len(src.Parents) > 0 {
|
||||
parentValue, err := ordinals.NewInscriptionIdFromString(src.Parents[0])
|
||||
if err != nil {
|
||||
return ordinals.InscriptionEntry{}, errors.Wrap(err, "invalid parent id")
|
||||
}
|
||||
parent = &parentValue
|
||||
}
|
||||
|
||||
inscription := ordinals.Inscription{
|
||||
Content: src.Content,
|
||||
ContentEncoding: lo.Ternary(src.ContentEncoding.Valid, src.ContentEncoding.String, ""),
|
||||
ContentType: lo.Ternary(src.ContentType.Valid, src.ContentType.String, ""),
|
||||
Delegate: delegate,
|
||||
Metadata: src.Metadata,
|
||||
Metaprotocol: lo.Ternary(src.Metaprotocol.Valid, src.Metaprotocol.String, ""),
|
||||
Parent: parent,
|
||||
Pointer: lo.Ternary(src.Pointer.Valid, lo.ToPtr(uint64(src.Pointer.Int64)), nil),
|
||||
}
|
||||
|
||||
return ordinals.InscriptionEntry{
|
||||
Id: inscriptionId,
|
||||
Number: src.Number,
|
||||
SequenceNumber: uint64(src.SequenceNumber),
|
||||
Cursed: src.Cursed,
|
||||
CursedForBRC20: src.CursedForBrc20,
|
||||
CreatedAt: lo.Ternary(src.CreatedAt.Valid, src.CreatedAt.Time, time.Time{}),
|
||||
CreatedAtHeight: uint64(src.CreatedAtHeight),
|
||||
Inscription: inscription,
|
||||
TransferCount: lo.Ternary(src.TransferCount.Valid, uint32(src.TransferCount.Int32), 0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapInscriptionEntryTypeToParams(src ordinals.InscriptionEntry, blockHeight uint64) (gen.CreateInscriptionEntriesParams, gen.CreateInscriptionEntryStatesParams, error) {
|
||||
var delegate, metaprotocol, contentEncoding, contentType pgtype.Text
|
||||
if src.Inscription.Delegate != nil {
|
||||
delegate = pgtype.Text{String: src.Inscription.Delegate.String(), Valid: true}
|
||||
}
|
||||
if src.Inscription.Metaprotocol != "" {
|
||||
metaprotocol = pgtype.Text{String: src.Inscription.Metaprotocol, Valid: true}
|
||||
}
|
||||
if src.Inscription.ContentEncoding != "" {
|
||||
contentEncoding = pgtype.Text{String: src.Inscription.ContentEncoding, Valid: true}
|
||||
}
|
||||
if src.Inscription.ContentType != "" {
|
||||
contentType = pgtype.Text{String: src.Inscription.ContentType, Valid: true}
|
||||
}
|
||||
var parents []string
|
||||
if src.Inscription.Parent != nil {
|
||||
parents = append(parents, src.Inscription.Parent.String())
|
||||
}
|
||||
var pointer pgtype.Int8
|
||||
if src.Inscription.Pointer != nil {
|
||||
pointer = pgtype.Int8{Int64: int64(*src.Inscription.Pointer), Valid: true}
|
||||
}
|
||||
return gen.CreateInscriptionEntriesParams{
|
||||
Id: src.Id.String(),
|
||||
Number: src.Number,
|
||||
SequenceNumber: int64(src.SequenceNumber),
|
||||
Delegate: delegate,
|
||||
Metadata: src.Inscription.Metadata,
|
||||
Metaprotocol: metaprotocol,
|
||||
Parents: parents,
|
||||
Pointer: pointer,
|
||||
Content: src.Inscription.Content,
|
||||
ContentEncoding: contentEncoding,
|
||||
ContentType: contentType,
|
||||
Cursed: src.Cursed,
|
||||
CursedForBrc20: src.CursedForBRC20,
|
||||
CreatedAt: lo.Ternary(!src.CreatedAt.IsZero(), pgtype.Timestamp{Time: src.CreatedAt, Valid: true}, pgtype.Timestamp{}),
|
||||
CreatedAtHeight: int32(src.CreatedAtHeight),
|
||||
}, gen.CreateInscriptionEntryStatesParams{
|
||||
Id: src.Id.String(),
|
||||
BlockHeight: int32(blockHeight),
|
||||
TransferCount: int32(src.TransferCount),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapInscriptionTransferModelToType(src gen.GetInscriptionTransfersInOutPointsRow) (entity.InscriptionTransfer, error) {
|
||||
inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
|
||||
if err != nil {
|
||||
return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid inscription id")
|
||||
}
|
||||
txHash, err := chainhash.NewHashFromStr(src.TxHash)
|
||||
if err != nil {
|
||||
return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid tx hash")
|
||||
}
|
||||
var oldSatPoint, newSatPoint ordinals.SatPoint
|
||||
if src.OldSatpointTxHash.Valid {
|
||||
if !src.OldSatpointOutIdx.Valid || !src.OldSatpointOffset.Valid {
|
||||
return entity.InscriptionTransfer{}, errors.New("old satpoint out idx and offset must exist if hash exists")
|
||||
}
|
||||
txHash, err := chainhash.NewHashFromStr(src.OldSatpointTxHash.String)
|
||||
if err != nil {
|
||||
return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid old satpoint tx hash")
|
||||
}
|
||||
oldSatPoint = ordinals.SatPoint{
|
||||
OutPoint: wire.OutPoint{
|
||||
Hash: *txHash,
|
||||
Index: uint32(src.OldSatpointOutIdx.Int32),
|
||||
},
|
||||
Offset: uint64(src.OldSatpointOffset.Int64),
|
||||
}
|
||||
}
|
||||
if src.NewSatpointTxHash.Valid {
|
||||
if !src.NewSatpointOutIdx.Valid || !src.NewSatpointOffset.Valid {
|
||||
return entity.InscriptionTransfer{}, errors.New("new satpoint out idx and offset must exist if hash exists")
|
||||
}
|
||||
txHash, err := chainhash.NewHashFromStr(src.NewSatpointTxHash.String)
|
||||
if err != nil {
|
||||
return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid new satpoint tx hash")
|
||||
}
|
||||
newSatPoint = ordinals.SatPoint{
|
||||
OutPoint: wire.OutPoint{
|
||||
Hash: *txHash,
|
||||
Index: uint32(src.NewSatpointOutIdx.Int32),
|
||||
},
|
||||
Offset: uint64(src.NewSatpointOffset.Int64),
|
||||
}
|
||||
}
|
||||
newPkScript, err := hex.DecodeString(src.NewPkscript)
|
||||
if err != nil {
|
||||
return entity.InscriptionTransfer{}, errors.Wrap(err, "failed to parse pkscript")
|
||||
}
|
||||
|
||||
return entity.InscriptionTransfer{
|
||||
InscriptionId: inscriptionId,
|
||||
BlockHeight: uint64(src.BlockHeight),
|
||||
TxIndex: uint32(src.TxIndex),
|
||||
TxHash: *txHash,
|
||||
FromInputIndex: uint32(src.FromInputIndex),
|
||||
Content: src.Content,
|
||||
OldSatPoint: oldSatPoint,
|
||||
NewSatPoint: newSatPoint,
|
||||
NewPkScript: newPkScript,
|
||||
NewOutputValue: uint64(src.NewOutputValue),
|
||||
SentAsFee: src.SentAsFee,
|
||||
TransferCount: uint32(src.TransferCount),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapInscriptionTransferTypeToParams(src entity.InscriptionTransfer) gen.CreateInscriptionTransfersParams {
|
||||
return gen.CreateInscriptionTransfersParams{
|
||||
InscriptionID: src.InscriptionId.String(),
|
||||
BlockHeight: int32(src.BlockHeight),
|
||||
TxIndex: int32(src.TxIndex),
|
||||
TxHash: src.TxHash.String(),
|
||||
FromInputIndex: int32(src.FromInputIndex),
|
||||
OldSatpointTxHash: lo.Ternary(src.OldSatPoint != ordinals.SatPoint{}, pgtype.Text{String: src.OldSatPoint.OutPoint.Hash.String(), Valid: true}, pgtype.Text{}),
|
||||
OldSatpointOutIdx: lo.Ternary(src.OldSatPoint != ordinals.SatPoint{}, pgtype.Int4{Int32: int32(src.OldSatPoint.OutPoint.Index), Valid: true}, pgtype.Int4{}),
|
||||
OldSatpointOffset: lo.Ternary(src.OldSatPoint != ordinals.SatPoint{}, pgtype.Int8{Int64: int64(src.OldSatPoint.Offset), Valid: true}, pgtype.Int8{}),
|
||||
NewSatpointTxHash: lo.Ternary(src.NewSatPoint != ordinals.SatPoint{}, pgtype.Text{String: src.NewSatPoint.OutPoint.Hash.String(), Valid: true}, pgtype.Text{}),
|
||||
NewSatpointOutIdx: lo.Ternary(src.NewSatPoint != ordinals.SatPoint{}, pgtype.Int4{Int32: int32(src.NewSatPoint.OutPoint.Index), Valid: true}, pgtype.Int4{}),
|
||||
NewSatpointOffset: lo.Ternary(src.NewSatPoint != ordinals.SatPoint{}, pgtype.Int8{Int64: int64(src.NewSatPoint.Offset), Valid: true}, pgtype.Int8{}),
|
||||
NewPkscript: hex.EncodeToString(src.NewPkScript),
|
||||
NewOutputValue: int64(src.NewOutputValue),
|
||||
SentAsFee: src.SentAsFee,
|
||||
TransferCount: int32(src.TransferCount),
|
||||
}
|
||||
}
|
||||
|
||||
// mapEventDeployModelToType converts a brc20_event_deploys row into its domain
// entity, parsing the string/hex-encoded columns back into their native types.
// Returns a wrapped error if any stored column fails to parse.
func mapEventDeployModelToType(src gen.Brc20EventDeploy) (entity.EventDeploy, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "invalid inscription id")
	}
	txHash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "invalid tx hash")
	}
	// pkscript column is stored hex-encoded (see mapEventDeployTypeToParams)
	pkScript, err := hex.DecodeString(src.Pkscript)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "failed to parse pkscript")
	}
	satPoint, err := ordinals.NewSatPointFromString(src.Satpoint)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "cannot parse satpoint")
	}
	return entity.EventDeploy{
		Id:                src.Id,
		InscriptionId:     inscriptionId,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *txHash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		PkScript:          pkScript,
		SatPoint:          satPoint,
		TotalSupply:       decimalFromNumeric(src.TotalSupply).Decimal,
		Decimals:          uint16(src.Decimals),
		LimitPerMint:      decimalFromNumeric(src.LimitPerMint).Decimal,
		IsSelfMint:        src.IsSelfMint,
	}, nil
}
|
||||
|
||||
func mapEventDeployTypeToParams(src entity.EventDeploy) (gen.CreateEventDeploysParams, error) {
|
||||
var timestamp pgtype.Timestamp
|
||||
if !src.Timestamp.IsZero() {
|
||||
timestamp = pgtype.Timestamp{Time: src.Timestamp, Valid: true}
|
||||
}
|
||||
return gen.CreateEventDeploysParams{
|
||||
InscriptionID: src.InscriptionId.String(),
|
||||
InscriptionNumber: src.InscriptionNumber,
|
||||
Tick: src.Tick,
|
||||
OriginalTick: src.OriginalTick,
|
||||
TxHash: src.TxHash.String(),
|
||||
BlockHeight: int32(src.BlockHeight),
|
||||
TxIndex: int32(src.TxIndex),
|
||||
Timestamp: timestamp,
|
||||
Pkscript: hex.EncodeToString(src.PkScript),
|
||||
Satpoint: src.SatPoint.String(),
|
||||
TotalSupply: numericFromDecimal(src.TotalSupply),
|
||||
Decimals: int16(src.Decimals),
|
||||
LimitPerMint: numericFromDecimal(src.LimitPerMint),
|
||||
IsSelfMint: src.IsSelfMint,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// mapEventMintModelToType converts a brc20_event_mints row into its domain
// entity, parsing the string/hex-encoded columns back into their native types.
// Returns a wrapped error if any stored column fails to parse.
func mapEventMintModelToType(src gen.Brc20EventMint) (entity.EventMint, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "invalid inscription id")
	}
	txHash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "invalid tx hash")
	}
	// pkscript column is stored hex-encoded (see mapEventMintTypeToParams)
	pkScript, err := hex.DecodeString(src.Pkscript)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "failed to parse pkscript")
	}
	satPoint, err := ordinals.NewSatPointFromString(src.Satpoint)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "cannot parse satpoint")
	}
	// parent_id is a nullable column: mints of self-mint ticks record the deploy
	// inscription as parent (see handleEventMint); others store NULL.
	var parentId *ordinals.InscriptionId
	if src.ParentID.Valid {
		parentIdValue, err := ordinals.NewInscriptionIdFromString(src.ParentID.String)
		if err != nil {
			return entity.EventMint{}, errors.Wrap(err, "invalid parent id")
		}
		parentId = &parentIdValue
	}
	return entity.EventMint{
		Id:                src.Id,
		InscriptionId:     inscriptionId,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *txHash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		PkScript:          pkScript,
		SatPoint:          satPoint,
		Amount:            decimalFromNumeric(src.Amount).Decimal,
		ParentId:          parentId,
	}, nil
}
|
||||
|
||||
func mapEventMintTypeToParams(src entity.EventMint) (gen.CreateEventMintsParams, error) {
|
||||
var timestamp pgtype.Timestamp
|
||||
if !src.Timestamp.IsZero() {
|
||||
timestamp = pgtype.Timestamp{Time: src.Timestamp, Valid: true}
|
||||
}
|
||||
var parentId pgtype.Text
|
||||
if src.ParentId != nil {
|
||||
parentId = pgtype.Text{String: src.ParentId.String(), Valid: true}
|
||||
}
|
||||
return gen.CreateEventMintsParams{
|
||||
InscriptionID: src.InscriptionId.String(),
|
||||
InscriptionNumber: src.InscriptionNumber,
|
||||
Tick: src.Tick,
|
||||
OriginalTick: src.OriginalTick,
|
||||
TxHash: src.TxHash.String(),
|
||||
BlockHeight: int32(src.BlockHeight),
|
||||
TxIndex: int32(src.TxIndex),
|
||||
Timestamp: timestamp,
|
||||
Pkscript: hex.EncodeToString(src.PkScript),
|
||||
Satpoint: src.SatPoint.String(),
|
||||
Amount: numericFromDecimal(src.Amount),
|
||||
ParentID: parentId,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// mapEventInscribeTransferModelToType converts a brc20_event_inscribe_transfers
// row into its domain entity, parsing the string/hex-encoded columns back into
// their native types. Returns a wrapped error if any stored column fails to parse.
func mapEventInscribeTransferModelToType(src gen.Brc20EventInscribeTransfer) (entity.EventInscribeTransfer, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse inscription id")
	}
	txHash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse hash")
	}
	// pkscript column is stored hex-encoded (see mapEventInscribeTransferTypeToParams)
	pkScript, err := hex.DecodeString(src.Pkscript)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse pkScript")
	}
	satPoint, err := ordinals.NewSatPointFromString(src.Satpoint)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse satPoint")
	}
	return entity.EventInscribeTransfer{
		Id:                src.Id,
		InscriptionId:     inscriptionId,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *txHash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		PkScript:          pkScript,
		SatPoint:          satPoint,
		OutputIndex:       uint32(src.OutputIndex),
		SatsAmount:        uint64(src.SatsAmount),
		Amount:            decimalFromNumeric(src.Amount).Decimal,
	}, nil
}
|
||||
|
||||
func mapEventInscribeTransferTypeToParams(src entity.EventInscribeTransfer) (gen.CreateEventInscribeTransfersParams, error) {
|
||||
var timestamp pgtype.Timestamp
|
||||
if !src.Timestamp.IsZero() {
|
||||
timestamp = pgtype.Timestamp{Time: src.Timestamp, Valid: true}
|
||||
}
|
||||
return gen.CreateEventInscribeTransfersParams{
|
||||
InscriptionID: src.InscriptionId.String(),
|
||||
InscriptionNumber: src.InscriptionNumber,
|
||||
Tick: src.Tick,
|
||||
OriginalTick: src.OriginalTick,
|
||||
TxHash: src.TxHash.String(),
|
||||
BlockHeight: int32(src.BlockHeight),
|
||||
TxIndex: int32(src.TxIndex),
|
||||
Timestamp: timestamp,
|
||||
Pkscript: hex.EncodeToString(src.PkScript),
|
||||
Satpoint: src.SatPoint.String(),
|
||||
OutputIndex: int32(src.OutputIndex),
|
||||
SatsAmount: int64(src.SatsAmount),
|
||||
Amount: numericFromDecimal(src.Amount),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// mapEventTransferTransferModelToType converts a brc20_event_transfer_transfers
// row into its domain entity, parsing the string/hex-encoded columns (both the
// "from" and "to" sides) back into their native types. Returns a wrapped error
// if any stored column fails to parse.
func mapEventTransferTransferModelToType(src gen.Brc20EventTransferTransfer) (entity.EventTransferTransfer, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse inscription id")
	}
	txHash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse hash")
	}
	// pkscript columns are stored hex-encoded (see mapEventTransferTransferTypeToParams)
	fromPkScript, err := hex.DecodeString(src.FromPkscript)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse fromPkScript")
	}
	fromSatPoint, err := ordinals.NewSatPointFromString(src.FromSatpoint)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse fromSatPoint")
	}
	toPkScript, err := hex.DecodeString(src.ToPkscript)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse toPkScript")
	}
	toSatPoint, err := ordinals.NewSatPointFromString(src.ToSatpoint)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse toSatPoint")
	}
	return entity.EventTransferTransfer{
		Id:                src.Id,
		InscriptionId:     inscriptionId,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *txHash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		FromPkScript:      fromPkScript,
		FromSatPoint:      fromSatPoint,
		FromInputIndex:    uint32(src.FromInputIndex),
		ToPkScript:        toPkScript,
		ToSatPoint:        toSatPoint,
		ToOutputIndex:     uint32(src.ToOutputIndex),
		SpentAsFee:        src.SpentAsFee,
		Amount:            decimalFromNumeric(src.Amount).Decimal,
	}, nil
}
|
||||
|
||||
func mapEventTransferTransferTypeToParams(src entity.EventTransferTransfer) (gen.CreateEventTransferTransfersParams, error) {
|
||||
var timestamp pgtype.Timestamp
|
||||
if !src.Timestamp.IsZero() {
|
||||
timestamp = pgtype.Timestamp{Time: src.Timestamp, Valid: true}
|
||||
}
|
||||
return gen.CreateEventTransferTransfersParams{
|
||||
InscriptionID: src.InscriptionId.String(),
|
||||
InscriptionNumber: src.InscriptionNumber,
|
||||
Tick: src.Tick,
|
||||
OriginalTick: src.OriginalTick,
|
||||
TxHash: src.TxHash.String(),
|
||||
BlockHeight: int32(src.BlockHeight),
|
||||
TxIndex: int32(src.TxIndex),
|
||||
Timestamp: timestamp,
|
||||
FromPkscript: hex.EncodeToString(src.FromPkScript),
|
||||
FromSatpoint: src.FromSatPoint.String(),
|
||||
FromInputIndex: int32(src.FromInputIndex),
|
||||
ToPkscript: hex.EncodeToString(src.ToPkScript),
|
||||
ToSatpoint: src.ToSatPoint.String(),
|
||||
ToOutputIndex: int32(src.ToOutputIndex),
|
||||
SpentAsFee: src.SpentAsFee,
|
||||
Amount: numericFromDecimal(src.Amount),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapBalanceModelToType(src gen.Brc20Balance) (entity.Balance, error) {
|
||||
pkScript, err := hex.DecodeString(src.Pkscript)
|
||||
if err != nil {
|
||||
return entity.Balance{}, errors.Wrap(err, "failed to parse pkscript")
|
||||
}
|
||||
return entity.Balance{
|
||||
PkScript: pkScript,
|
||||
Tick: src.Tick,
|
||||
BlockHeight: uint64(src.BlockHeight),
|
||||
OverallBalance: decimalFromNumeric(src.OverallBalance).Decimal,
|
||||
AvailableBalance: decimalFromNumeric(src.AvailableBalance).Decimal,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func mapBalanceTypeToParams(src entity.Balance) gen.CreateBalancesParams {
|
||||
return gen.CreateBalancesParams{
|
||||
Pkscript: hex.EncodeToString(src.PkScript),
|
||||
Tick: src.Tick,
|
||||
BlockHeight: int32(src.BlockHeight),
|
||||
OverallBalance: numericFromDecimal(src.OverallBalance),
|
||||
AvailableBalance: numericFromDecimal(src.AvailableBalance),
|
||||
}
|
||||
}
|
||||
20
modules/brc20/internal/repository/postgres/postgres.go
Normal file
20
modules/brc20/internal/repository/postgres/postgres.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"github.com/gaze-network/indexer-network/internal/postgres"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres/gen"
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
// Repository implements the BRC20 data gateways on top of PostgreSQL via
// sqlc-generated queries. A Repository returned by begin() is scoped to a
// single database transaction (tx non-nil); the root Repository runs queries
// directly against the pool.
type Repository struct {
	db      postgres.DB  // pool/handle used to run queries and to begin transactions
	queries *gen.Queries // sqlc query layer, bound to db (root) or to tx (transactional copy)
	tx      pgx.Tx       // nil for the root Repository; set only by begin()
}
|
||||
|
||||
func NewRepository(db postgres.DB) *Repository {
|
||||
return &Repository{
|
||||
db: db,
|
||||
queries: gen.New(db),
|
||||
}
|
||||
}
|
||||
62
modules/brc20/internal/repository/postgres/tx.go
Normal file
62
modules/brc20/internal/repository/postgres/tx.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
var ErrTxAlreadyExists = errors.New("Transaction already exists. Call Commit() or Rollback() first.")
|
||||
|
||||
// begin starts a new database transaction and returns a child Repository whose
// queries are bound to that transaction. The receiver itself is left untouched,
// so the root Repository remains usable. Fails with ErrTxAlreadyExists if the
// receiver is itself already transaction-scoped (nested transactions are not
// supported here).
func (r *Repository) begin(ctx context.Context) (*Repository, error) {
	if r.tx != nil {
		return nil, errors.WithStack(ErrTxAlreadyExists)
	}
	tx, err := r.db.Begin(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to begin transaction")
	}
	// child copy: same db handle, queries rebound to the new transaction
	return &Repository{
		db:      r.db,
		queries: r.queries.WithTx(tx),
		tx:      tx,
	}, nil
}
|
||||
|
||||
func (r *Repository) BeginBRC20Tx(ctx context.Context) (datagateway.BRC20DataGatewayWithTx, error) {
|
||||
repo, err := r.begin(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
func (r *Repository) Commit(ctx context.Context) error {
|
||||
if r.tx == nil {
|
||||
return nil
|
||||
}
|
||||
err := r.tx.Commit(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to commit transaction")
|
||||
}
|
||||
r.tx = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rollback aborts the pending transaction, if any, and detaches it from the
// Repository. It tolerates pgx.ErrTxClosed so it can be deferred
// unconditionally even when Commit has already run. Calling Rollback on a
// non-transactional Repository is a no-op.
func (r *Repository) Rollback(ctx context.Context) error {
	if r.tx == nil {
		return nil
	}
	err := r.tx.Rollback(ctx)
	// ErrTxClosed means the tx was already committed/rolled back; not a failure here.
	if err != nil && !errors.Is(err, pgx.ErrTxClosed) {
		return errors.Wrap(err, "failed to rollback transaction")
	}
	// log only when a rollback actually happened, not when the tx was already closed
	if err == nil {
		logger.DebugContext(ctx, "rolled back transaction")
	}
	r.tx = nil
	return nil
}
|
||||
238
modules/brc20/processor.go
Normal file
238
modules/brc20/processor.go
Normal file
@@ -0,0 +1,238 @@
|
||||
package brc20
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/indexer"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
"github.com/gaze-network/indexer-network/pkg/btcclient"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
"github.com/gaze-network/indexer-network/pkg/lru"
|
||||
)
|
||||
|
||||
// Make sure to implement the Bitcoin Processor interface
var _ indexer.Processor[*types.Block] = (*Processor)(nil)

// Processor indexes BRC20 state (and the underlying ordinals inscription
// transfers) from Bitcoin blocks. Per-block results accumulate in the "flush
// buffers" below; the stats counters are loaded from storage by VerifyStates
// before indexing starts.
type Processor struct {
	brc20Dg       datagateway.BRC20DataGateway       // BRC20/inscription persistence
	indexerInfoDg datagateway.IndexerInfoDataGateway // indexer version/network bookkeeping
	btcClient     btcclient.Contract                 // Bitcoin client (usage outside this chunk)
	network       common.Network
	cleanupFuncs  []func(context.Context) error // run by Shutdown

	// block states
	flotsamsSentAsFee []*entity.Flotsam
	blockReward       uint64

	// processor stats (initialized by VerifyStates from persisted ProcessorStats)
	cursedInscriptionCount  uint64
	blessedInscriptionCount uint64
	lostSats                uint64

	// cache
	outPointValueCache *lru.Cache[wire.OutPoint, uint64]

	// flush buffers - inscription states
	newInscriptionTransfers   []*entity.InscriptionTransfer
	newInscriptionEntries     map[ordinals.InscriptionId]*ordinals.InscriptionEntry
	newInscriptionEntryStates map[ordinals.InscriptionId]*ordinals.InscriptionEntry
	// flush buffers - brc20 states
	newTickEntries            map[string]*entity.TickEntry
	newTickEntryStates        map[string]*entity.TickEntry
	newEventDeploys           []*entity.EventDeploy
	newEventMints             []*entity.EventMint
	newEventInscribeTransfers []*entity.EventInscribeTransfer
	newEventTransferTransfers []*entity.EventTransferTransfer
	newBalances               map[string]map[string]*entity.Balance // pkscript -> tick -> balance

	// eventHashString accumulates the per-block event strings used for event hashing
	// (built via eventHashBuilder in processBRC20States — see processor_brc20.go).
	eventHashString string
}
|
||||
|
||||
// TODO: move this to config
// outPointValueCacheSize bounds the LRU cache mapping outpoints to their
// output values (sats), avoiding repeated lookups for recently seen outputs.
const outPointValueCacheSize = 100000
|
||||
|
||||
// NewProcessor wires up a BRC20 Processor with empty block states and flush
// buffers. The stats counters are left at zero and are loaded from persisted
// ProcessorStats by VerifyStates before indexing starts. Returns an error only
// if the outpoint-value LRU cache cannot be created.
func NewProcessor(brc20Dg datagateway.BRC20DataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, btcClient btcclient.Contract, network common.Network, cleanupFuncs []func(context.Context) error) (*Processor, error) {
	outPointValueCache, err := lru.New[wire.OutPoint, uint64](outPointValueCacheSize)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create outPointValueCache")
	}

	return &Processor{
		brc20Dg:       brc20Dg,
		indexerInfoDg: indexerInfoDg,
		btcClient:     btcClient,
		network:       network,
		cleanupFuncs:  cleanupFuncs,

		flotsamsSentAsFee: make([]*entity.Flotsam, 0),
		blockReward:       0,

		cursedInscriptionCount:  0, // to be initialized by p.VerifyStates()
		blessedInscriptionCount: 0, // to be initialized by p.VerifyStates()
		lostSats:                0, // to be initialized by p.VerifyStates()
		outPointValueCache:      outPointValueCache,

		newInscriptionTransfers:   make([]*entity.InscriptionTransfer, 0),
		newInscriptionEntries:     make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry),
		newInscriptionEntryStates: make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry),

		newTickEntries:            make(map[string]*entity.TickEntry),
		newTickEntryStates:        make(map[string]*entity.TickEntry),
		newEventDeploys:           make([]*entity.EventDeploy, 0),
		newEventMints:             make([]*entity.EventMint, 0),
		newEventInscribeTransfers: make([]*entity.EventInscribeTransfer, 0),
		newEventTransferTransfers: make([]*entity.EventTransferTransfer, 0),
		newBalances:               make(map[string]map[string]*entity.Balance),
	}, nil
}
|
||||
|
||||
// VerifyStates implements indexer.Processor.
|
||||
func (p *Processor) VerifyStates(ctx context.Context) error {
|
||||
indexerState, err := p.indexerInfoDg.GetLatestIndexerState(ctx)
|
||||
if err != nil && !errors.Is(err, errs.NotFound) {
|
||||
return errors.Wrap(err, "failed to get latest indexer state")
|
||||
}
|
||||
// if not found, create indexer state
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
if err := p.indexerInfoDg.CreateIndexerState(ctx, entity.IndexerState{
|
||||
ClientVersion: ClientVersion,
|
||||
DBVersion: DBVersion,
|
||||
EventHashVersion: EventHashVersion,
|
||||
Network: p.network,
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "failed to set indexer state")
|
||||
}
|
||||
} else {
|
||||
if indexerState.DBVersion != DBVersion {
|
||||
return errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, DBVersion)
|
||||
}
|
||||
if indexerState.EventHashVersion != EventHashVersion {
|
||||
return errors.Wrapf(errs.ConflictSetting, "event version mismatch: current version is %d. Please reset rune's db first.", indexerState.EventHashVersion, EventHashVersion)
|
||||
}
|
||||
if indexerState.Network != p.network {
|
||||
return errors.Wrapf(errs.ConflictSetting, "network mismatch: latest indexed network is %d, configured network is %d. If you want to change the network, please reset the database", indexerState.Network, p.network)
|
||||
}
|
||||
}
|
||||
|
||||
stats, err := p.brc20Dg.GetProcessorStats(ctx)
|
||||
if err != nil {
|
||||
if !errors.Is(err, errs.NotFound) {
|
||||
return errors.Wrap(err, "failed to count cursed inscriptions")
|
||||
}
|
||||
stats = &entity.ProcessorStats{
|
||||
BlockHeight: uint64(startingBlockHeader[p.network].Height),
|
||||
CursedInscriptionCount: 0,
|
||||
BlessedInscriptionCount: 0,
|
||||
LostSats: 0,
|
||||
}
|
||||
}
|
||||
p.cursedInscriptionCount = stats.CursedInscriptionCount
|
||||
p.blessedInscriptionCount = stats.BlessedInscriptionCount
|
||||
p.lostSats = stats.LostSats
|
||||
return nil
|
||||
}
|
||||
|
||||
// CurrentBlock implements indexer.Processor.
|
||||
func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
|
||||
blockHeader, err := p.brc20Dg.GetLatestBlock(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, errs.NotFound) {
|
||||
return startingBlockHeader[p.network], nil
|
||||
}
|
||||
return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block")
|
||||
}
|
||||
return blockHeader, nil
|
||||
}
|
||||
|
||||
// GetIndexedBlock implements indexer.Processor.
|
||||
func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
|
||||
block, err := p.brc20Dg.GetIndexedBlockByHeight(ctx, height)
|
||||
if err != nil {
|
||||
return types.BlockHeader{}, errors.Wrap(err, "failed to get indexed block")
|
||||
}
|
||||
return types.BlockHeader{
|
||||
Height: int64(block.Height),
|
||||
Hash: block.Hash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name implements indexer.Processor. It is the module identifier used to
// register this processor with the indexer core.
func (p *Processor) Name() string {
	return "brc20"
}
|
||||
|
||||
// RevertData implements indexer.Processor.
|
||||
func (p *Processor) RevertData(ctx context.Context, from int64) error {
|
||||
brc20DgTx, err := p.brc20Dg.BeginBRC20Tx(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to begin transaction")
|
||||
}
|
||||
defer func() {
|
||||
if err := brc20DgTx.Rollback(ctx); err != nil {
|
||||
logger.WarnContext(ctx, "failed to rollback transaction",
|
||||
slogx.Error(err),
|
||||
slogx.String("event", "rollback_brc20_insertion"),
|
||||
)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := brc20DgTx.DeleteIndexedBlocksSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete indexed blocks")
|
||||
}
|
||||
if err := brc20DgTx.DeleteProcessorStatsSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete processor stats")
|
||||
}
|
||||
if err := brc20DgTx.DeleteTickEntriesSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete ticks")
|
||||
}
|
||||
if err := brc20DgTx.DeleteTickEntryStatesSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete tick states")
|
||||
}
|
||||
if err := brc20DgTx.DeleteEventDeploysSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete deploy events")
|
||||
}
|
||||
if err := brc20DgTx.DeleteEventMintsSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete mint events")
|
||||
}
|
||||
if err := brc20DgTx.DeleteEventInscribeTransfersSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete inscribe transfer events")
|
||||
}
|
||||
if err := brc20DgTx.DeleteEventTransferTransfersSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete transfer transfer events")
|
||||
}
|
||||
if err := brc20DgTx.DeleteBalancesSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete balances")
|
||||
}
|
||||
if err := brc20DgTx.DeleteInscriptionEntriesSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete inscription entries")
|
||||
}
|
||||
if err := brc20DgTx.DeleteInscriptionEntryStatesSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete inscription entry states")
|
||||
}
|
||||
if err := brc20DgTx.DeleteInscriptionTransfersSinceHeight(ctx, uint64(from)); err != nil {
|
||||
return errors.Wrap(err, "failed to delete inscription transfers")
|
||||
}
|
||||
|
||||
if err := brc20DgTx.Commit(ctx); err != nil {
|
||||
return errors.Wrap(err, "failed to commit transaction")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Processor) Shutdown(ctx context.Context) error {
|
||||
var errs []error
|
||||
for _, cleanup := range p.cleanupFuncs {
|
||||
if err := cleanup(ctx); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
return errors.WithStack(errors.Join(errs...))
|
||||
}
|
||||
447
modules/brc20/processor_brc20.go
Normal file
447
modules/brc20/processor_brc20.go
Normal file
@@ -0,0 +1,447 @@
|
||||
package brc20
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/brc20"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
"github.com/samber/lo"
|
||||
"github.com/shopspring/decimal"
|
||||
)
|
||||
|
||||
func (p *Processor) processBRC20States(ctx context.Context, transfers []*entity.InscriptionTransfer, blockHeader types.BlockHeader) error {
|
||||
payloads := make([]*brc20.Payload, 0)
|
||||
ticks := make(map[string]struct{})
|
||||
for _, transfer := range transfers {
|
||||
if transfer.Content == nil {
|
||||
// skip empty content
|
||||
continue
|
||||
}
|
||||
payload, err := brc20.ParsePayload(transfer)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse payload")
|
||||
}
|
||||
payloads = append(payloads, payload)
|
||||
ticks[payload.Tick] = struct{}{}
|
||||
}
|
||||
if len(payloads) == 0 {
|
||||
// skip if no valid payloads
|
||||
return nil
|
||||
}
|
||||
// TODO: concurrently fetch from db to optimize speed
|
||||
tickEntries, err := p.brc20Dg.GetTickEntriesByTicks(ctx, lo.Keys(ticks))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get inscription entries by ids")
|
||||
}
|
||||
|
||||
// preload required data to reduce individual data fetching during process
|
||||
inscriptionIds := make([]ordinals.InscriptionId, 0)
|
||||
inscriptionIdsToFetchParent := make([]ordinals.InscriptionId, 0)
|
||||
inscriptionIdsToFetchEventInscribeTransfer := make([]ordinals.InscriptionId, 0)
|
||||
balancesToFetch := make([]datagateway.GetBalancesBatchAtHeightQuery, 0) // pkscript -> tick -> struct{}
|
||||
for _, payload := range payloads {
|
||||
inscriptionIds = append(inscriptionIds, payload.Transfer.InscriptionId)
|
||||
if payload.Op == brc20.OperationMint {
|
||||
// preload parent id to validate mint events with self mint
|
||||
if entry := tickEntries[payload.Tick]; entry.IsSelfMint {
|
||||
inscriptionIdsToFetchParent = append(inscriptionIdsToFetchParent, payload.Transfer.InscriptionId)
|
||||
}
|
||||
}
|
||||
if payload.Op == brc20.OperationTransfer {
|
||||
if payload.Transfer.OldSatPoint == (ordinals.SatPoint{}) {
|
||||
// preload balance to validate inscribe transfer event
|
||||
balancesToFetch = append(balancesToFetch, datagateway.GetBalancesBatchAtHeightQuery{
|
||||
PkScriptHex: hex.EncodeToString(payload.Transfer.NewPkScript),
|
||||
Tick: payload.Tick,
|
||||
})
|
||||
} else {
|
||||
// preload inscribe-transfer events to validate transfer-transfer event
|
||||
inscriptionIdsToFetchEventInscribeTransfer = append(inscriptionIdsToFetchEventInscribeTransfer, payload.Transfer.InscriptionId)
|
||||
}
|
||||
}
|
||||
}
|
||||
inscriptionIdsToNumber, err := p.getInscriptionNumbersByIds(ctx, lo.Uniq(inscriptionIds))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get inscription numbers by ids")
|
||||
}
|
||||
inscriptionIdsToParent, err := p.getInscriptionParentsByIds(ctx, lo.Uniq(inscriptionIdsToFetchParent))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get inscription parents by ids")
|
||||
}
|
||||
latestEventId, err := p.brc20Dg.GetLatestEventId(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get latest event id")
|
||||
}
|
||||
// pkscript -> tick -> balance
|
||||
balances, err := p.brc20Dg.GetBalancesBatchAtHeight(ctx, uint64(blockHeader.Height-1), balancesToFetch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get balances batch at height")
|
||||
}
|
||||
eventInscribeTransfers, err := p.brc20Dg.GetEventInscribeTransfersByInscriptionIds(ctx, lo.Uniq(inscriptionIdsToFetchEventInscribeTransfer))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get event inscribe transfers by inscription ids")
|
||||
}
|
||||
|
||||
newTickEntries := make(map[string]*entity.TickEntry)
|
||||
newTickEntryStates := make(map[string]*entity.TickEntry)
|
||||
newEventDeploys := make([]*entity.EventDeploy, 0)
|
||||
newEventMints := make([]*entity.EventMint, 0)
|
||||
newEventInscribeTransfers := make([]*entity.EventInscribeTransfer, 0)
|
||||
newEventTransferTransfers := make([]*entity.EventTransferTransfer, 0)
|
||||
newBalances := make(map[string]map[string]*entity.Balance)
|
||||
var eventHashBuilder strings.Builder
|
||||
|
||||
handleEventDeploy := func(payload *brc20.Payload, tickEntry *entity.TickEntry) {
|
||||
if payload.Transfer.TransferCount > 1 {
|
||||
// skip used deploy inscriptions
|
||||
return
|
||||
}
|
||||
if tickEntry != nil {
|
||||
// skip deploy inscriptions for duplicate ticks
|
||||
return
|
||||
}
|
||||
newEntry := &entity.TickEntry{
|
||||
Tick: payload.Tick,
|
||||
OriginalTick: payload.OriginalTick,
|
||||
TotalSupply: payload.Max,
|
||||
Decimals: payload.Dec,
|
||||
LimitPerMint: payload.Lim,
|
||||
IsSelfMint: payload.SelfMint,
|
||||
DeployInscriptionId: payload.Transfer.InscriptionId,
|
||||
DeployedAt: blockHeader.Timestamp,
|
||||
DeployedAtHeight: payload.Transfer.BlockHeight,
|
||||
MintedAmount: decimal.Zero,
|
||||
BurnedAmount: decimal.Zero,
|
||||
CompletedAt: time.Time{},
|
||||
CompletedAtHeight: 0,
|
||||
}
|
||||
newTickEntries[payload.Tick] = newEntry
|
||||
newTickEntryStates[payload.Tick] = newEntry
|
||||
// update entries for other operations in same block
|
||||
tickEntries[payload.Tick] = newEntry
|
||||
|
||||
event := &entity.EventDeploy{
|
||||
Id: latestEventId + 1,
|
||||
InscriptionId: payload.Transfer.InscriptionId,
|
||||
InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
|
||||
Tick: payload.Tick,
|
||||
OriginalTick: payload.OriginalTick,
|
||||
TxHash: payload.Transfer.TxHash,
|
||||
BlockHeight: payload.Transfer.BlockHeight,
|
||||
TxIndex: payload.Transfer.TxIndex,
|
||||
Timestamp: blockHeader.Timestamp,
|
||||
PkScript: payload.Transfer.NewPkScript,
|
||||
SatPoint: payload.Transfer.NewSatPoint,
|
||||
TotalSupply: payload.Max,
|
||||
Decimals: payload.Dec,
|
||||
LimitPerMint: payload.Lim,
|
||||
IsSelfMint: payload.SelfMint,
|
||||
}
|
||||
newEventDeploys = append(newEventDeploys, event)
|
||||
latestEventId++
|
||||
|
||||
eventHashBuilder.WriteString(getEventDeployString(event) + eventHashSeparator)
|
||||
}
|
||||
handleEventMint := func(payload *brc20.Payload, tickEntry *entity.TickEntry) {
|
||||
if payload.Transfer.TransferCount > 1 {
|
||||
// skip used mint inscriptions that are already used
|
||||
return
|
||||
}
|
||||
if tickEntry == nil {
|
||||
// skip mint inscriptions for non-existent ticks
|
||||
return
|
||||
}
|
||||
if -payload.Amt.Exponent() > int32(tickEntry.Decimals) {
|
||||
// skip mint inscriptions with decimals greater than allowed
|
||||
return
|
||||
}
|
||||
if tickEntry.MintedAmount.GreaterThanOrEqual(tickEntry.TotalSupply) {
|
||||
// skip mint inscriptions for ticks with completed mints
|
||||
return
|
||||
}
|
||||
if payload.Amt.GreaterThan(tickEntry.LimitPerMint) {
|
||||
// skip mint inscriptions with amount greater than limit per mint
|
||||
return
|
||||
}
|
||||
mintableAmount := tickEntry.TotalSupply.Sub(tickEntry.MintedAmount)
|
||||
if payload.Amt.GreaterThan(mintableAmount) {
|
||||
payload.Amt = mintableAmount
|
||||
}
|
||||
var parentId *ordinals.InscriptionId
|
||||
if tickEntry.IsSelfMint {
|
||||
parentIdValue, ok := inscriptionIdsToParent[payload.Transfer.InscriptionId]
|
||||
if !ok {
|
||||
// skip mint inscriptions for self mint ticks without parent inscription
|
||||
return
|
||||
}
|
||||
if parentIdValue != tickEntry.DeployInscriptionId {
|
||||
// skip mint inscriptions for self mint ticks with invalid parent inscription
|
||||
return
|
||||
}
|
||||
parentId = &parentIdValue
|
||||
}
|
||||
|
||||
tickEntry.MintedAmount = tickEntry.MintedAmount.Add(payload.Amt)
|
||||
// mark as completed if this mint completes the total supply
|
||||
if tickEntry.MintedAmount.GreaterThanOrEqual(tickEntry.TotalSupply) {
|
||||
tickEntry.CompletedAt = blockHeader.Timestamp
|
||||
tickEntry.CompletedAtHeight = payload.Transfer.BlockHeight
|
||||
}
|
||||
|
||||
newTickEntryStates[payload.Tick] = tickEntry
|
||||
event := &entity.EventMint{
|
||||
Id: latestEventId + 1,
|
||||
InscriptionId: payload.Transfer.InscriptionId,
|
||||
InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
|
||||
Tick: payload.Tick,
|
||||
OriginalTick: payload.OriginalTick,
|
||||
TxHash: payload.Transfer.TxHash,
|
||||
BlockHeight: payload.Transfer.BlockHeight,
|
||||
TxIndex: payload.Transfer.TxIndex,
|
||||
Timestamp: blockHeader.Timestamp,
|
||||
PkScript: payload.Transfer.NewPkScript,
|
||||
SatPoint: payload.Transfer.NewSatPoint,
|
||||
Amount: payload.Amt,
|
||||
ParentId: parentId,
|
||||
}
|
||||
newEventMints = append(newEventMints, event)
|
||||
latestEventId++
|
||||
|
||||
eventHashBuilder.WriteString(getEventMintString(event, tickEntry.Decimals) + eventHashSeparator)
|
||||
}
|
||||
handleEventInscribeTransfer := func(payload *brc20.Payload, tickEntry *entity.TickEntry) {
|
||||
// inscribe transfer event
|
||||
pkScriptHex := hex.EncodeToString(payload.Transfer.NewPkScript)
|
||||
balance, ok := balances[pkScriptHex][payload.Tick]
|
||||
if !ok {
|
||||
balance = &entity.Balance{
|
||||
PkScript: payload.Transfer.NewPkScript,
|
||||
Tick: payload.Tick,
|
||||
BlockHeight: uint64(blockHeader.Height - 1),
|
||||
OverallBalance: decimal.Zero, // defaults balance to zero if not found
|
||||
AvailableBalance: decimal.Zero,
|
||||
}
|
||||
}
|
||||
if payload.Amt.GreaterThan(balance.AvailableBalance) {
|
||||
// skip inscribe transfer event if amount exceeds available balance
|
||||
return
|
||||
}
|
||||
// update balance state
|
||||
balance.BlockHeight = uint64(blockHeader.Height)
|
||||
balance.AvailableBalance = balance.AvailableBalance.Sub(payload.Amt)
|
||||
if _, ok := balances[pkScriptHex]; !ok {
|
||||
balances[pkScriptHex] = make(map[string]*entity.Balance)
|
||||
}
|
||||
balances[pkScriptHex][payload.Tick] = balance
|
||||
if _, ok := newBalances[pkScriptHex]; !ok {
|
||||
newBalances[pkScriptHex] = make(map[string]*entity.Balance)
|
||||
}
|
||||
newBalances[pkScriptHex][payload.Tick] = &entity.Balance{}
|
||||
|
||||
event := &entity.EventInscribeTransfer{
|
||||
Id: latestEventId + 1,
|
||||
InscriptionId: payload.Transfer.InscriptionId,
|
||||
InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
|
||||
Tick: payload.Tick,
|
||||
OriginalTick: payload.OriginalTick,
|
||||
TxHash: payload.Transfer.TxHash,
|
||||
BlockHeight: payload.Transfer.BlockHeight,
|
||||
TxIndex: payload.Transfer.TxIndex,
|
||||
Timestamp: blockHeader.Timestamp,
|
||||
PkScript: payload.Transfer.NewPkScript,
|
||||
SatPoint: payload.Transfer.NewSatPoint,
|
||||
OutputIndex: payload.Transfer.NewSatPoint.OutPoint.Index,
|
||||
SatsAmount: payload.Transfer.NewOutputValue,
|
||||
Amount: payload.Amt,
|
||||
}
|
||||
latestEventId++
|
||||
eventInscribeTransfers[payload.Transfer.InscriptionId] = event
|
||||
newEventInscribeTransfers = append(newEventInscribeTransfers, event)
|
||||
|
||||
eventHashBuilder.WriteString(getEventInscribeTransferString(event, tickEntry.Decimals) + eventHashSeparator)
|
||||
}
|
||||
handleEventTransferTransferAsFee := func(payload *brc20.Payload, tickEntry *entity.TickEntry, inscribeTransfer *entity.EventInscribeTransfer) {
|
||||
// return balance to sender
|
||||
fromPkScriptHex := hex.EncodeToString(inscribeTransfer.PkScript)
|
||||
fromBalance, ok := balances[fromPkScriptHex][payload.Tick]
|
||||
if !ok {
|
||||
fromBalance = &entity.Balance{
|
||||
PkScript: inscribeTransfer.PkScript,
|
||||
Tick: payload.Tick,
|
||||
BlockHeight: uint64(blockHeader.Height),
|
||||
OverallBalance: decimal.Zero, // defaults balance to zero if not found
|
||||
AvailableBalance: decimal.Zero,
|
||||
}
|
||||
}
|
||||
fromBalance.BlockHeight = uint64(blockHeader.Height)
|
||||
fromBalance.AvailableBalance = fromBalance.AvailableBalance.Add(payload.Amt)
|
||||
if _, ok := balances[fromPkScriptHex]; !ok {
|
||||
balances[fromPkScriptHex] = make(map[string]*entity.Balance)
|
||||
}
|
||||
balances[fromPkScriptHex][payload.Tick] = fromBalance
|
||||
if _, ok := newBalances[fromPkScriptHex]; !ok {
|
||||
newBalances[fromPkScriptHex] = make(map[string]*entity.Balance)
|
||||
}
|
||||
newBalances[fromPkScriptHex][payload.Tick] = fromBalance
|
||||
|
||||
event := &entity.EventTransferTransfer{
|
||||
Id: latestEventId + 1,
|
||||
InscriptionId: payload.Transfer.InscriptionId,
|
||||
InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
|
||||
Tick: payload.Tick,
|
||||
OriginalTick: payload.OriginalTick,
|
||||
TxHash: payload.Transfer.TxHash,
|
||||
BlockHeight: payload.Transfer.BlockHeight,
|
||||
TxIndex: payload.Transfer.TxIndex,
|
||||
Timestamp: blockHeader.Timestamp,
|
||||
FromPkScript: inscribeTransfer.PkScript,
|
||||
FromSatPoint: inscribeTransfer.SatPoint,
|
||||
FromInputIndex: payload.Transfer.FromInputIndex,
|
||||
ToPkScript: payload.Transfer.NewPkScript,
|
||||
ToSatPoint: payload.Transfer.NewSatPoint,
|
||||
ToOutputIndex: payload.Transfer.NewSatPoint.OutPoint.Index,
|
||||
SpentAsFee: true,
|
||||
Amount: payload.Amt,
|
||||
}
|
||||
newEventTransferTransfers = append(newEventTransferTransfers, event)
|
||||
|
||||
eventHashBuilder.WriteString(getEventTransferTransferString(event, tickEntry.Decimals) + eventHashSeparator)
|
||||
}
|
||||
handleEventTransferTransferNormal := func(payload *brc20.Payload, tickEntry *entity.TickEntry, inscribeTransfer *entity.EventInscribeTransfer) {
|
||||
// subtract balance from sender
|
||||
fromPkScriptHex := hex.EncodeToString(inscribeTransfer.PkScript)
|
||||
fromBalance, ok := balances[fromPkScriptHex][payload.Tick]
|
||||
if !ok {
|
||||
// skip transfer transfer event if from balance does not exist
|
||||
return
|
||||
}
|
||||
fromBalance.BlockHeight = uint64(blockHeader.Height)
|
||||
fromBalance.OverallBalance = fromBalance.OverallBalance.Sub(payload.Amt)
|
||||
if _, ok := balances[fromPkScriptHex]; !ok {
|
||||
balances[fromPkScriptHex] = make(map[string]*entity.Balance)
|
||||
}
|
||||
balances[fromPkScriptHex][payload.Tick] = fromBalance
|
||||
if _, ok := newBalances[fromPkScriptHex]; !ok {
|
||||
newBalances[fromPkScriptHex] = make(map[string]*entity.Balance)
|
||||
}
|
||||
newBalances[fromPkScriptHex][payload.Tick] = fromBalance
|
||||
|
||||
// add balance to receiver
|
||||
if bytes.Equal(payload.Transfer.NewPkScript, []byte{0x6a}) {
|
||||
// burn if sent to OP_RETURN
|
||||
tickEntry.BurnedAmount = tickEntry.BurnedAmount.Add(payload.Amt)
|
||||
tickEntries[payload.Tick] = tickEntry
|
||||
newTickEntryStates[payload.Tick] = tickEntry
|
||||
} else {
|
||||
toPkScriptHex := hex.EncodeToString(payload.Transfer.NewPkScript)
|
||||
toBalance, ok := balances[toPkScriptHex][payload.Tick]
|
||||
if !ok {
|
||||
toBalance = &entity.Balance{
|
||||
PkScript: payload.Transfer.NewPkScript,
|
||||
Tick: payload.Tick,
|
||||
BlockHeight: uint64(blockHeader.Height),
|
||||
OverallBalance: decimal.Zero, // defaults balance to zero if not found
|
||||
AvailableBalance: decimal.Zero,
|
||||
}
|
||||
}
|
||||
toBalance.BlockHeight = uint64(blockHeader.Height)
|
||||
toBalance.OverallBalance = toBalance.OverallBalance.Add(payload.Amt)
|
||||
toBalance.AvailableBalance = toBalance.AvailableBalance.Add(payload.Amt)
|
||||
if _, ok := balances[toPkScriptHex]; !ok {
|
||||
balances[toPkScriptHex] = make(map[string]*entity.Balance)
|
||||
}
|
||||
balances[toPkScriptHex][payload.Tick] = toBalance
|
||||
if _, ok := newBalances[toPkScriptHex]; !ok {
|
||||
newBalances[toPkScriptHex] = make(map[string]*entity.Balance)
|
||||
}
|
||||
newBalances[toPkScriptHex][payload.Tick] = toBalance
|
||||
}
|
||||
|
||||
event := &entity.EventTransferTransfer{
|
||||
Id: latestEventId + 1,
|
||||
InscriptionId: payload.Transfer.InscriptionId,
|
||||
InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
|
||||
Tick: payload.Tick,
|
||||
OriginalTick: payload.OriginalTick,
|
||||
TxHash: payload.Transfer.TxHash,
|
||||
BlockHeight: payload.Transfer.BlockHeight,
|
||||
TxIndex: payload.Transfer.TxIndex,
|
||||
Timestamp: blockHeader.Timestamp,
|
||||
FromPkScript: inscribeTransfer.PkScript,
|
||||
FromSatPoint: inscribeTransfer.SatPoint,
|
||||
FromInputIndex: payload.Transfer.FromInputIndex,
|
||||
ToPkScript: payload.Transfer.NewPkScript,
|
||||
ToSatPoint: payload.Transfer.NewSatPoint,
|
||||
ToOutputIndex: payload.Transfer.NewSatPoint.OutPoint.Index,
|
||||
SpentAsFee: false,
|
||||
Amount: payload.Amt,
|
||||
}
|
||||
newEventTransferTransfers = append(newEventTransferTransfers, event)
|
||||
|
||||
eventHashBuilder.WriteString(getEventTransferTransferString(event, tickEntry.Decimals) + eventHashSeparator)
|
||||
}
|
||||
|
||||
for _, payload := range payloads {
|
||||
tickEntry := tickEntries[payload.Tick]
|
||||
|
||||
if payload.Transfer.SentAsFee && payload.Transfer.OldSatPoint == (ordinals.SatPoint{}) {
|
||||
// skip inscriptions inscribed as fee
|
||||
continue
|
||||
}
|
||||
|
||||
switch payload.Op {
|
||||
case brc20.OperationDeploy:
|
||||
handleEventDeploy(payload, tickEntry)
|
||||
case brc20.OperationMint:
|
||||
handleEventMint(payload, tickEntry)
|
||||
case brc20.OperationTransfer:
|
||||
if payload.Transfer.TransferCount > 2 {
|
||||
// skip used transfer inscriptions
|
||||
continue
|
||||
}
|
||||
if tickEntry == nil {
|
||||
// skip transfer inscriptions for non-existent ticks
|
||||
continue
|
||||
}
|
||||
if -payload.Amt.Exponent() > int32(tickEntry.Decimals) {
|
||||
// skip transfer inscriptions with decimals greater than allowed
|
||||
continue
|
||||
}
|
||||
|
||||
if payload.Transfer.OldSatPoint == (ordinals.SatPoint{}) {
|
||||
handleEventInscribeTransfer(payload, tickEntry)
|
||||
} else {
|
||||
// transfer transfer event
|
||||
inscribeTransfer, ok := eventInscribeTransfers[payload.Transfer.InscriptionId]
|
||||
if !ok {
|
||||
// skip transfer transfer event if prior inscribe transfer event does not exist
|
||||
continue
|
||||
}
|
||||
|
||||
if payload.Transfer.SentAsFee {
|
||||
handleEventTransferTransferAsFee(payload, tickEntry, inscribeTransfer)
|
||||
} else {
|
||||
handleEventTransferTransferNormal(payload, tickEntry, inscribeTransfer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
p.newTickEntries = newTickEntries
|
||||
p.newTickEntryStates = newTickEntryStates
|
||||
p.newEventDeploys = newEventDeploys
|
||||
p.newEventMints = newEventMints
|
||||
p.newEventInscribeTransfers = newEventInscribeTransfers
|
||||
p.newEventTransferTransfers = newEventTransferTransfers
|
||||
p.newBalances = newBalances
|
||||
p.eventHashString = eventHashBuilder.String()
|
||||
return nil
|
||||
}
|
||||
570
modules/brc20/processor_inscription.go
Normal file
570
modules/brc20/processor_inscription.go
Normal file
@@ -0,0 +1,570 @@
|
||||
package brc20
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/btcsuite/btcd/blockchain"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
"github.com/samber/lo"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
func (p *Processor) processInscriptionTx(ctx context.Context, tx *types.Transaction, blockHeader types.BlockHeader) error {
|
||||
ctx = logger.WithContext(ctx, slogx.String("tx_hash", tx.TxHash.String()))
|
||||
envelopes := ordinals.ParseEnvelopesFromTx(tx)
|
||||
inputOutPoints := lo.Map(tx.TxIn, func(txIn *types.TxIn, _ int) wire.OutPoint {
|
||||
return wire.OutPoint{
|
||||
Hash: txIn.PreviousOutTxHash,
|
||||
Index: txIn.PreviousOutIndex,
|
||||
}
|
||||
})
|
||||
transfersInOutPoints, err := p.getInscriptionTransfersInOutPoints(ctx, inputOutPoints)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get inscriptions in outpoints")
|
||||
}
|
||||
// cache outpoint values for future blocks
|
||||
for outIndex, txOut := range tx.TxOut {
|
||||
p.outPointValueCache.Add(wire.OutPoint{
|
||||
Hash: tx.TxHash,
|
||||
Index: uint32(outIndex),
|
||||
}, uint64(txOut.Value))
|
||||
}
|
||||
if len(envelopes) == 0 && len(transfersInOutPoints) == 0 {
|
||||
// no inscription activity, skip
|
||||
return nil
|
||||
}
|
||||
|
||||
floatingInscriptions := make([]*entity.Flotsam, 0)
|
||||
totalInputValue := uint64(0)
|
||||
totalOutputValue := lo.SumBy(tx.TxOut, func(txOut *types.TxOut) uint64 { return uint64(txOut.Value) })
|
||||
inscribeOffsets := make(map[uint64]*struct {
|
||||
inscriptionId ordinals.InscriptionId
|
||||
count int
|
||||
})
|
||||
idCounter := uint32(0)
|
||||
inputValues, err := p.getOutPointValues(ctx, inputOutPoints)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get outpoint values")
|
||||
}
|
||||
for i, input := range tx.TxIn {
|
||||
inputOutPoint := wire.OutPoint{
|
||||
Hash: input.PreviousOutTxHash,
|
||||
Index: input.PreviousOutIndex,
|
||||
}
|
||||
inputValue := inputValues[inputOutPoint]
|
||||
// skip coinbase inputs since there can't be an inscription in coinbase
|
||||
if input.PreviousOutTxHash.IsEqual(&chainhash.Hash{}) {
|
||||
totalInputValue += p.getBlockSubsidy(uint64(tx.BlockHeight))
|
||||
continue
|
||||
}
|
||||
|
||||
transfersInOutPoint := transfersInOutPoints[inputOutPoint]
|
||||
for satPoint, transfers := range transfersInOutPoint {
|
||||
offset := totalInputValue + satPoint.Offset
|
||||
for _, transfer := range transfers {
|
||||
floatingInscriptions = append(floatingInscriptions, &entity.Flotsam{
|
||||
Offset: offset,
|
||||
InscriptionId: transfer.InscriptionId,
|
||||
Tx: tx,
|
||||
OriginOld: &entity.OriginOld{
|
||||
OldSatPoint: satPoint,
|
||||
Content: transfer.Content,
|
||||
InputIndex: uint32(i),
|
||||
},
|
||||
})
|
||||
if _, ok := inscribeOffsets[offset]; !ok {
|
||||
inscribeOffsets[offset] = &struct {
|
||||
inscriptionId ordinals.InscriptionId
|
||||
count int
|
||||
}{transfer.InscriptionId, 0}
|
||||
}
|
||||
inscribeOffsets[offset].count++
|
||||
}
|
||||
}
|
||||
// offset on output to inscribe new inscriptions from this input
|
||||
offset := totalInputValue
|
||||
totalInputValue += inputValue
|
||||
|
||||
envelopesInInput := lo.Filter(envelopes, func(envelope *ordinals.Envelope, _ int) bool {
|
||||
return envelope.InputIndex == uint32(i)
|
||||
})
|
||||
for _, envelope := range envelopesInInput {
|
||||
inscriptionId := ordinals.InscriptionId{
|
||||
TxHash: tx.TxHash,
|
||||
Index: idCounter,
|
||||
}
|
||||
var cursed, cursedForBRC20 bool
|
||||
if envelope.UnrecognizedEvenField || // unrecognized even field
|
||||
envelope.DuplicateField || // duplicate field
|
||||
envelope.IncompleteField || // incomplete field
|
||||
envelope.InputIndex != 0 || // not first input
|
||||
envelope.Offset != 0 || // not first envelope in input
|
||||
envelope.Inscription.Pointer != nil || // contains pointer
|
||||
envelope.PushNum || // contains pushnum opcodes
|
||||
envelope.Stutter { // contains stuttering curse structure
|
||||
cursed = true
|
||||
cursedForBRC20 = true
|
||||
}
|
||||
if initial, ok := inscribeOffsets[offset]; !cursed && ok {
|
||||
if initial.count > 1 {
|
||||
cursed = true // reinscription
|
||||
cursedForBRC20 = true
|
||||
} else {
|
||||
initialInscriptionEntry, err := p.getInscriptionEntryById(ctx, initial.inscriptionId)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get inscription entry id %s", initial.inscriptionId)
|
||||
}
|
||||
if !initialInscriptionEntry.Cursed {
|
||||
cursed = true // reinscription curse if initial inscription is not cursed
|
||||
}
|
||||
if !initialInscriptionEntry.CursedForBRC20 {
|
||||
cursedForBRC20 = true
|
||||
}
|
||||
}
|
||||
}
|
||||
// inscriptions are no longer cursed after jubilee, but BRC20 still considers them as cursed
|
||||
if cursed && uint64(tx.BlockHeight) > ordinals.GetJubileeHeight(p.network) {
|
||||
cursed = false
|
||||
}
|
||||
|
||||
unbound := inputValue == 0 || envelope.UnrecognizedEvenField
|
||||
if envelope.Inscription.Pointer != nil && *envelope.Inscription.Pointer < totalOutputValue {
|
||||
offset = *envelope.Inscription.Pointer
|
||||
}
|
||||
|
||||
floatingInscriptions = append(floatingInscriptions, &entity.Flotsam{
|
||||
Offset: offset,
|
||||
InscriptionId: inscriptionId,
|
||||
Tx: tx,
|
||||
OriginNew: &entity.OriginNew{
|
||||
Reinscription: inscribeOffsets[offset] != nil,
|
||||
Cursed: cursed,
|
||||
CursedForBRC20: cursedForBRC20,
|
||||
Fee: 0,
|
||||
Hidden: false, // we don't care about this field for brc20
|
||||
Parent: envelope.Inscription.Parent,
|
||||
Pointer: envelope.Inscription.Pointer,
|
||||
Unbound: unbound,
|
||||
Inscription: envelope.Inscription,
|
||||
},
|
||||
})
|
||||
|
||||
if _, ok := inscribeOffsets[offset]; !ok {
|
||||
inscribeOffsets[offset] = &struct {
|
||||
inscriptionId ordinals.InscriptionId
|
||||
count int
|
||||
}{inscriptionId, 0}
|
||||
}
|
||||
inscribeOffsets[offset].count++
|
||||
idCounter++
|
||||
}
|
||||
}
|
||||
|
||||
// parents must exist in floatingInscriptions to be valid
|
||||
potentialParents := make(map[ordinals.InscriptionId]struct{})
|
||||
for _, flotsam := range floatingInscriptions {
|
||||
potentialParents[flotsam.InscriptionId] = struct{}{}
|
||||
}
|
||||
for _, flotsam := range floatingInscriptions {
|
||||
if flotsam.OriginNew != nil && flotsam.OriginNew.Parent != nil {
|
||||
if _, ok := potentialParents[*flotsam.OriginNew.Parent]; !ok {
|
||||
// parent not found, ignore parent
|
||||
flotsam.OriginNew.Parent = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// calculate fee for each new inscription
|
||||
for _, flotsam := range floatingInscriptions {
|
||||
if flotsam.OriginNew != nil {
|
||||
flotsam.OriginNew.Fee = (totalInputValue - totalOutputValue) / uint64(idCounter)
|
||||
}
|
||||
}
|
||||
|
||||
// if tx is coinbase, add inscriptions sent as fee to outputs of this tx
|
||||
ownInscriptionCount := len(floatingInscriptions)
|
||||
isCoinbase := tx.TxIn[0].PreviousOutTxHash.IsEqual(&chainhash.Hash{})
|
||||
if isCoinbase {
|
||||
floatingInscriptions = append(floatingInscriptions, p.flotsamsSentAsFee...)
|
||||
}
|
||||
// sort floatingInscriptions by offset
|
||||
slices.SortFunc(floatingInscriptions, func(i, j *entity.Flotsam) int {
|
||||
return int(i.Offset) - int(j.Offset)
|
||||
})
|
||||
|
||||
outputValue := uint64(0)
|
||||
curIncrIdx := 0
|
||||
// newLocations := make(map[ordinals.SatPoint][]*Flotsam)
|
||||
type location struct {
|
||||
satPoint ordinals.SatPoint
|
||||
flotsam *entity.Flotsam
|
||||
sentAsFee bool
|
||||
}
|
||||
newLocations := make([]*location, 0)
|
||||
outputToSumValue := make([]uint64, 0, len(tx.TxOut))
|
||||
for outIndex, txOut := range tx.TxOut {
|
||||
end := outputValue + uint64(txOut.Value)
|
||||
|
||||
// process all inscriptions that are supposed to be inscribed in this output
|
||||
for curIncrIdx < len(floatingInscriptions) && floatingInscriptions[curIncrIdx].Offset < end {
|
||||
newSatPoint := ordinals.SatPoint{
|
||||
OutPoint: wire.OutPoint{
|
||||
Hash: tx.TxHash,
|
||||
Index: uint32(outIndex),
|
||||
},
|
||||
Offset: floatingInscriptions[curIncrIdx].Offset - outputValue,
|
||||
}
|
||||
// newLocations[newSatPoint] = append(newLocations[newSatPoint], floatingInscriptions[curIncrIdx])
|
||||
newLocations = append(newLocations, &location{
|
||||
satPoint: newSatPoint,
|
||||
flotsam: floatingInscriptions[curIncrIdx],
|
||||
sentAsFee: isCoinbase && curIncrIdx >= ownInscriptionCount, // if curIncrIdx >= ownInscriptionCount, then current inscription came from p.flotSamsSentAsFee
|
||||
})
|
||||
curIncrIdx++
|
||||
}
|
||||
|
||||
outputValue = end
|
||||
outputToSumValue = append(outputToSumValue, outputValue)
|
||||
}
|
||||
|
||||
for _, loc := range newLocations {
|
||||
satPoint := loc.satPoint
|
||||
flotsam := loc.flotsam
|
||||
sentAsFee := loc.sentAsFee
|
||||
// TODO: not sure if we still need to handle pointer here, it's already handled above.
|
||||
if flotsam.OriginNew != nil && flotsam.OriginNew.Pointer != nil {
|
||||
pointer := *flotsam.OriginNew.Pointer
|
||||
for outIndex, outputValue := range outputToSumValue {
|
||||
start := uint64(0)
|
||||
if outIndex > 0 {
|
||||
start = outputToSumValue[outIndex-1]
|
||||
}
|
||||
end := outputValue
|
||||
if start <= pointer && pointer < end {
|
||||
satPoint.Offset = pointer - start
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := p.updateInscriptionLocation(ctx, satPoint, flotsam, sentAsFee, tx, blockHeader); err != nil {
|
||||
return errors.Wrap(err, "failed to update inscription location")
|
||||
}
|
||||
}
|
||||
|
||||
// handle leftover flotsams (flotsams with offset over total output value) )
|
||||
if isCoinbase {
|
||||
// if there are leftover inscriptions in coinbase, they are lost permanently
|
||||
for _, flotsam := range floatingInscriptions[curIncrIdx:] {
|
||||
newSatPoint := ordinals.SatPoint{
|
||||
OutPoint: wire.OutPoint{},
|
||||
Offset: p.lostSats + flotsam.Offset - totalOutputValue,
|
||||
}
|
||||
if err := p.updateInscriptionLocation(ctx, newSatPoint, flotsam, false, tx, blockHeader); err != nil {
|
||||
return errors.Wrap(err, "failed to update inscription location")
|
||||
}
|
||||
}
|
||||
p.lostSats += p.blockReward - totalOutputValue
|
||||
} else {
|
||||
// if there are leftover inscriptions in non-coinbase tx, they are stored in p.flotsamsSentAsFee for processing in this block's coinbase tx
|
||||
for _, flotsam := range floatingInscriptions[curIncrIdx:] {
|
||||
flotsam.Offset = p.blockReward + flotsam.Offset - totalOutputValue
|
||||
p.flotsamsSentAsFee = append(p.flotsamsSentAsFee, flotsam)
|
||||
}
|
||||
// add fees to block reward
|
||||
p.blockReward = totalInputValue - totalOutputValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateInscriptionLocation records the new location of one flotsam.
//
// Two mutually exclusive cases:
//   - OriginOld (an existing inscription moved by this tx): increments the
//     entry's TransferCount and appends an InscriptionTransfer.
//   - OriginNew (an inscription revealed by this tx): assigns an inscription
//     number (negative for cursed), strips non-BRC20 content, and appends both
//     a new InscriptionEntry and its first InscriptionTransfer.
//
// Results are written to the processor's flush buffers
// (newInscriptionTransfers, newInscriptionEntries, newInscriptionEntryStates);
// nothing is persisted here. Panics if the flotsam has neither origin — that
// would be a programmer bug upstream.
//
// NOTE(review): for "lost" inscriptions the caller passes a zero OutPoint, so
// txOut below resolves to tx.TxOut[0] of the coinbase tx and its PkScript is
// recorded as NewPkScript — confirm this is the intended encoding of "lost".
func (p *Processor) updateInscriptionLocation(ctx context.Context, newSatPoint ordinals.SatPoint, flotsam *entity.Flotsam, sentAsFee bool, tx *types.Transaction, blockHeader types.BlockHeader) error {
	txOut := tx.TxOut[newSatPoint.OutPoint.Index]
	if flotsam.OriginOld != nil {
		entry, err := p.getInscriptionEntryById(ctx, flotsam.InscriptionId)
		if err != nil {
			return errors.Wrapf(err, "failed to get inscription entry id %s", flotsam.InscriptionId)
		}
		entry.TransferCount++
		transfer := &entity.InscriptionTransfer{
			InscriptionId:  flotsam.InscriptionId,
			BlockHeight:    uint64(flotsam.Tx.BlockHeight), // use flotsam's tx to track tx that initiated the transfer
			TxIndex:        flotsam.Tx.Index,               // use flotsam's tx to track tx that initiated the transfer
			TxHash:         flotsam.Tx.TxHash,
			Content:        flotsam.OriginOld.Content,
			FromInputIndex: flotsam.OriginOld.InputIndex,
			OldSatPoint:    flotsam.OriginOld.OldSatPoint,
			NewSatPoint:    newSatPoint,
			NewPkScript:    txOut.PkScript,
			NewOutputValue: uint64(txOut.Value),
			SentAsFee:      sentAsFee,
			TransferCount:  entry.TransferCount,
		}

		// track transfers even if transfer count exceeds 2 (because we need to check for reinscriptions)
		p.newInscriptionTransfers = append(p.newInscriptionTransfers, transfer)
		p.newInscriptionEntryStates[entry.Id] = entry
		return nil
	}

	if flotsam.OriginNew != nil {
		origin := flotsam.OriginNew
		var inscriptionNumber int64
		// sequence number is a monotonically increasing counter over ALL
		// inscriptions (cursed and blessed), captured before the counters move.
		sequenceNumber := p.cursedInscriptionCount + p.blessedInscriptionCount
		if origin.Cursed {
			// cursed inscriptions get negative numbers starting at -1
			inscriptionNumber = -int64(p.cursedInscriptionCount + 1)
			p.cursedInscriptionCount++
		} else {
			// blessed inscriptions get non-negative numbers starting at 0
			inscriptionNumber = int64(p.blessedInscriptionCount)
			p.blessedInscriptionCount++
		}
		// if not valid brc20 inscription, delete content to save space
		if !isBRC20Inscription(origin.Inscription) {
			origin.Inscription.Content = nil
			origin.Inscription.ContentType = ""
			origin.Inscription.ContentEncoding = ""
		}
		transfer := &entity.InscriptionTransfer{
			InscriptionId:  flotsam.InscriptionId,
			BlockHeight:    uint64(flotsam.Tx.BlockHeight), // use flotsam's tx to track tx that initiated the transfer
			TxIndex:        flotsam.Tx.Index,               // use flotsam's tx to track tx that initiated the transfer
			TxHash:         flotsam.Tx.TxHash,
			Content:        origin.Inscription.Content,
			FromInputIndex: 0, // unused
			OldSatPoint:    ordinals.SatPoint{},
			NewSatPoint:    newSatPoint,
			NewPkScript:    txOut.PkScript,
			NewOutputValue: uint64(txOut.Value),
			SentAsFee:      sentAsFee,
			TransferCount:  1, // count inscription as first transfer
		}
		entry := &ordinals.InscriptionEntry{
			Id:              flotsam.InscriptionId,
			Number:          inscriptionNumber,
			SequenceNumber:  sequenceNumber,
			Cursed:          origin.Cursed,
			CursedForBRC20:  origin.CursedForBRC20,
			CreatedAt:       blockHeader.Timestamp,
			CreatedAtHeight: uint64(blockHeader.Height),
			Inscription:     origin.Inscription,
			TransferCount:   1, // count inscription as first transfer
		}
		p.newInscriptionTransfers = append(p.newInscriptionTransfers, transfer)
		p.newInscriptionEntries[entry.Id] = entry
		p.newInscriptionEntryStates[entry.Id] = entry

		return nil
	}
	panic("unreachable")
}
|
||||
|
||||
// brc20Inscription is the minimal JSON shape used to sniff whether an
// inscription payload is a BRC-20 operation; only the protocol field "p"
// is inspected (see isBRC20Inscription).
type brc20Inscription struct {
	P string `json:"p"`
}
|
||||
|
||||
func isBRC20Inscription(inscription ordinals.Inscription) bool {
|
||||
if inscription.ContentType != "application/json" && inscription.ContentType != "text/plain" {
|
||||
return false
|
||||
}
|
||||
|
||||
// attempt to parse content as json
|
||||
if inscription.Content == nil {
|
||||
return false
|
||||
}
|
||||
var parsed brc20Inscription
|
||||
if err := json.Unmarshal(inscription.Content, &parsed); err != nil {
|
||||
return false
|
||||
}
|
||||
if parsed.P != "brc-20" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// getOutPointValues returns the sat value of each given outpoint, serving from
// outPointValueCache where possible and concurrently fetching the remaining
// txs' outputs from the Bitcoin client (one goroutine per distinct tx hash,
// results merged under a mutex and written back into the cache).
//
// NOTE(review): a cached value of 0 is indistinguishable from a cache miss
// (MGet's zero value), so zero-value outputs (e.g. OP_RETURN) are re-fetched
// every call — correct but wasteful; confirm the cache API has no "ok" form.
// The final loop assumes every still-zero outpoint was fetched and that its
// Index is within the fetched output slice; an out-of-range index would panic.
func (p *Processor) getOutPointValues(ctx context.Context, outPoints []wire.OutPoint) (map[wire.OutPoint]uint64, error) {
	// try to get from cache if exists
	cacheValues := p.outPointValueCache.MGet(outPoints)
	result := make(map[wire.OutPoint]uint64)

	outPointsToFetch := make([]wire.OutPoint, 0)
	for i, outPoint := range outPoints {
		if cacheValues[i] != 0 {
			result[outPoint] = cacheValues[i]
		} else {
			outPointsToFetch = append(outPointsToFetch, outPoint)
		}
	}
	eg, ectx := errgroup.WithContext(ctx)
	// fetch each distinct tx hash once, even if several outpoints share it
	txHashes := make(map[chainhash.Hash]struct{})
	for _, outPoint := range outPointsToFetch {
		txHashes[outPoint.Hash] = struct{}{}
	}
	txOutsByHash := make(map[chainhash.Hash][]*types.TxOut)
	var mutex sync.Mutex
	for txHash := range txHashes {
		txHash := txHash // capture loop variable (pre-Go 1.22 semantics)
		eg.Go(func() error {
			txOuts, err := p.btcClient.GetTransactionOutputs(ectx, txHash)
			if err != nil {
				return errors.Wrap(err, "failed to get transaction outputs")
			}

			// update cache; mutex guards both txOutsByHash and the cache writes
			mutex.Lock()
			defer mutex.Unlock()

			txOutsByHash[txHash] = txOuts
			for i, txOut := range txOuts {
				p.outPointValueCache.Add(wire.OutPoint{Hash: txHash, Index: uint32(i)}, uint64(txOut.Value))
			}
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, errors.WithStack(err)
	}
	// fill in everything not already resolved from the cache
	for i := range outPoints {
		if result[outPoints[i]] == 0 {
			result[outPoints[i]] = uint64(txOutsByHash[outPoints[i].Hash][outPoints[i].Index].Value)
		}
	}
	return result, nil
}
|
||||
|
||||
// getInscriptionTransfersInOutPoints returns the latest inscription transfers
// sitting on each given outpoint, grouped by outpoint and then by satpoint.
// It first consults the in-memory flush buffer (p.newInscriptionTransfers, not
// yet persisted) and only queries the datagateway for outpoints with no
// buffered transfer.
//
// NOTE(review): the inner `break` stops after the FIRST buffered transfer
// matching an outpoint, so sibling transfers buffered on the same outpoint are
// dropped, and the datagateway is then not queried for that outpoint at all —
// confirm that at most one buffered transfer per outpoint is possible here.
// Also note the linear scan of the buffer per outpoint (O(n·m)).
func (p *Processor) getInscriptionTransfersInOutPoints(ctx context.Context, outPoints []wire.OutPoint) (map[wire.OutPoint]map[ordinals.SatPoint][]*entity.InscriptionTransfer, error) {
	// try to get from flush buffer if exists
	result := make(map[wire.OutPoint]map[ordinals.SatPoint][]*entity.InscriptionTransfer)

	outPointsToFetch := make([]wire.OutPoint, 0)
	for _, outPoint := range outPoints {
		var found bool
		for _, transfer := range p.newInscriptionTransfers {
			if transfer.NewSatPoint.OutPoint == outPoint {
				found = true
				if _, ok := result[outPoint]; !ok {
					result[outPoint] = make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
				}
				result[outPoint][transfer.NewSatPoint] = append(result[outPoint][transfer.NewSatPoint], transfer)
				break
			}
		}
		if !found {
			outPointsToFetch = append(outPointsToFetch, outPoint)
		}
	}

	// fetch the rest from persistent storage
	transfers, err := p.brc20Dg.GetInscriptionTransfersInOutPoints(ctx, outPointsToFetch)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get inscriptions by outpoint")
	}

	// merge persisted transfers into the result, grouped by outpoint
	for satPoint, transferList := range transfers {
		if _, ok := result[satPoint.OutPoint]; !ok {
			result[satPoint.OutPoint] = make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
		}
		result[satPoint.OutPoint][satPoint] = append(result[satPoint.OutPoint][satPoint], transferList...)
	}
	return result, nil
}
|
||||
|
||||
func (p *Processor) getInscriptionEntryById(ctx context.Context, id ordinals.InscriptionId) (*ordinals.InscriptionEntry, error) {
|
||||
inscriptions, err := p.getInscriptionEntriesByIds(ctx, []ordinals.InscriptionId{id})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get inscriptions by outpoint")
|
||||
}
|
||||
inscription, ok := inscriptions[id]
|
||||
if !ok {
|
||||
return nil, errors.Wrap(errs.NotFound, "inscription not found")
|
||||
}
|
||||
return inscription, nil
|
||||
}
|
||||
|
||||
func (p *Processor) getInscriptionEntriesByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*ordinals.InscriptionEntry, error) {
|
||||
// try to get from cache if exists
|
||||
result := make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
|
||||
|
||||
idsToFetch := make([]ordinals.InscriptionId, 0)
|
||||
for _, id := range ids {
|
||||
if inscriptionEntry, ok := p.newInscriptionEntryStates[id]; ok {
|
||||
result[id] = inscriptionEntry
|
||||
} else {
|
||||
idsToFetch = append(idsToFetch, id)
|
||||
}
|
||||
}
|
||||
|
||||
if len(idsToFetch) > 0 {
|
||||
inscriptions, err := p.brc20Dg.GetInscriptionEntriesByIds(ctx, idsToFetch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get inscriptions by outpoint")
|
||||
}
|
||||
for id, inscription := range inscriptions {
|
||||
result[id] = inscription
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *Processor) getInscriptionNumbersByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]int64, error) {
|
||||
// try to get from cache if exists
|
||||
result := make(map[ordinals.InscriptionId]int64)
|
||||
|
||||
idsToFetch := make([]ordinals.InscriptionId, 0)
|
||||
for _, id := range ids {
|
||||
if entry, ok := p.newInscriptionEntryStates[id]; ok {
|
||||
result[id] = int64(entry.Number)
|
||||
} else {
|
||||
idsToFetch = append(idsToFetch, id)
|
||||
}
|
||||
}
|
||||
|
||||
if len(idsToFetch) > 0 {
|
||||
inscriptions, err := p.brc20Dg.GetInscriptionNumbersByIds(ctx, idsToFetch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get inscriptions by outpoint")
|
||||
}
|
||||
for id, number := range inscriptions {
|
||||
result[id] = number
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *Processor) getInscriptionParentsByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]ordinals.InscriptionId, error) {
|
||||
// try to get from cache if exists
|
||||
result := make(map[ordinals.InscriptionId]ordinals.InscriptionId)
|
||||
|
||||
idsToFetch := make([]ordinals.InscriptionId, 0)
|
||||
for _, id := range ids {
|
||||
if entry, ok := p.newInscriptionEntryStates[id]; ok {
|
||||
if entry.Inscription.Parent != nil {
|
||||
result[id] = *entry.Inscription.Parent
|
||||
}
|
||||
} else {
|
||||
idsToFetch = append(idsToFetch, id)
|
||||
}
|
||||
}
|
||||
|
||||
if len(idsToFetch) > 0 {
|
||||
inscriptions, err := p.brc20Dg.GetInscriptionParentsByIds(ctx, idsToFetch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get inscriptions by outpoint")
|
||||
}
|
||||
for id, parent := range inscriptions {
|
||||
result[id] = parent
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *Processor) getBlockSubsidy(blockHeight uint64) uint64 {
|
||||
return uint64(blockchain.CalcBlockSubsidy(int32(blockHeight), p.network.ChainParams()))
|
||||
}
|
||||
222
modules/brc20/processor_process.go
Normal file
222
modules/brc20/processor_process.go
Normal file
@@ -0,0 +1,222 @@
|
||||
package brc20
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"slices"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/gaze-network/indexer-network/common/errs"
|
||||
"github.com/gaze-network/indexer-network/core/types"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
|
||||
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger"
|
||||
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// Process implements indexer.Processor.
|
||||
func (p *Processor) Process(ctx context.Context, blocks []*types.Block) error {
|
||||
for _, block := range blocks {
|
||||
ctx = logger.WithContext(ctx, slogx.Uint64("height", uint64(block.Header.Height)))
|
||||
logger.DebugContext(ctx, "Processing new block")
|
||||
p.blockReward = p.getBlockSubsidy(uint64(block.Header.Height))
|
||||
p.flotsamsSentAsFee = make([]*entity.Flotsam, 0)
|
||||
|
||||
// put coinbase tx (first tx) at the end of block
|
||||
transactions := append(block.Transactions[1:], block.Transactions[0])
|
||||
for _, tx := range transactions {
|
||||
if err := p.processInscriptionTx(ctx, tx, block.Header); err != nil {
|
||||
return errors.Wrap(err, "failed to process tx")
|
||||
}
|
||||
}
|
||||
|
||||
// sort transfers by tx index, output index, output sat offset
|
||||
// NOTE: ord indexes inscription transfers spent as fee at the end of the block, but brc20 indexes them as soon as they are sent
|
||||
slices.SortFunc(p.newInscriptionTransfers, func(t1, t2 *entity.InscriptionTransfer) int {
|
||||
if t1.TxIndex != t2.TxIndex {
|
||||
return int(t1.TxIndex) - int(t2.TxIndex)
|
||||
}
|
||||
if t1.SentAsFee != t2.SentAsFee {
|
||||
// transfers sent as fee should be ordered after non-fees
|
||||
if t1.SentAsFee {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
if t1.NewSatPoint.OutPoint.Index != t2.NewSatPoint.OutPoint.Index {
|
||||
return int(t1.NewSatPoint.OutPoint.Index) - int(t2.NewSatPoint.OutPoint.Index)
|
||||
}
|
||||
return int(t1.NewSatPoint.Offset) - int(t2.NewSatPoint.Offset)
|
||||
})
|
||||
|
||||
if err := p.processBRC20States(ctx, p.newInscriptionTransfers, block.Header); err != nil {
|
||||
return errors.Wrap(err, "failed to process brc20 states")
|
||||
}
|
||||
|
||||
if err := p.flushBlock(ctx, block.Header); err != nil {
|
||||
return errors.Wrap(err, "failed to flush block")
|
||||
}
|
||||
|
||||
logger.DebugContext(ctx, "Inserted new block")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// flushBlock persists all state accumulated while processing a single block
// (event hash, inscription entries/states/transfers, processor stats, tick
// entries/states, BRC-20 events, and balances) in one database transaction,
// then clears the corresponding in-memory buffers. The deferred Rollback is
// a no-op after a successful Commit.
func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeader) error {
	brc20DgTx, err := p.brc20Dg.BeginBRC20Tx(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to begin transaction")
	}
	defer func() {
		if err := brc20DgTx.Rollback(ctx); err != nil {
			logger.WarnContext(ctx, "failed to rollback transaction",
				slogx.Error(err),
				slogx.String("event", "rollback_brc20_insertion"),
			)
		}
	}()

	blockHeight := uint64(blockHeader.Height)

	// calculate event hash
	{
		// strip a single trailing separator left over from event accumulation
		eventHashString := p.eventHashString
		if len(eventHashString) > 0 && eventHashString[len(eventHashString)-1:] == eventHashSeparator {
			eventHashString = eventHashString[:len(eventHashString)-1]
		}
		eventHash := sha256.Sum256([]byte(eventHashString))
		prevIndexedBlock, err := brc20DgTx.GetIndexedBlockByHeight(ctx, blockHeader.Height-1)
		// synthesize the genesis indexed block when we are one past the
		// module's configured starting block and nothing is stored yet
		if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == startingBlockHeader[p.network].Height {
			prevIndexedBlock = &entity.IndexedBlock{
				Height:              uint64(startingBlockHeader[p.network].Height),
				Hash:                startingBlockHeader[p.network].Hash,
				EventHash:           []byte{},
				CumulativeEventHash: []byte{},
			}
			err = nil
		}
		if err != nil {
			return errors.Wrap(err, "failed to get previous indexed block")
		}
		// chain this block's event hash onto the previous cumulative hash
		var cumulativeEventHash [32]byte
		if len(prevIndexedBlock.CumulativeEventHash) == 0 {
			cumulativeEventHash = eventHash
		} else {
			cumulativeEventHash = sha256.Sum256([]byte(hex.EncodeToString(prevIndexedBlock.CumulativeEventHash[:]) + hex.EncodeToString(eventHash[:])))
		}
		if err := brc20DgTx.CreateIndexedBlock(ctx, &entity.IndexedBlock{
			Height:              blockHeight,
			Hash:                blockHeader.Hash,
			EventHash:           eventHash[:],
			CumulativeEventHash: cumulativeEventHash[:],
		}); err != nil {
			return errors.Wrap(err, "failed to create indexed block")
		}
		p.eventHashString = ""
	}

	// flush new inscription entries
	{
		newInscriptionEntries := lo.Values(p.newInscriptionEntries)
		if err := brc20DgTx.CreateInscriptionEntries(ctx, blockHeight, newInscriptionEntries); err != nil {
			return errors.Wrap(err, "failed to create inscription entries")
		}
		p.newInscriptionEntries = make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
	}

	// flush new inscription entry states
	{
		newInscriptionEntryStates := lo.Values(p.newInscriptionEntryStates)
		if err := brc20DgTx.CreateInscriptionEntryStates(ctx, blockHeight, newInscriptionEntryStates); err != nil {
			return errors.Wrap(err, "failed to create inscription entry states")
		}
		p.newInscriptionEntryStates = make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
	}

	// flush new inscription transfers
	{
		if err := brc20DgTx.CreateInscriptionTransfers(ctx, p.newInscriptionTransfers); err != nil {
			return errors.Wrap(err, "failed to create inscription transfers")
		}
		p.newInscriptionTransfers = make([]*entity.InscriptionTransfer, 0)
	}

	// flush processor stats
	{
		stats := &entity.ProcessorStats{
			BlockHeight:             blockHeight,
			CursedInscriptionCount:  p.cursedInscriptionCount,
			BlessedInscriptionCount: p.blessedInscriptionCount,
			LostSats:                p.lostSats,
		}
		if err := brc20DgTx.CreateProcessorStats(ctx, stats); err != nil {
			return errors.Wrap(err, "failed to create processor stats")
		}
	}

	// flush new tick entries
	{
		newTickEntries := lo.Values(p.newTickEntries)
		if err := brc20DgTx.CreateTickEntries(ctx, blockHeight, newTickEntries); err != nil {
			return errors.Wrap(err, "failed to create tick entries")
		}
		p.newTickEntries = make(map[string]*entity.TickEntry)
	}

	// flush new tick entry states
	{
		newTickEntryStates := lo.Values(p.newTickEntryStates)
		if err := brc20DgTx.CreateTickEntryStates(ctx, blockHeight, newTickEntryStates); err != nil {
			return errors.Wrap(err, "failed to create tick entry states")
		}
		p.newTickEntryStates = make(map[string]*entity.TickEntry)
	}

	// flush new events
	{
		if err := brc20DgTx.CreateEventDeploys(ctx, p.newEventDeploys); err != nil {
			return errors.Wrap(err, "failed to create event deploys")
		}
		if err := brc20DgTx.CreateEventMints(ctx, p.newEventMints); err != nil {
			return errors.Wrap(err, "failed to create event mints")
		}
		if err := brc20DgTx.CreateEventInscribeTransfers(ctx, p.newEventInscribeTransfers); err != nil {
			return errors.Wrap(err, "failed to create event inscribe transfers")
		}
		if err := brc20DgTx.CreateEventTransferTransfers(ctx, p.newEventTransferTransfers); err != nil {
			return errors.Wrap(err, "failed to create event transfer transfers")
		}
		p.newEventDeploys = make([]*entity.EventDeploy, 0)
		p.newEventMints = make([]*entity.EventMint, 0)
		p.newEventInscribeTransfers = make([]*entity.EventInscribeTransfer, 0)
		p.newEventTransferTransfers = make([]*entity.EventTransferTransfer, 0)
	}

	// flush new balances
	{
		// p.newBalances is keyed pkScript -> tick -> balance; flatten to a list
		newBalances := make([]*entity.Balance, 0)
		for _, tickBalances := range p.newBalances {
			for _, balance := range tickBalances {
				newBalances = append(newBalances, balance)
			}
		}
		if err := brc20DgTx.CreateBalances(ctx, newBalances); err != nil {
			return errors.Wrap(err, "failed to create balances")
		}
		p.newBalances = make(map[string]map[string]*entity.Balance)
	}

	if err := brc20DgTx.Commit(ctx); err != nil {
		return errors.Wrap(err, "failed to commit transaction")
	}
	return nil
}
|
||||
@@ -1,7 +1,6 @@
|
||||
package runes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"slices"
|
||||
"unicode/utf8"
|
||||
@@ -335,7 +334,6 @@ func runestonePayloadFromTx(tx *types.Transaction) ([]byte, Flaws) {
|
||||
continue
|
||||
}
|
||||
if err := tokenizer.Err(); err != nil {
|
||||
fmt.Println(err.Error())
|
||||
continue
|
||||
}
|
||||
if opCode := tokenizer.Opcode(); opCode != RUNESTONE_PAYLOAD_MAGIC_NUMBER {
|
||||
|
||||
@@ -9,4 +9,5 @@ import (
|
||||
|
||||
type Contract interface {
|
||||
GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error)
|
||||
GetTransactionOutputs(ctx context.Context, txHash chainhash.Hash) ([]*types.TxOut, error)
|
||||
}
|
||||
|
||||
267
pkg/lru/lru.go
Normal file
267
pkg/lru/lru.go
Normal file
@@ -0,0 +1,267 @@
|
||||
// lru a lru-cache package modified version of github.com/hashicorp/golang-lru
|
||||
// TODO: create PR to hashicorp/golang-lru
|
||||
package lru
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/golang-lru/v2/simplelru"
|
||||
)
|
||||
|
||||
const (
	// DefaultEvictedBufferSize defines the default buffer size to store evicted key/val
	DefaultEvictedBufferSize = 16
)

// Cache is a thread-safe fixed size LRU cache.
type Cache[K comparable, V any] struct {
	lru         *simplelru.LRU[K, V] // underlying non-thread-safe LRU; guarded by lock
	evictedKeys []K                  // staging buffer for evicted keys, drained outside the lock
	evictedVals []V                  // staging buffer for evicted values, parallel to evictedKeys
	onEvictedCB func(k K, v V)       // user eviction callback; invoked outside the critical section
	lock        sync.RWMutex
}
|
||||
|
||||
// New creates an LRU of the given size.
|
||||
func New[K comparable, V any](size int) (*Cache[K, V], error) {
|
||||
return NewWithEvict[K, V](size, nil)
|
||||
}
|
||||
|
||||
// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) {
	// create a cache with default settings
	c = &Cache[K, V]{
		onEvictedCB: onEvicted,
	}
	if onEvicted != nil {
		// Wrap the user callback: the inner LRU calls c.onEvicted while the
		// lock is held, which only stages the evicted key/val; the user's
		// callback runs later, outside the critical section.
		c.initEvictBuffers()
		onEvicted = c.onEvicted
	}
	c.lru, err = simplelru.NewLRU(size, onEvicted)
	return
}
|
||||
|
||||
// initEvictBuffers allocates fresh staging buffers for evicted keys/values.
// Called at construction and whenever the buffers are handed off to a drain
// loop (Purge/Resize).
func (c *Cache[K, V]) initEvictBuffers() {
	c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize)
	c.evictedVals = make([]V, 0, DefaultEvictedBufferSize)
}
|
||||
|
||||
// onEvicted save evicted key/val and sent in externally registered callback
// outside of critical section. It is registered with the inner LRU and runs
// while the lock is held, so it must only stage the pair, never call out.
func (c *Cache[K, V]) onEvicted(k K, v V) {
	c.evictedKeys = append(c.evictedKeys, k)
	c.evictedVals = append(c.evictedVals, v)
}
|
||||
|
||||
// Purge is used to completely clear the cache.
// If an eviction callback is registered it is invoked for every purged entry,
// after the lock has been released.
func (c *Cache[K, V]) Purge() {
	var ks []K
	var vs []V
	c.lock.Lock()
	c.lru.Purge()
	if c.onEvictedCB != nil && len(c.evictedKeys) > 0 {
		// take ownership of the staged evictions and install fresh buffers
		ks, vs = c.evictedKeys, c.evictedVals
		c.initEvictBuffers()
	}
	c.lock.Unlock()
	// invoke callback outside of critical section
	if c.onEvictedCB != nil {
		for i := 0; i < len(ks); i++ {
			c.onEvictedCB(ks[i], vs[i])
		}
	}
}
|
||||
|
||||
// Add adds a value to the cache. Returns true if an eviction occurred.
// The eviction callback, if any, is invoked after the lock is released.
func (c *Cache[K, V]) Add(key K, value V) (evicted bool) {
	var k K
	var v V
	c.lock.Lock()
	evicted = c.lru.Add(key, value)
	if c.onEvictedCB != nil && evicted {
		// a single Add evicts at most one entry; take it and reset the buffers
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	// invoke the user callback outside of the critical section
	if c.onEvictedCB != nil && evicted {
		c.onEvictedCB(k, v)
	}
	return
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *Cache[K, V]) Get(key K) (value V, ok bool) {
|
||||
c.lock.Lock()
|
||||
value, ok = c.lru.Get(key)
|
||||
c.lock.Unlock()
|
||||
return value, ok
|
||||
}
|
||||
|
||||
// MGet looks up a multiple key's values from the cache.
|
||||
// Returns a slice of value in the same order as the keys. value will be zero-value if key not found.
|
||||
func (c *Cache[K, V]) MGet(keys []K) (values []V) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
values = make([]V, 0, len(keys))
|
||||
for _, key := range keys {
|
||||
value, _ := c.lru.Get(key)
|
||||
values = append(values, value)
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// Contains checks if a key is in the cache, without updating the
|
||||
// recent-ness or deleting it for being stale.
|
||||
func (c *Cache[K, V]) Contains(key K) bool {
|
||||
c.lock.RLock()
|
||||
containKey := c.lru.Contains(key)
|
||||
c.lock.RUnlock()
|
||||
return containKey
|
||||
}
|
||||
|
||||
// Peek returns the key value (or undefined if not found) without updating
|
||||
// the "recently used"-ness of the key.
|
||||
func (c *Cache[K, V]) Peek(key K) (value V, ok bool) {
|
||||
c.lock.RLock()
|
||||
value, ok = c.lru.Peek(key)
|
||||
c.lock.RUnlock()
|
||||
return value, ok
|
||||
}
|
||||
|
||||
// MPeek returns multiple key value (or undefined if not found) without updating
|
||||
// the "recently used"-ness of the key.
|
||||
func (c *Cache[K, V]) MPeek(keys []K) (values []V) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
values = make([]V, 0, len(keys))
|
||||
for _, key := range keys {
|
||||
value, _ := c.lru.Peek(key)
|
||||
values = append(values, value)
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) {
	var k K
	var v V
	c.lock.Lock()
	if c.lru.Contains(key) {
		c.lock.Unlock()
		return true, false
	}
	evicted = c.lru.Add(key, value)
	if c.onEvictedCB != nil && evicted {
		// a single Add evicts at most one entry; take it and reset the buffers
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	// invoke the user callback outside of the critical section
	if c.onEvictedCB != nil && evicted {
		c.onEvictedCB(k, v)
	}
	return false, evicted
}
|
||||
|
||||
// PeekOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns the previous value (zero if absent), whether found, and whether an
// eviction occurred.
func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) {
	var k K
	var v V
	c.lock.Lock()
	previous, ok = c.lru.Peek(key)
	if ok {
		c.lock.Unlock()
		return previous, true, false
	}
	evicted = c.lru.Add(key, value)
	if c.onEvictedCB != nil && evicted {
		// a single Add evicts at most one entry; take it and reset the buffers
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	// invoke the user callback outside of the critical section
	if c.onEvictedCB != nil && evicted {
		c.onEvictedCB(k, v)
	}
	return
}
|
||||
|
||||
// Remove removes the provided key from the cache, reporting whether the key
// was present. A removal counts as an eviction for the callback.
func (c *Cache[K, V]) Remove(key K) (present bool) {
	var k K
	var v V
	c.lock.Lock()
	present = c.lru.Remove(key)
	if c.onEvictedCB != nil && present {
		// Remove stages exactly one entry; take it and reset the buffers
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	// invoke the user callback outside of the critical section
	if c.onEvictedCB != nil && present {
		c.onEvictedCB(k, v)
	}
	return
}
|
||||
|
||||
// Resize changes the cache size, returning the number of entries evicted by
// a downsize. The eviction callback runs for each, outside the lock.
func (c *Cache[K, V]) Resize(size int) (evicted int) {
	var ks []K
	var vs []V
	c.lock.Lock()
	evicted = c.lru.Resize(size)
	if c.onEvictedCB != nil && evicted > 0 {
		// a downsize may evict many entries; hand off the whole buffers
		ks, vs = c.evictedKeys, c.evictedVals
		c.initEvictBuffers()
	}
	c.lock.Unlock()
	// invoke the user callback outside of the critical section
	if c.onEvictedCB != nil && evicted > 0 {
		for i := 0; i < len(ks); i++ {
			c.onEvictedCB(ks[i], vs[i])
		}
	}
	return evicted
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache, returning it (and
// ok=false if the cache was empty). The eviction callback runs outside the lock.
func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) {
	var k K
	var v V
	c.lock.Lock()
	key, value, ok = c.lru.RemoveOldest()
	if c.onEvictedCB != nil && ok {
		// exactly one entry was staged; take it and reset the buffers
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	// invoke the user callback outside of the critical section
	if c.onEvictedCB != nil && ok {
		c.onEvictedCB(k, v)
	}
	return
}
|
||||
|
||||
// GetOldest returns the oldest entry
|
||||
func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) {
|
||||
c.lock.RLock()
|
||||
key, value, ok = c.lru.GetOldest()
|
||||
c.lock.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
||||
func (c *Cache[K, V]) Keys() []K {
|
||||
c.lock.RLock()
|
||||
keys := c.lru.Keys()
|
||||
c.lock.RUnlock()
|
||||
return keys
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *Cache[K, V]) Len() int {
|
||||
c.lock.RLock()
|
||||
length := c.lru.Len()
|
||||
c.lock.RUnlock()
|
||||
return length
|
||||
}
|
||||
368
pkg/lru/lru_test.go
Normal file
368
pkg/lru/lru_test.go
Normal file
@@ -0,0 +1,368 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"math"
|
||||
"math/big"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func getRand(tb testing.TB) int64 {
|
||||
out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
return out.Int64()
|
||||
}
|
||||
|
||||
// BenchmarkLRU_Rand benchmarks alternating Add/Get with uniformly random
// keys (4x the cache size), reporting the resulting hit/miss ratio.
func BenchmarkLRU_Rand(b *testing.B) {
	l, err := New[int64, int64](8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}

	// pre-generate the key trace so RNG cost stays out of the timed region
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		trace[i] = getRand(b) % 32768
	}

	b.ResetTimer()

	var hit, miss int
	for i := 0; i < 2*b.N; i++ {
		if i%2 == 0 {
			l.Add(trace[i], trace[i])
		} else {
			if _, ok := l.Get(trace[i]); ok {
				hit++
			} else {
				miss++
			}
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
|
||||
|
||||
// BenchmarkLRU_Freq benchmarks a frequency-skewed workload: adds draw keys
// from a narrower range (2x cache size) than gets (4x), then reports the
// hit/miss ratio of the Get phase.
func BenchmarkLRU_Freq(b *testing.B) {
	l, err := New[int64, int64](8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}

	// pre-generate the key trace so RNG cost stays out of the timed region
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		if i%2 == 0 {
			trace[i] = getRand(b) % 16384
		} else {
			trace[i] = getRand(b) % 32768
		}
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		l.Add(trace[i], trace[i])
	}
	var hit, miss int
	for i := 0; i < b.N; i++ {
		if _, ok := l.Get(trace[i]); ok {
			hit++
		} else {
			miss++
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
|
||||
|
||||
// TestLRU exercises the core cache behavior end to end: capacity-bounded
// Add with eviction callbacks, oldest-to-newest Keys ordering, Get promoting
// recency, Remove, MGet (including missing keys), and Purge.
func TestLRU(t *testing.T) {
	evictCounter := 0
	onEvicted := func(k int, v int) {
		if k != v {
			t.Fatalf("Evict values not equal (%v!=%v)", k, v)
		}
		evictCounter++
	}
	l, err := NewWithEvict(128, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// adding 256 entries to a 128-cache must evict the first 128
	for i := 0; i < 256; i++ {
		l.Add(i, i)
	}
	if l.Len() != 128 {
		t.Fatalf("bad len: %v", l.Len())
	}

	if evictCounter != 128 {
		t.Fatalf("bad evict count: %v", evictCounter)
	}

	// Keys() is oldest-first, so key at index i must be i+128
	for i, k := range l.Keys() {
		if v, ok := l.Get(k); !ok || v != k || v != i+128 {
			t.Fatalf("bad key: %v", k)
		}
	}
	for i := 0; i < 128; i++ {
		if _, ok := l.Get(i); ok {
			t.Fatalf("should be evicted")
		}
	}
	for i := 128; i < 256; i++ {
		if _, ok := l.Get(i); !ok {
			t.Fatalf("should not be evicted")
		}
	}
	for i := 128; i < 192; i++ {
		l.Remove(i)
		if _, ok := l.Get(i); ok {
			t.Fatalf("should be deleted")
		}
	}

	l.Get(192) // expect 192 to be last key in l.Keys()

	for i, k := range l.Keys() {
		if (i < 63 && k != i+193) || (i == 63 && k != 192) {
			t.Fatalf("out of order key: %v", k)
		}
	}

	{
		// test mget
		keys := l.Keys()
		values := l.MGet(keys)
		for i, v := range values {
			if keys[i] != v {
				t.Fatalf("[%d]bad value: %v:%v", i, keys[i], v)
			}
		}
	}
	{
		// test mget with missing keys: the missing slot must hold the zero value
		keys := append([]int{-1}, l.Keys()...)
		values := l.MGet(keys)
		if len(values) != len(keys) {
			t.Fatalf("bad len: %v, expected: %v", len(values), len(keys))
		}
		if values[0] != 0 {
			t.Fatalf("bad value: %v, expected: %v", values[0], 0)
		}
		for i := 1; i < len(values); i++ {
			if keys[i] != values[i] {
				t.Fatalf("[%d]bad value: %v:%v", i, keys[i], values[i])
			}
		}
	}

	l.Purge()
	if l.Len() != 0 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if _, ok := l.Get(200); ok {
		t.Fatalf("should contain nothing")
	}
}
|
||||
|
||||
// test that Add returns true/false if an eviction occurred
|
||||
func TestLRUAdd(t *testing.T) {
|
||||
evictCounter := 0
|
||||
onEvicted := func(k int, v int) {
|
||||
evictCounter++
|
||||
}
|
||||
|
||||
l, err := NewWithEvict(1, onEvicted)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if l.Add(1, 1) == true || evictCounter != 0 {
|
||||
t.Errorf("should not have an eviction")
|
||||
}
|
||||
if l.Add(2, 2) == false || evictCounter != 1 {
|
||||
t.Errorf("should have an eviction")
|
||||
}
|
||||
}
|
||||
|
||||
// test that Contains doesn't update recent-ness
|
||||
func TestLRUContains(t *testing.T) {
|
||||
l, err := New[int, int](2)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
l.Add(1, 1)
|
||||
l.Add(2, 2)
|
||||
if !l.Contains(1) {
|
||||
t.Errorf("1 should be contained")
|
||||
}
|
||||
|
||||
l.Add(3, 3)
|
||||
if l.Contains(1) {
|
||||
t.Errorf("Contains should not have updated recent-ness of 1")
|
||||
}
|
||||
}
|
||||
|
||||
// test that ContainsOrAdd doesn't update recent-ness
func TestLRUContainsOrAdd(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	l.Add(1, 1)
	l.Add(2, 2)
	// key 1 is present: no add, no eviction
	contains, evict := l.ContainsOrAdd(1, 1)
	if !contains {
		t.Errorf("1 should be contained")
	}
	if evict {
		t.Errorf("nothing should be evicted here")
	}

	// adding 3 evicts 1 (the ContainsOrAdd above must not have refreshed it)
	l.Add(3, 3)
	contains, evict = l.ContainsOrAdd(1, 1)
	if contains {
		t.Errorf("1 should not have been contained")
	}
	if !evict {
		t.Errorf("an eviction should have occurred")
	}
	if !l.Contains(1) {
		t.Errorf("now 1 should be contained")
	}
}
|
||||
|
||||
// test that PeekOrAdd doesn't update recent-ness
func TestLRUPeekOrAdd(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	l.Add(1, 1)
	l.Add(2, 2)
	// key 1 is present: PeekOrAdd returns it without adding or evicting
	previous, contains, evict := l.PeekOrAdd(1, 1)
	if !contains {
		t.Errorf("1 should be contained")
	}
	if evict {
		t.Errorf("nothing should be evicted here")
	}
	if previous != 1 {
		t.Errorf("previous is not equal to 1")
	}

	// adding 3 evicts 1, proving the PeekOrAdd above did not refresh it.
	// NOTE(review): this second phase calls ContainsOrAdd rather than
	// PeekOrAdd — likely copy-pasted from TestLRUContainsOrAdd; confirm
	// whether PeekOrAdd was intended here.
	l.Add(3, 3)
	contains, evict = l.ContainsOrAdd(1, 1)
	if contains {
		t.Errorf("1 should not have been contained")
	}
	if !evict {
		t.Errorf("an eviction should have occurred")
	}
	if !l.Contains(1) {
		t.Errorf("now 1 should be contained")
	}
}
|
||||
|
||||
// test that Peek doesn't update recent-ness
func TestLRUPeek(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	l.Add(1, 1)
	l.Add(2, 2)
	if v, ok := l.Peek(1); !ok || v != 1 {
		t.Errorf("1 should be set to 1: %v, %v", v, ok)
	}

	// MPeek must return the zero value in the slot of a missing key (-1)
	vals := l.MPeek([]int{-1, 1, 2})
	if len(vals) != 3 {
		t.Errorf("bad len: %v", len(vals))
	}
	if vals[0] != 0 {
		t.Errorf("bad value: %v, expected: %v", vals[0], 0)
	}
	if vals[1] != 1 || vals[2] != 2 {
		t.Errorf("bad vals: %v", vals)
	}

	// adding 3 evicts 1, proving the Peek/MPeek above did not refresh it
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("should not have updated recent-ness of 1")
	}
}
|
||||
|
||||
// test that Resize can upsize and downsize
func TestLRUResize(t *testing.T) {
	onEvictCounter := 0
	onEvicted := func(k int, v int) {
		onEvictCounter++
	}
	l, err := NewWithEvict(2, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Downsize: shrinking to 1 must evict exactly the LRU entry (1)
	l.Add(1, 1)
	l.Add(2, 2)
	evicted := l.Resize(1)
	if evicted != 1 {
		t.Errorf("1 element should have been evicted: %v", evicted)
	}
	if onEvictCounter != 1 {
		t.Errorf("onEvicted should have been called 1 time: %v", onEvictCounter)
	}

	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("Element 1 should have been evicted")
	}

	// Upsize: growing must never evict
	evicted = l.Resize(2)
	if evicted != 0 {
		t.Errorf("0 elements should have been evicted: %v", evicted)
	}

	l.Add(4, 4)
	if !l.Contains(3) || !l.Contains(4) {
		t.Errorf("Cache should have contained 2 elements")
	}
}
|
||||
|
||||
// TestKeysAndMPeek verifies Keys ordering (oldest to newest) and MPeek
// behavior on both an empty cache and a populated one, including the
// zero-value slot returned for a missing key.
func TestKeysAndMPeek(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// empty cache: MPeek over no keys yields an empty slice
	if vals := l.MPeek(l.Keys()); len(vals) != 0 {
		t.Errorf("bad len: %v", len(vals))
	}

	l.Add(1, 1)
	l.Add(2, 2)
	keys := l.Keys()
	if len(keys) != 2 {
		t.Errorf("bad len: %v", len(keys))
	}
	if keys[0] != 1 || keys[1] != 2 {
		t.Errorf("bad keys: %v", keys)
	}

	// missing key -1 must yield the zero value in its slot
	vals := l.MPeek([]int{-1, 1, 2})
	if len(vals) != 3 {
		t.Errorf("bad len: %v", len(vals))
	}
	if vals[0] != 0 {
		t.Errorf("bad value: %v, expected: %v", vals[0], 0)
	}
	if vals[1] != 1 || vals[2] != 2 {
		t.Errorf("bad vals: %v", vals)
	}
}
|
||||
10
sqlc.yaml
10
sqlc.yaml
@@ -17,3 +17,13 @@ sql:
|
||||
sql_package: "pgx/v5"
|
||||
rename:
|
||||
id: "Id"
|
||||
- schema: "./modules/brc20/database/postgresql/migrations"
|
||||
queries: "./modules/brc20/database/postgresql/queries"
|
||||
engine: "postgresql"
|
||||
gen:
|
||||
go:
|
||||
package: "gen"
|
||||
out: "./modules/brc20/internal/repository/postgres/gen"
|
||||
sql_package: "pgx/v5"
|
||||
rename:
|
||||
id: "Id"
|
||||
|
||||
Reference in New Issue
Block a user