Compare commits

..

8 Commits

Author SHA1 Message Date
Planxnx
5eb5c36a5b feat(btcutils): add bitcoin signature verification 2024-06-14 16:43:53 +07:00
Planxnx
24328b1ca3 feat(btcutils): add bitcoin utility functions 2024-06-14 16:30:52 +07:00
Thanee Charattrakool
51fd1f6636 feat: move requestip config to http config (#25) 2024-06-12 22:08:03 +07:00
Thanee Charattrakool
a7bc6257c4 feat(api): add request context and logger middleware (#24)
* feat(api): add request context and logger middleware

* feat(api): add cors and favicon middleware

* fix: solve wrapcheck linter warning

* feat: configurable hidden request headers
2024-06-12 21:47:29 +07:00
gazenw
3bb7500c87 feat: update docker version 2024-06-07 13:55:55 +07:00
Gaze
8c92893d4a feat: release v0.2.1 2024-05-31 01:16:34 +07:00
Nut Pinyo
d84e30ed11 fix: implement Shutdown() for processors (#22) 2024-05-31 01:13:12 +07:00
Thanee Charattrakool
d9fa217977 feat: use current indexed block for first prev block (#23)
* feat: use current indexed block for first prev block

* fix: forgot to set next prev header
2024-05-31 01:11:37 +07:00
90 changed files with 1918 additions and 7799 deletions

View File

@@ -39,7 +39,7 @@
"ui.completion.usePlaceholders": false,
"ui.diagnostic.analyses": {
// https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md
"fieldalignment": false,
// "fieldalignment": false,
"nilness": true,
"shadow": false,
"unusedparams": true,

View File

@@ -51,8 +51,6 @@ Here is our minimum database disk space requirement for each module.
| ------ | -------------------------- | ---------------------------- |
| Runes | 10 GB | 150 GB |
Here is our minimum database disk space requirement for each module.
#### 4. Prepare `config.yaml` file.
```yaml
@@ -108,7 +106,7 @@ We will be using `docker-compose` for our installation guide. Make sure the `doc
# docker-compose.yaml
services:
gaze-indexer:
image: ghcr.io/gaze-network/gaze-indexer:v1.0.0
image: ghcr.io/gaze-network/gaze-indexer:v0.2.1
container_name: gaze-indexer
restart: unless-stopped
ports:

View File

@@ -17,16 +17,20 @@ import (
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/modules/brc20"
"github.com/gaze-network/indexer-network/modules/runes"
"github.com/gaze-network/indexer-network/pkg/automaxprocs"
"github.com/gaze-network/indexer-network/pkg/errorhandler"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
"github.com/gaze-network/indexer-network/pkg/middleware/requestlogger"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/compress"
"github.com/gofiber/fiber/v2/middleware/cors"
"github.com/gofiber/fiber/v2/middleware/favicon"
fiberrecover "github.com/gofiber/fiber/v2/middleware/recover"
"github.com/gofiber/fiber/v2/middleware/requestid"
"github.com/samber/do/v2"
"github.com/samber/lo"
"github.com/spf13/cobra"
@@ -35,7 +39,6 @@ import (
// Register Modules
var Modules = do.Package(
do.LazyNamed("runes", runes.New),
do.LazyNamed("brc20", brc20.New),
)
func NewRunCommand() *cobra.Command {
@@ -137,6 +140,14 @@ func runHandler(cmd *cobra.Command, _ []string) error {
ErrorHandler: errorhandler.NewHTTPErrorHandler(),
})
app.
Use(favicon.New()).
Use(cors.New()).
Use(requestid.New()).
Use(requestcontext.New(
requestcontext.WithRequestId(),
requestcontext.WithClientIP(conf.HTTPServer.RequestIP),
)).
Use(requestlogger.New(conf.HTTPServer.Logger)).
Use(fiberrecover.New(fiberrecover.Config{
EnableStackTrace: true,
StackTraceHandler: func(c *fiber.Ctx, e interface{}) {

View File

@@ -17,7 +17,7 @@ import (
type migrateDownCmdOptions struct {
DatabaseURL string
Modules string
Runes bool
All bool
}
@@ -59,7 +59,7 @@ func NewMigrateDownCommand() *cobra.Command {
}
flags := cmd.Flags()
flags.StringVar(&opts.Modules, "modules", "", "Modules to apply up migrations")
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes down migrations")
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")
flags.BoolVar(&opts.All, "all", false, "Confirm apply ALL down migrations without prompt")
@@ -87,8 +87,6 @@ func migrateDownHandler(opts *migrateDownCmdOptions, _ *cobra.Command, args migr
}
}
modules := strings.Split(opts.Modules, ",")
applyDownMigrations := func(module string, sourcePath string, migrationTable string) error {
newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}})
sourceURL := "file://" + sourcePath
@@ -118,15 +116,10 @@ func migrateDownHandler(opts *migrateDownCmdOptions, _ *cobra.Command, args migr
return nil
}
if lo.Contains(modules, "runes") {
if opts.Runes {
if err := applyDownMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
if lo.Contains(modules, "brc20") {
if err := applyDownMigrations("BRC20", brc20MigrationSource, "brc20_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
return nil
}

View File

@@ -11,13 +11,12 @@ import (
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/samber/lo"
"github.com/spf13/cobra"
)
type migrateUpCmdOptions struct {
DatabaseURL string
Modules string
Runes bool
}
type migrateUpCmdArgs struct {
@@ -55,7 +54,7 @@ func NewMigrateUpCommand() *cobra.Command {
}
flags := cmd.Flags()
flags.StringVar(&opts.Modules, "modules", "", "Modules to apply up migrations")
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes up migrations")
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")
return cmd
@@ -73,8 +72,6 @@ func migrateUpHandler(opts *migrateUpCmdOptions, _ *cobra.Command, args migrateU
return errors.Errorf("unsupported database driver: %s", databaseURL.Scheme)
}
modules := strings.Split(opts.Modules, ",")
applyUpMigrations := func(module string, sourcePath string, migrationTable string) error {
newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}})
sourceURL := "file://" + sourcePath
@@ -104,15 +101,10 @@ func migrateUpHandler(opts *migrateUpCmdOptions, _ *cobra.Command, args migrateU
return nil
}
if lo.Contains(modules, "runes") {
if opts.Runes {
if err := applyUpMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
if lo.Contains(modules, "brc20") {
if err := applyUpMigrations("BRC20", brc20MigrationSource, "brc20_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
return nil
}

View File

@@ -4,7 +4,6 @@ import "net/url"
const (
runesMigrationSource = "modules/runes/database/postgresql/migrations"
brc20MigrationSource = "modules/brc20/database/postgresql/migrations"
)
func cloneURLWithQuery(u *url.URL, newQuery url.Values) *url.URL {

View File

@@ -23,6 +23,14 @@ reporting:
# HTTP server configuration options.
http_server:
port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers.
logger:
disable: false # disable logger if logger level is `INFO`
request_header: false
request_query: false
requestip: # Client IP extraction configuration options. This is unnecessary if you don't care about the real client IP or if you're not using a reverse proxy.
trusted_proxies_ip: # Cloudflare, GCP Public LB. See: server/internal/middleware/requestcontext/PROXY-IP.md
trusted_proxies_header: # X-Real-IP, CF-Connecting-IP
enable_reject_malformed_request: false # return 403 if request is malformed (invalid IP)
# Meta-protocol modules configuration options.
modules:

View File

@@ -1,5 +1,5 @@
package constants
const (
Version = "v0.0.1"
Version = "v0.2.1"
)

View File

@@ -292,19 +292,3 @@ func (d *BitcoinNodeDatasource) GetBlockHeader(ctx context.Context, height int64
return types.ParseMsgBlockHeader(*block, height), nil
}
// GetTransaction fetch transaction from Bitcoin node
func (d *BitcoinNodeDatasource) GetTransactionOutputs(ctx context.Context, txHash chainhash.Hash) ([]*types.TxOut, error) {
rawTx, err := d.btcclient.GetRawTransaction(&txHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get raw transaction")
}
msgTx := rawTx.MsgTx()
txOuts := make([]*types.TxOut, 0, len(msgTx.TxOut))
for _, txOut := range msgTx.TxOut {
txOuts = append(txOuts, types.ParseTxOut(txOut))
}
return txOuts, nil
}

View File

@@ -91,6 +91,10 @@ func (i *Indexer[T]) Run(ctx context.Context) (err error) {
select {
case <-i.quit:
logger.InfoContext(ctx, "Got quit signal, stopping indexer")
if err := i.Processor.Shutdown(ctx); err != nil {
logger.ErrorContext(ctx, "Failed to shutdown processor", slogx.Error(err))
return errors.Wrap(err, "processor shutdown failed")
}
return nil
case <-ctx.Done():
return nil
@@ -204,9 +208,9 @@ func (i *Indexer[T]) process(ctx context.Context) (err error) {
}
// validate is input is continuous and no reorg
for i := 1; i < len(inputs); i++ {
header := inputs[i].BlockHeader()
prevHeader := inputs[i-1].BlockHeader()
prevHeader := i.currentBlock
for i, input := range inputs {
header := input.BlockHeader()
if header.Height != prevHeader.Height+1 {
return errors.Wrapf(errs.InternalError, "input is not continuous, input[%d] height: %d, input[%d] height: %d", i-1, prevHeader.Height, i, header.Height)
}
@@ -217,6 +221,7 @@ func (i *Indexer[T]) process(ctx context.Context) (err error) {
// end current round
return nil
}
prevHeader = header
}
ctx = logger.WithContext(ctx, slog.Int("total_inputs", len(inputs)))

View File

@@ -29,6 +29,9 @@ type Processor[T Input] interface {
// VerifyStates verifies the states of the indexed data and the indexer
// to ensure the last shutdown was graceful and no missing data.
VerifyStates(ctx context.Context) error
// Shutdown gracefully stops the processor. Database connections, network calls, leftover states, etc. should be closed and cleaned up here.
Shutdown(ctx context.Context) error
}
type IndexerWorker interface {

21
go.mod
View File

@@ -6,12 +6,12 @@ require (
github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11
github.com/btcsuite/btcd v0.24.0
github.com/btcsuite/btcd/btcutil v1.1.5
github.com/btcsuite/btcd/btcutil/psbt v1.1.9
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0
github.com/cockroachdb/errors v1.11.1
github.com/gaze-network/uint128 v1.3.0
github.com/gofiber/fiber/v2 v2.52.4
github.com/golang-migrate/migrate/v4 v4.17.1
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/jackc/pgx/v5 v5.5.5
github.com/mcosta74/pgx-slog v0.3.0
github.com/planxnx/concurrent-stream v0.1.5
@@ -21,23 +21,24 @@ require (
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.8.4
github.com/stretchr/testify v1.9.0
github.com/valyala/fasthttp v1.51.0
go.uber.org/automaxprocs v1.5.3
golang.org/x/sync v0.5.0
golang.org/x/sync v0.7.0
)
require (
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.1.3 // indirect
github.com/bitonicnl/verify-signed-message v0.7.1
github.com/btcsuite/btcd/btcec/v2 v2.3.3 // indirect
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
@@ -75,10 +76,10 @@ require (
github.com/valyala/tcplisten v1.0.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.20.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

52
go.sum
View File

@@ -7,18 +7,23 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/bitonicnl/verify-signed-message v0.7.1 h1:1Qku9k9WgzobjqBY7tT3CLjWxtTJZxkYNhOV6QeCTjY=
github.com/bitonicnl/verify-signed-message v0.7.1/go.mod h1:PR60twfJIaHEo9Wb6eJBh8nBHEZIQQx8CvRwh0YmEPk=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A=
github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo=
github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcec/v2 v2.3.3 h1:6+iXlDKE8RMtKsvK0gshlXIuPbyWM/h84Ensb7o3sC0=
github.com/btcsuite/btcd/btcec/v2 v2.3.3/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8=
github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00=
github.com/btcsuite/btcd/btcutil/psbt v1.1.9 h1:UmfOIiWMZcVMOLaN+lxbbLSuoINGS1WmK1TZNI0b4yk=
github.com/btcsuite/btcd/btcutil/psbt v1.1.9/go.mod h1:ehBEvU91lxSlXtA+zZz3iFYx7Yq9eqnKx4/kSrnsvMY=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
@@ -50,10 +55,12 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg=
github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA=
@@ -92,13 +99,12 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -108,8 +114,6 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -221,11 +225,11 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -247,14 +251,14 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d h1:N0hmiNbwsSNwHBAvR3QB5w25pUwH4tK0Y/RltD1j1h4=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -269,8 +273,8 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -283,19 +287,19 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -8,10 +8,11 @@ import (
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
brc20config "github.com/gaze-network/indexer-network/modules/brc20/config"
runesconfig "github.com/gaze-network/indexer-network/modules/runes/config"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
"github.com/gaze-network/indexer-network/pkg/middleware/requestlogger"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/spf13/pflag"
"github.com/spf13/viper"
@@ -61,11 +62,12 @@ type BitcoinNodeClient struct {
type Modules struct {
Runes runesconfig.Config `mapstructure:"runes"`
BRC20 brc20config.Config `mapstructure:"brc20"`
}
type HTTPServerConfig struct {
Port int `mapstructure:"port"`
Port int `mapstructure:"port"`
Logger requestlogger.Config `mapstructure:"logger"`
RequestIP requestcontext.WithClientIPConfig `mapstructure:"requestip"`
}
// Parse parse the configuration from environment variables

View File

@@ -1,71 +0,0 @@
package brc20
import (
"context"
"strings"
"github.com/btcsuite/btcd/rpcclient"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
brc20postgres "github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres"
"github.com/gaze-network/indexer-network/pkg/btcclient"
"github.com/samber/do/v2"
)
func New(injector do.Injector) (indexer.IndexerWorker, error) {
ctx := do.MustInvoke[context.Context](injector)
conf := do.MustInvoke[config.Config](injector)
// reportingClient := do.MustInvoke[*reportingclient.ReportingClient](injector)
cleanupFuncs := make([]func(context.Context) error, 0)
var brc20Dg datagateway.BRC20DataGateway
var indexerInfoDg datagateway.IndexerInfoDataGateway
switch strings.ToLower(conf.Modules.BRC20.Database) {
case "postgresql", "postgres", "pg":
pg, err := postgres.NewPool(ctx, conf.Modules.BRC20.Postgres)
if err != nil {
if errors.Is(err, errs.InvalidArgument) {
return nil, errors.Wrap(err, "Invalid Postgres configuration for indexer")
}
return nil, errors.Wrap(err, "can't create Postgres connection pool")
}
cleanupFuncs = append(cleanupFuncs, func(ctx context.Context) error {
pg.Close()
return nil
})
brc20Repo := brc20postgres.NewRepository(pg)
brc20Dg = brc20Repo
indexerInfoDg = brc20Repo
default:
return nil, errors.Wrapf(errs.Unsupported, "%q database for indexer is not supported", conf.Modules.BRC20.Database)
}
var bitcoinDatasource datasources.Datasource[*types.Block]
var bitcoinClient btcclient.Contract
switch strings.ToLower(conf.Modules.BRC20.Datasource) {
case "bitcoin-node":
btcClient := do.MustInvoke[*rpcclient.Client](injector)
bitcoinNodeDatasource := datasources.NewBitcoinNode(btcClient)
bitcoinDatasource = bitcoinNodeDatasource
bitcoinClient = bitcoinNodeDatasource
default:
return nil, errors.Wrapf(errs.Unsupported, "%q datasource is not supported", conf.Modules.BRC20.Datasource)
}
processor, err := NewProcessor(brc20Dg, indexerInfoDg, bitcoinClient, conf.Network, cleanupFuncs)
if err != nil {
return nil, errors.WithStack(err)
}
if err := processor.VerifyStates(ctx); err != nil {
return nil, errors.WithStack(err)
}
indexer := indexer.New(processor, bitcoinDatasource)
return indexer, nil
}

View File

@@ -1,10 +0,0 @@
package config
import "github.com/gaze-network/indexer-network/internal/postgres"
type Config struct {
Datasource string `mapstructure:"datasource"` // Datasource to fetch bitcoin data for Meta-Protocol e.g. `bitcoin-node`
Database string `mapstructure:"database"` // Database to store data.
APIHandlers []string `mapstructure:"api_handlers"` // List of API handlers to enable. (e.g. `http`)
Postgres postgres.Config `mapstructure:"postgres"`
}

View File

@@ -1,25 +0,0 @@
package brc20
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/types"
)
const (
ClientVersion = "v0.0.1"
DBVersion = 1
EventHashVersion = 1
)
var startingBlockHeader = map[common.Network]types.BlockHeader{
common.NetworkMainnet: {
Height: 767429,
Hash: *utils.Must(chainhash.NewHashFromStr("00000000000000000002b35aef66eb15cd2b232a800f75a2f25cedca4cfe52c4")),
},
common.NetworkTestnet: {
Height: 2413342,
Hash: *utils.Must(chainhash.NewHashFromStr("00000000000022e97030b143af785de812f836dd0651b6ac2b7dd9e90dc9abf9")),
},
}

View File

@@ -1,17 +0,0 @@
BEGIN;
DROP TABLE IF EXISTS "brc20_indexer_states";
DROP TABLE IF EXISTS "brc20_indexed_blocks";
DROP TABLE IF EXISTS "brc20_processor_stats";
DROP TABLE IF EXISTS "brc20_tick_entries";
DROP TABLE IF EXISTS "brc20_tick_entry_states";
DROP TABLE IF EXISTS "brc20_event_deploys";
DROP TABLE IF EXISTS "brc20_event_mints";
DROP TABLE IF EXISTS "brc20_event_inscribe_transfers";
DROP TABLE IF EXISTS "brc20_event_transfer_transfers";
DROP TABLE IF EXISTS "brc20_balances";
DROP TABLE IF EXISTS "brc20_inscription_entries";
DROP TABLE IF EXISTS "brc20_inscription_entry_states";
DROP TABLE IF EXISTS "brc20_inscription_transfers";
COMMIT;

View File

@@ -1,189 +0,0 @@
-- Up migration for the BRC20 module schema. All tables are created in a single
-- transaction. "*_states" tables hold per-block snapshots keyed by
-- (id, block_height) so data can be reverted by height on reorgs.
BEGIN;

-- Indexer Client Information
CREATE TABLE IF NOT EXISTS "brc20_indexer_states" (
    "id" BIGSERIAL PRIMARY KEY,
    "client_version" TEXT NOT NULL,
    "network" TEXT NOT NULL,
    "db_version" INT NOT NULL,
    "event_hash_version" INT NOT NULL,
    "created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS brc20_indexer_state_created_at_idx ON "brc20_indexer_states" USING BTREE ("created_at" DESC);

-- BRC20 data
-- One row per indexed block; event_hash commits to the block's BRC20 events,
-- cumulative_event_hash chains across blocks.
CREATE TABLE IF NOT EXISTS "brc20_indexed_blocks" (
    "height" INT NOT NULL PRIMARY KEY,
    "hash" TEXT NOT NULL,
    "event_hash" TEXT NOT NULL,
    "cumulative_event_hash" TEXT NOT NULL
);

-- Running ordinals counters snapshotted per block.
CREATE TABLE IF NOT EXISTS "brc20_processor_stats" (
    "block_height" INT NOT NULL PRIMARY KEY,
    "cursed_inscription_count" INT NOT NULL,
    "blessed_inscription_count" INT NOT NULL,
    "lost_sats" BIGINT NOT NULL
);

-- Immutable deploy-time data for each tick; mutable counters live in
-- brc20_tick_entry_states.
CREATE TABLE IF NOT EXISTS "brc20_tick_entries" (
    "tick" TEXT NOT NULL PRIMARY KEY, -- lowercase of original_tick
    "original_tick" TEXT NOT NULL,
    "total_supply" DECIMAL NOT NULL,
    "decimals" SMALLINT NOT NULL,
    "limit_per_mint" DECIMAL NOT NULL,
    "is_self_mint" BOOLEAN NOT NULL,
    "deploy_inscription_id" TEXT NOT NULL,
    "deployed_at" TIMESTAMP NOT NULL,
    "deployed_at_height" INT NOT NULL
);
CREATE TABLE IF NOT EXISTS "brc20_tick_entry_states" (
    "tick" TEXT NOT NULL,
    "block_height" INT NOT NULL,
    "minted_amount" DECIMAL NOT NULL,
    "burned_amount" DECIMAL NOT NULL,
    "completed_at" TIMESTAMP,
    "completed_at_height" INT,
    PRIMARY KEY ("tick", "block_height")
);

-- Event tables: one table per BRC20 operation kind, append-only, indexed by
-- block_height for reverts.
CREATE TABLE IF NOT EXISTS "brc20_event_deploys" (
    "id" BIGINT PRIMARY KEY NOT NULL,
    "inscription_id" TEXT NOT NULL,
    "inscription_number" BIGINT NOT NULL,
    "tick" TEXT NOT NULL, -- lowercase of original_tick
    "original_tick" TEXT NOT NULL,
    "tx_hash" TEXT NOT NULL,
    "block_height" INT NOT NULL,
    "tx_index" INT NOT NULL,
    "timestamp" TIMESTAMP NOT NULL,
    "pkscript" TEXT NOT NULL,
    "satpoint" TEXT NOT NULL,
    "total_supply" DECIMAL NOT NULL,
    "decimals" SMALLINT NOT NULL,
    "limit_per_mint" DECIMAL NOT NULL,
    "is_self_mint" BOOLEAN NOT NULL
);
CREATE INDEX IF NOT EXISTS brc20_event_deploys_block_height_idx ON "brc20_event_deploys" USING BTREE ("block_height");
CREATE TABLE IF NOT EXISTS "brc20_event_mints" (
    "id" BIGINT PRIMARY KEY NOT NULL,
    "inscription_id" TEXT NOT NULL,
    "inscription_number" BIGINT NOT NULL,
    "tick" TEXT NOT NULL, -- lowercase of original_tick
    "original_tick" TEXT NOT NULL,
    "tx_hash" TEXT NOT NULL,
    "block_height" INT NOT NULL,
    "tx_index" INT NOT NULL,
    "timestamp" TIMESTAMP NOT NULL,
    "pkscript" TEXT NOT NULL,
    "satpoint" TEXT NOT NULL,
    "amount" DECIMAL NOT NULL,
    "parent_id" TEXT -- requires parent deploy inscription id if minting a self-mint ticker
);
CREATE INDEX IF NOT EXISTS brc20_event_mints_block_height_idx ON "brc20_event_mints" USING BTREE ("block_height");
CREATE TABLE IF NOT EXISTS "brc20_event_inscribe_transfers" (
    "id" BIGINT PRIMARY KEY NOT NULL,
    "inscription_id" TEXT NOT NULL,
    "inscription_number" BIGINT NOT NULL,
    "tick" TEXT NOT NULL, -- lowercase of original_tick
    "original_tick" TEXT NOT NULL,
    "tx_hash" TEXT NOT NULL,
    "block_height" INT NOT NULL,
    "tx_index" INT NOT NULL,
    "timestamp" TIMESTAMP NOT NULL,
    "pkscript" TEXT NOT NULL,
    "satpoint" TEXT NOT NULL,
    "output_index" INT NOT NULL,
    "sats_amount" BIGINT NOT NULL,
    "amount" DECIMAL NOT NULL
);
CREATE INDEX IF NOT EXISTS brc20_event_inscribe_transfers_block_height_idx ON "brc20_event_inscribe_transfers" USING BTREE ("block_height");
CREATE INDEX IF NOT EXISTS brc20_event_inscribe_transfers_inscription_id_idx ON "brc20_event_inscribe_transfers" USING BTREE ("inscription_id"); -- used for validating transfer transfer events
CREATE TABLE IF NOT EXISTS "brc20_event_transfer_transfers" (
    "id" BIGINT PRIMARY KEY NOT NULL,
    "inscription_id" TEXT NOT NULL,
    "inscription_number" BIGINT NOT NULL,
    "tick" TEXT NOT NULL, -- lowercase of original_tick
    "original_tick" TEXT NOT NULL,
    "tx_hash" TEXT NOT NULL,
    "block_height" INT NOT NULL,
    "tx_index" INT NOT NULL,
    "timestamp" TIMESTAMP NOT NULL,
    "from_pkscript" TEXT NOT NULL,
    "from_satpoint" TEXT NOT NULL,
    "from_input_index" INT NOT NULL,
    "to_pkscript" TEXT NOT NULL,
    "to_satpoint" TEXT NOT NULL,
    "to_output_index" INT NOT NULL,
    "spent_as_fee" BOOLEAN NOT NULL,
    "amount" DECIMAL NOT NULL
);
CREATE INDEX IF NOT EXISTS brc20_event_transfer_transfers_block_height_idx ON "brc20_event_transfer_transfers" USING BTREE ("block_height");

-- Per-(pkscript, tick) balance snapshots, one row per block in which the
-- balance changed.
CREATE TABLE IF NOT EXISTS "brc20_balances" (
    "pkscript" TEXT NOT NULL,
    "block_height" INT NOT NULL,
    "tick" TEXT NOT NULL,
    "overall_balance" DECIMAL NOT NULL,
    "available_balance" DECIMAL NOT NULL,
    PRIMARY KEY ("pkscript", "tick", "block_height")
);

-- Inscription entries tracked by the BRC20 module (only BRC20-relevant
-- inscriptions), plus per-block transfer-count state and transfer history.
CREATE TABLE IF NOT EXISTS "brc20_inscription_entries" (
    "id" TEXT NOT NULL PRIMARY KEY,
    "number" BIGINT NOT NULL,
    "sequence_number" BIGINT NOT NULL,
    "delegate" TEXT, -- delegate inscription id
    "metadata" BYTEA,
    "metaprotocol" TEXT,
    "parents" TEXT[], -- parent inscription id, 0.14 only supports 1 parent per inscription
    "pointer" BIGINT,
    "content" JSONB, -- can use jsonb because we only track brc20 inscriptions
    "content_encoding" TEXT,
    "content_type" TEXT,
    "cursed" BOOLEAN NOT NULL, -- inscriptions after jubilee are no longer cursed in 0.14, which affects inscription number
    "cursed_for_brc20" BOOLEAN NOT NULL, -- however, inscriptions that would normally be cursed are still considered cursed for brc20
    "created_at" TIMESTAMP NOT NULL,
    "created_at_height" INT NOT NULL
);
CREATE INDEX IF NOT EXISTS brc20_inscription_entries_id_number_idx ON "brc20_inscription_entries" USING BTREE ("id", "number");
CREATE TABLE IF NOT EXISTS "brc20_inscription_entry_states" (
    "id" TEXT NOT NULL,
    "block_height" INT NOT NULL,
    "transfer_count" INT NOT NULL,
    PRIMARY KEY ("id", "block_height")
);
CREATE TABLE IF NOT EXISTS "brc20_inscription_transfers" (
    "inscription_id" TEXT NOT NULL,
    "block_height" INT NOT NULL,
    "tx_index" INT NOT NULL,
    "tx_hash" TEXT NOT NULL,
    "from_input_index" INT NOT NULL,
    "old_satpoint_tx_hash" TEXT,
    "old_satpoint_out_idx" INT,
    "old_satpoint_offset" BIGINT,
    "new_satpoint_tx_hash" TEXT,
    "new_satpoint_out_idx" INT,
    "new_satpoint_offset" BIGINT,
    "new_pkscript" TEXT NOT NULL,
    "new_output_value" BIGINT NOT NULL,
    "sent_as_fee" BOOLEAN NOT NULL,
    "transfer_count" INT NOT NULL,
    PRIMARY KEY ("inscription_id", "block_height", "tx_index")
);
CREATE INDEX IF NOT EXISTS brc20_inscription_transfers_block_height_tx_index_idx ON "brc20_inscription_transfers" USING BTREE ("block_height", "tx_index");
CREATE INDEX IF NOT EXISTS brc20_inscription_transfers_new_satpoint_idx ON "brc20_inscription_transfers" USING BTREE ("new_satpoint_tx_hash", "new_satpoint_out_idx", "new_satpoint_offset");

COMMIT;

View File

@@ -1,145 +0,0 @@
-- sqlc query definitions for the BRC20 data gateway. The `-- name:` comments
-- are sqlc annotations; `:one`/`:many`/`:exec`/`:batchexec` select the
-- generated method shape. Delete*SinceHeight queries support reorg reverts.

-- name: GetLatestIndexedBlock :one
SELECT * FROM "brc20_indexed_blocks" ORDER BY "height" DESC LIMIT 1;

-- name: GetIndexedBlockByHeight :one
SELECT * FROM "brc20_indexed_blocks" WHERE "height" = $1;

-- name: GetLatestProcessorStats :one
SELECT * FROM "brc20_processor_stats" ORDER BY "block_height" DESC LIMIT 1;

-- Joins the caller-supplied (tx_hash, tx_out_idx) pairs against transfers
-- whose new satpoint lands in those outpoints, attaching inscription content.
-- name: GetInscriptionTransfersInOutPoints :many
SELECT "it".*, "ie"."content" FROM (
    SELECT
        unnest(@tx_hash_arr::text[]) AS "tx_hash",
        unnest(@tx_out_idx_arr::int[]) AS "tx_out_idx"
) "inputs"
INNER JOIN "brc20_inscription_transfers" it ON "inputs"."tx_hash" = "it"."new_satpoint_tx_hash" AND "inputs"."tx_out_idx" = "it"."new_satpoint_out_idx"
LEFT JOIN "brc20_inscription_entries" ie ON "it"."inscription_id" = "ie"."id";
-- NOTE(review): the line below is a stray empty statement (lone semicolon);
-- presumably harmless to the parser — confirm and remove in a follow-up.
;

-- name: GetInscriptionEntriesByIds :many
WITH "states" AS (
    -- select latest state
    SELECT DISTINCT ON ("id") * FROM "brc20_inscription_entry_states" WHERE "id" = ANY(@inscription_ids::text[]) ORDER BY "id", "block_height" DESC
)
SELECT * FROM "brc20_inscription_entries"
    LEFT JOIN "states" ON "brc20_inscription_entries"."id" = "states"."id"
    WHERE "brc20_inscription_entries"."id" = ANY(@inscription_ids::text[]);

-- name: GetTickEntriesByTicks :many
WITH "states" AS (
    -- select latest state
    SELECT DISTINCT ON ("tick") * FROM "brc20_tick_entry_states" WHERE "tick" = ANY(@ticks::text[]) ORDER BY "tick", "block_height" DESC
)
SELECT * FROM "brc20_tick_entries"
    LEFT JOIN "states" ON "brc20_tick_entries"."tick" = "states"."tick"
    WHERE "brc20_tick_entries"."tick" = ANY(@ticks::text[]);

-- name: GetInscriptionNumbersByIds :many
SELECT id, number FROM "brc20_inscription_entries" WHERE "id" = ANY(@inscription_ids::text[]);

-- name: GetInscriptionParentsByIds :many
SELECT id, parents FROM "brc20_inscription_entries" WHERE "id" = ANY(@inscription_ids::text[]);

-- Returns the latest id of each event table, or -1 when a table is empty.
-- name: GetLatestEventIds :one
WITH "latest_deploy_id" AS (
    SELECT "id" FROM "brc20_event_deploys" ORDER BY "id" DESC LIMIT 1
),
"latest_mint_id" AS (
    SELECT "id" FROM "brc20_event_mints" ORDER BY "id" DESC LIMIT 1
),
"latest_inscribe_transfer_id" AS (
    SELECT "id" FROM "brc20_event_inscribe_transfers" ORDER BY "id" DESC LIMIT 1
),
"latest_transfer_transfer_id" AS (
    SELECT "id" FROM "brc20_event_transfer_transfers" ORDER BY "id" DESC LIMIT 1
)
SELECT
    COALESCE((SELECT "id" FROM "latest_deploy_id"), -1) AS "event_deploy_id",
    COALESCE((SELECT "id" FROM "latest_mint_id"), -1) AS "event_mint_id",
    COALESCE((SELECT "id" FROM "latest_inscribe_transfer_id"), -1) AS "event_inscribe_transfer_id",
    COALESCE((SELECT "id" FROM "latest_transfer_transfer_id"), -1) AS "event_transfer_transfer_id";

-- Latest balance snapshot at or below @block_height for each requested
-- (pkscript, tick) pair; DISTINCT ON + ORDER BY picks the newest per pair.
-- name: GetBalancesBatchAtHeight :many
SELECT DISTINCT ON ("brc20_balances"."pkscript", "brc20_balances"."tick") "brc20_balances".* FROM "brc20_balances"
    INNER JOIN (
        SELECT
            unnest(@pkscript_arr::text[]) AS "pkscript",
            unnest(@tick_arr::text[]) AS "tick"
    ) "queries" ON "brc20_balances"."pkscript" = "queries"."pkscript" AND "brc20_balances"."tick" = "queries"."tick" AND "brc20_balances"."block_height" <= @block_height
    ORDER BY "brc20_balances"."pkscript", "brc20_balances"."tick", "block_height" DESC;

-- name: GetEventInscribeTransfersByInscriptionIds :many
SELECT * FROM "brc20_event_inscribe_transfers" WHERE "inscription_id" = ANY(@inscription_ids::text[]);

-- name: CreateIndexedBlock :exec
INSERT INTO "brc20_indexed_blocks" ("height", "hash", "event_hash", "cumulative_event_hash") VALUES ($1, $2, $3, $4);

-- name: CreateProcessorStats :exec
INSERT INTO "brc20_processor_stats" ("block_height", "cursed_inscription_count", "blessed_inscription_count", "lost_sats") VALUES ($1, $2, $3, $4);

-- name: CreateTickEntries :batchexec
INSERT INTO "brc20_tick_entries" ("tick", "original_tick", "total_supply", "decimals", "limit_per_mint", "is_self_mint", "deploy_inscription_id", "deployed_at", "deployed_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9);

-- name: CreateTickEntryStates :batchexec
INSERT INTO "brc20_tick_entry_states" ("tick", "block_height", "minted_amount", "burned_amount", "completed_at", "completed_at_height") VALUES ($1, $2, $3, $4, $5, $6);

-- name: CreateInscriptionEntries :batchexec
INSERT INTO "brc20_inscription_entries" ("id", "number", "sequence_number", "delegate", "metadata", "metaprotocol", "parents", "pointer", "content", "content_encoding", "content_type", "cursed", "cursed_for_brc20", "created_at", "created_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15);

-- name: CreateInscriptionEntryStates :batchexec
INSERT INTO "brc20_inscription_entry_states" ("id", "block_height", "transfer_count") VALUES ($1, $2, $3);

-- name: CreateInscriptionTransfers :batchexec
INSERT INTO "brc20_inscription_transfers" ("inscription_id", "block_height", "tx_index", "tx_hash", "from_input_index", "old_satpoint_tx_hash", "old_satpoint_out_idx", "old_satpoint_offset", "new_satpoint_tx_hash", "new_satpoint_out_idx", "new_satpoint_offset", "new_pkscript", "new_output_value", "sent_as_fee", "transfer_count") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15);

-- name: CreateEventDeploys :batchexec
INSERT INTO "brc20_event_deploys" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "total_supply", "decimals", "limit_per_mint", "is_self_mint") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14);

-- name: CreateEventMints :batchexec
INSERT INTO "brc20_event_mints" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "amount", "parent_id") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12);

-- name: CreateEventInscribeTransfers :batchexec
INSERT INTO "brc20_event_inscribe_transfers" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "output_index", "sats_amount", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13);

-- name: CreateEventTransferTransfers :batchexec
INSERT INTO "brc20_event_transfer_transfers" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "from_pkscript", "from_satpoint", "from_input_index", "to_pkscript", "to_satpoint", "to_output_index", "spent_as_fee", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16);

-- name: CreateBalances :batchexec
INSERT INTO "brc20_balances" ("pkscript", "block_height", "tick", "overall_balance", "available_balance") VALUES ($1, $2, $3, $4, $5);

-- name: DeleteIndexedBlocksSinceHeight :exec
DELETE FROM "brc20_indexed_blocks" WHERE "height" >= $1;

-- name: DeleteProcessorStatsSinceHeight :exec
DELETE FROM "brc20_processor_stats" WHERE "block_height" >= $1;

-- name: DeleteTickEntriesSinceHeight :exec
DELETE FROM "brc20_tick_entries" WHERE "deployed_at_height" >= $1;

-- name: DeleteTickEntryStatesSinceHeight :exec
DELETE FROM "brc20_tick_entry_states" WHERE "block_height" >= $1;

-- name: DeleteEventDeploysSinceHeight :exec
DELETE FROM "brc20_event_deploys" WHERE "block_height" >= $1;

-- name: DeleteEventMintsSinceHeight :exec
DELETE FROM "brc20_event_mints" WHERE "block_height" >= $1;

-- name: DeleteEventInscribeTransfersSinceHeight :exec
DELETE FROM "brc20_event_inscribe_transfers" WHERE "block_height" >= $1;

-- name: DeleteEventTransferTransfersSinceHeight :exec
DELETE FROM "brc20_event_transfer_transfers" WHERE "block_height" >= $1;

-- name: DeleteBalancesSinceHeight :exec
DELETE FROM "brc20_balances" WHERE "block_height" >= $1;

-- name: DeleteInscriptionEntriesSinceHeight :exec
DELETE FROM "brc20_inscription_entries" WHERE "created_at_height" >= $1;

-- name: DeleteInscriptionEntryStatesSinceHeight :exec
DELETE FROM "brc20_inscription_entry_states" WHERE "block_height" >= $1;

-- name: DeleteInscriptionTransfersSinceHeight :exec
DELETE FROM "brc20_inscription_transfers" WHERE "block_height" >= $1;

View File

@@ -1,5 +0,0 @@
-- sqlc queries for the indexer-state (client version / schema version) table.

-- name: GetLatestIndexerState :one
SELECT * FROM brc20_indexer_states ORDER BY created_at DESC LIMIT 1;

-- name: CreateIndexerState :exec
INSERT INTO brc20_indexer_states (client_version, network, db_version, event_hash_version) VALUES ($1, $2, $3, $4);

View File

@@ -1,69 +0,0 @@
package brc20
import (
"encoding/hex"
"strconv"
"strings"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/samber/lo"
)
// eventHashSeparator separates event strings when they are concatenated for
// hashing. NOTE(review): the getEvent*String helpers below join their own
// fields with ";" literals; this "|" separator is presumably used by the
// caller that chains events together — confirm at the hashing site.
const eventHashSeparator = "|"
// getEventDeployString renders a deploy event into its canonical string form
// used for event hashing. Numeric fields are fixed to the tick's decimals.
func getEventDeployString(event *entity.EventDeploy) string {
	fields := []string{
		"deploy-inscribe",
		event.InscriptionId.String(),
		hex.EncodeToString(event.PkScript),
		event.Tick,
		event.OriginalTick,
		event.TotalSupply.StringFixed(int32(event.Decimals)),
		strconv.Itoa(int(event.Decimals)),
		event.LimitPerMint.StringFixed(int32(event.Decimals)),
		lo.Ternary(event.IsSelfMint, "True", "False"),
	}
	return strings.Join(fields, ";")
}
// getEventMintString renders a mint event into its canonical string form used
// for event hashing; the parent id field is empty when no parent is set.
func getEventMintString(event *entity.EventMint, decimals uint16) string {
	parentId := ""
	if event.ParentId != nil {
		parentId = event.ParentId.String()
	}
	fields := []string{
		"mint-inscribe",
		event.InscriptionId.String(),
		hex.EncodeToString(event.PkScript),
		event.Tick,
		event.OriginalTick,
		event.Amount.StringFixed(int32(decimals)),
		parentId,
	}
	return strings.Join(fields, ";")
}
// getEventInscribeTransferString renders an inscribe-transfer event into its
// canonical string form used for event hashing.
func getEventInscribeTransferString(event *entity.EventInscribeTransfer, decimals uint16) string {
	fields := []string{
		"inscribe-transfer",
		event.InscriptionId.String(),
		hex.EncodeToString(event.PkScript),
		event.Tick,
		event.OriginalTick,
		event.Amount.StringFixed(int32(decimals)),
	}
	return strings.Join(fields, ";")
}
// getEventTransferTransferString renders a transfer-transfer event into its
// canonical string form used for event hashing. When the transfer was spent
// as fee, the recipient script field is left empty.
func getEventTransferTransferString(event *entity.EventTransferTransfer, decimals uint16) string {
	toPkScript := ""
	if !event.SpentAsFee {
		toPkScript = hex.EncodeToString(event.ToPkScript)
	}
	fields := []string{
		"transfer-transfer",
		event.InscriptionId.String(),
		hex.EncodeToString(event.FromPkScript),
		toPkScript,
		event.Tick,
		event.OriginalTick,
		event.Amount.StringFixed(int32(decimals)),
	}
	return strings.Join(fields, ";")
}

View File

@@ -1,16 +0,0 @@
package brc20
import "github.com/gaze-network/indexer-network/common"
// selfMintActivationHeights maps each network to the block height at which
// the self-mint feature becomes active.
var selfMintActivationHeights = map[common.Network]uint64{
	common.NetworkMainnet: 837090,
	common.NetworkTestnet: 837090,
}

// isSelfMintActivated reports whether self-mint is active at the given height
// on the given network. Unknown networks never activate self-mint.
func isSelfMintActivated(height uint64, network common.Network) bool {
	if activationHeight, ok := selfMintActivationHeights[network]; ok {
		return height >= activationHeight
	}
	return false
}

View File

@@ -1,21 +0,0 @@
package brc20
// Operation is a BRC20 operation name as it appears in the inscription
// payload's "op" field.
type Operation string

// The three operations recognized by the BRC20 protocol.
const (
	OperationDeploy   Operation = "deploy"
	OperationMint     Operation = "mint"
	OperationTransfer Operation = "transfer"
)

// IsValid reports whether o is one of the recognized BRC20 operations.
func (o Operation) IsValid() bool {
	return o == OperationDeploy || o == OperationMint || o == OperationTransfer
}

// String returns the raw operation name.
func (o Operation) String() string {
	return string(o)
}

View File

@@ -1,173 +0,0 @@
package brc20
import (
"encoding/json"
"math"
"math/big"
"strconv"
"strings"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/shopspring/decimal"
)
// rawPayload mirrors the raw JSON shape of a BRC20 inscription payload before
// validation. All numeric fields arrive as strings.
type rawPayload struct {
	// NOTE(review): P carries no json tag; encoding/json matches "p"
	// case-insensitively by field name, so this still decodes — confirm this
	// is intentional rather than an omission.
	P    string
	Op   string `json:"op"`   // required
	Tick string `json:"tick"` // required

	// for deploy operations
	Max      string  `json:"max"` // required
	Lim      *string `json:"lim"`
	Dec      *string `json:"dec"`
	SelfMint *string `json:"self_mint"`

	// for mint/transfer operations
	Amt string `json:"amt"` // required
}
// Payload is a validated BRC20 payload produced by ParsePayload, together
// with the inscription transfer it was parsed from.
type Payload struct {
	Transfer     *entity.InscriptionTransfer // source transfer carrying the JSON content
	P            string                      // protocol id, always "brc-20" after validation
	Op           Operation
	Tick         string // lower-cased tick
	OriginalTick string // original tick before lower-cased

	// for deploy operations
	Max      decimal.Decimal // total supply
	Lim      decimal.Decimal // per-mint limit; defaults to Max when "lim" is absent
	Dec      uint16          // decimals, 0-18
	SelfMint bool

	// for mint/transfer operations
	Amt decimal.Decimal
}
// Validation errors returned by ParsePayload and parseNumericString.
var (
	ErrInvalidProtocol   = errors.New("invalid protocol: must be 'brc20'")
	ErrInvalidOperation  = errors.New("invalid operation for brc20: must be one of 'deploy', 'mint', or 'transfer'")
	ErrInvalidTickLength = errors.New("invalid tick length: must be 4 or 5 bytes")
	ErrEmptyTick         = errors.New("empty tick")
	ErrEmptyMax          = errors.New("empty max")
	ErrInvalidMax        = errors.New("invalid max")
	ErrInvalidDec        = errors.New("invalid dec")
	ErrInvalidSelfMint   = errors.New("invalid self_mint")
	ErrInvalidAmt        = errors.New("invalid amt")
	ErrNumberOverflow    = errors.New("number overflow: max value is (2^64-1)")
)
// ParsePayload decodes and validates the BRC20 JSON payload carried by the
// given inscription transfer.
//
// Common validation: protocol must be "brc-20", op must be a known Operation,
// tick must be 4 or 5 bytes. Deploy payloads additionally validate
// max/lim/dec/self_mint (5-byte ticks are self-mint only, and max=0 on a
// self-mint tick means unlimited supply). Mint/transfer payloads validate amt
// with the maximum precision of 18 decimals; the tick's actual decimals are
// checked later by the caller once the tick entry is known.
//
// Returns a sentinel error (ErrInvalid*, ErrEmpty*) or a wrapped parse error
// describing the first validation failure.
func ParsePayload(transfer *entity.InscriptionTransfer) (*Payload, error) {
	var p rawPayload
	if err := json.Unmarshal(transfer.Content, &p); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal payload as json")
	}
	if p.P != "brc-20" {
		return nil, errors.WithStack(ErrInvalidProtocol)
	}
	if !Operation(p.Op).IsValid() {
		return nil, errors.WithStack(ErrInvalidOperation)
	}
	if p.Tick == "" {
		return nil, errors.WithStack(ErrEmptyTick)
	}
	if len(p.Tick) != 4 && len(p.Tick) != 5 {
		return nil, errors.WithStack(ErrInvalidTickLength)
	}
	parsed := Payload{
		Transfer:     transfer,
		P:            p.P,
		Op:           Operation(p.Op),
		Tick:         strings.ToLower(p.Tick),
		OriginalTick: p.Tick,
	}
	switch parsed.Op {
	case OperationDeploy:
		if p.Max == "" {
			return nil, errors.WithStack(ErrEmptyMax)
		}
		// "dec" defaults to 18 when omitted or empty.
		rawDec := "18"
		if p.Dec != nil && *p.Dec != "" {
			rawDec = *p.Dec
		}
		// NOTE: the original bound the ParseUint error to a variable named
		// `ok`; renamed to `err` for idiomatic error handling.
		dec, err := strconv.ParseUint(rawDec, 10, 16)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse dec")
		}
		if dec > 18 {
			return nil, errors.WithStack(ErrInvalidDec)
		}
		parsed.Dec = uint16(dec)

		max, err := parseNumericString(p.Max, dec)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse max")
		}
		parsed.Max = max
		// "lim" defaults to max when absent.
		limit := max
		if p.Lim != nil {
			limit, err = parseNumericString(*p.Lim, dec)
			if err != nil {
				return nil, errors.Wrap(err, "failed to parse lim")
			}
		}
		parsed.Lim = limit

		// 5-bytes ticks are self-mint only
		if len(parsed.OriginalTick) == 5 {
			if p.SelfMint == nil || *p.SelfMint != "true" {
				return nil, errors.WithStack(ErrInvalidSelfMint)
			}
			// infinite mints if tick is self-mint, and max is set to 0
			if parsed.Max.IsZero() {
				parsed.Max = maxNumber
				if parsed.Lim.IsZero() {
					parsed.Lim = maxNumber
				}
			}
		}
		if parsed.Max.IsZero() {
			return nil, errors.WithStack(ErrInvalidMax)
		}
	case OperationMint, OperationTransfer:
		if p.Amt == "" {
			return nil, errors.WithStack(ErrInvalidAmt)
		}
		// NOTE: check tick decimals after parsing payload
		amt, err := parseNumericString(p.Amt, 18)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse amt")
		}
		parsed.Amt = amt
	default:
		return nil, errors.WithStack(ErrInvalidOperation)
	}
	return &parsed, nil
}
// max number for all numeric fields (except dec) is (2^64-1)
var (
	maxNumber = decimal.NewFromBigInt(new(big.Int).SetUint64(math.MaxUint64), 0)
)
// parseNumericString parses s as a decimal number, rejecting values with more
// than maxDec decimal places or greater than maxNumber (2^64-1).
func parseNumericString(s string, maxDec uint64) (decimal.Decimal, error) {
	d, err := decimal.NewFromString(s)
	if err != nil {
		return decimal.Decimal{}, errors.Wrap(err, "failed to parse decimal number")
	}
	// d.Exponent() is negative for numbers with decimal places, so its
	// negation is the decimal-place count. The original error message printed
	// the raw (negative) exponent as "got"; report the actual count instead.
	if -d.Exponent() > int32(maxDec) {
		return decimal.Decimal{}, errors.Errorf("cannot parse decimal number: too many decimal points: expected %d got %d", maxDec, -d.Exponent())
	}
	if d.GreaterThan(maxNumber) {
		return decimal.Decimal{}, errors.WithStack(ErrNumberOverflow)
	}
	return d, nil
}

View File

@@ -1,72 +0,0 @@
package datagateway
import (
"context"
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
)
// BRC20DataGateway combines read and write access to BRC20 data and can open
// DB transactions.
type BRC20DataGateway interface {
	BRC20ReaderDataGateway
	BRC20WriterDataGateway

	// BeginBRC20Tx returns a new BRC20DataGateway with transaction enabled. All write operations performed in this datagateway must be committed to persist changes.
	BeginBRC20Tx(ctx context.Context) (BRC20DataGatewayWithTx, error)
}

// BRC20DataGatewayWithTx is a BRC20DataGateway bound to an open transaction.
type BRC20DataGatewayWithTx interface {
	BRC20DataGateway
	Tx
}

// BRC20ReaderDataGateway exposes all read queries used by the BRC20 processor
// and API handlers. Batch getters return maps keyed by the query input.
type BRC20ReaderDataGateway interface {
	GetLatestBlock(ctx context.Context) (types.BlockHeader, error)
	GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error)
	GetProcessorStats(ctx context.Context) (*entity.ProcessorStats, error)
	GetInscriptionTransfersInOutPoints(ctx context.Context, outPoints []wire.OutPoint) (map[ordinals.SatPoint][]*entity.InscriptionTransfer, error)
	GetInscriptionEntriesByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*ordinals.InscriptionEntry, error)
	GetInscriptionNumbersByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]int64, error)
	GetInscriptionParentsByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]ordinals.InscriptionId, error)
	// GetBalancesBatchAtHeight returns the latest balance snapshots at or
	// below blockHeight, keyed by pkscript hex, then by tick.
	GetBalancesBatchAtHeight(ctx context.Context, blockHeight uint64, queries []GetBalancesBatchAtHeightQuery) (map[string]map[string]*entity.Balance, error)
	GetTickEntriesByTicks(ctx context.Context, ticks []string) (map[string]*entity.TickEntry, error)
	GetEventInscribeTransfersByInscriptionIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*entity.EventInscribeTransfer, error)
	GetLatestEventId(ctx context.Context) (int64, error)
}

// BRC20WriterDataGateway exposes all write operations. The Delete*SinceHeight
// methods revert data at or above the given height (used on chain reorgs).
type BRC20WriterDataGateway interface {
	CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error
	CreateProcessorStats(ctx context.Context, stats *entity.ProcessorStats) error
	CreateTickEntries(ctx context.Context, blockHeight uint64, entries []*entity.TickEntry) error
	CreateTickEntryStates(ctx context.Context, blockHeight uint64, entryStates []*entity.TickEntry) error
	CreateInscriptionEntries(ctx context.Context, blockHeight uint64, entries []*ordinals.InscriptionEntry) error
	CreateInscriptionEntryStates(ctx context.Context, blockHeight uint64, entryStates []*ordinals.InscriptionEntry) error
	CreateInscriptionTransfers(ctx context.Context, transfers []*entity.InscriptionTransfer) error
	CreateEventDeploys(ctx context.Context, events []*entity.EventDeploy) error
	CreateEventMints(ctx context.Context, events []*entity.EventMint) error
	CreateEventInscribeTransfers(ctx context.Context, events []*entity.EventInscribeTransfer) error
	CreateEventTransferTransfers(ctx context.Context, events []*entity.EventTransferTransfer) error
	CreateBalances(ctx context.Context, balances []*entity.Balance) error

	// used for revert data
	DeleteIndexedBlocksSinceHeight(ctx context.Context, height uint64) error
	DeleteProcessorStatsSinceHeight(ctx context.Context, height uint64) error
	DeleteTickEntriesSinceHeight(ctx context.Context, height uint64) error
	DeleteTickEntryStatesSinceHeight(ctx context.Context, height uint64) error
	DeleteEventDeploysSinceHeight(ctx context.Context, height uint64) error
	DeleteEventMintsSinceHeight(ctx context.Context, height uint64) error
	DeleteEventInscribeTransfersSinceHeight(ctx context.Context, height uint64) error
	DeleteEventTransferTransfersSinceHeight(ctx context.Context, height uint64) error
	DeleteBalancesSinceHeight(ctx context.Context, height uint64) error
	DeleteInscriptionEntriesSinceHeight(ctx context.Context, height uint64) error
	DeleteInscriptionEntryStatesSinceHeight(ctx context.Context, height uint64) error
	DeleteInscriptionTransfersSinceHeight(ctx context.Context, height uint64) error
}

// GetBalancesBatchAtHeightQuery is one (pkscript, tick) lookup for
// GetBalancesBatchAtHeight. NOTE(review): BlockHeight here appears redundant
// with the method's blockHeight argument — confirm which one the
// implementation uses.
type GetBalancesBatchAtHeightQuery struct {
	PkScriptHex string
	Tick        string
	BlockHeight uint64
}

View File

@@ -1,12 +0,0 @@
package datagateway
import (
"context"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
)
// IndexerInfoDataGateway reads and records indexer client/schema version
// state (the brc20_indexer_states table).
type IndexerInfoDataGateway interface {
	GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error)
	CreateIndexerState(ctx context.Context, state entity.IndexerState) error
}

View File

@@ -1,12 +0,0 @@
package datagateway
import "context"
// Tx is the minimal transaction-control contract shared by datagateways with
// an open DB transaction.
type Tx interface {
	// Commit commits the DB transaction. All changes made after Begin() will be persisted. Calling Commit() will close the current transaction.
	// If Commit() is called without a prior Begin(), it must be a no-op.
	Commit(ctx context.Context) error

	// Rollback rolls back the DB transaction. All changes made after Begin() will be discarded.
	// Rollback() must be safe to call even if no transaction is active. Hence, a defer Rollback() is safe, even if Commit() was called prior with non-error conditions.
	Rollback(ctx context.Context) error
}

View File

@@ -1,11 +0,0 @@
package entity
import "github.com/shopspring/decimal"
// Balance is a per-block snapshot of one wallet's balance in one tick
// (mirrors the brc20_balances table).
type Balance struct {
	PkScript    []byte // output script identifying the balance owner
	Tick        string // lower-cased tick
	BlockHeight uint64 // height at which this snapshot was written
	// NOTE(review): presumably OverallBalance includes amounts locked in
	// pending transfer inscriptions while AvailableBalance excludes them —
	// confirm against the processor.
	OverallBalance   decimal.Decimal
	AvailableBalance decimal.Decimal
}

View File

@@ -1,28 +0,0 @@
package entity
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/shopspring/decimal"
)
// EventDeploy is a BRC20 deploy event (mirrors the brc20_event_deploys table).
type EventDeploy struct {
	Id                int64 // sequential event id
	InscriptionId     ordinals.InscriptionId
	InscriptionNumber int64
	Tick              string // lower-cased tick
	OriginalTick      string // tick with original casing
	TxHash            chainhash.Hash
	BlockHeight       uint64
	TxIndex           uint32
	Timestamp         time.Time
	PkScript          []byte // deployer's output script
	SatPoint          ordinals.SatPoint
	TotalSupply       decimal.Decimal
	Decimals          uint16
	LimitPerMint      decimal.Decimal
	IsSelfMint        bool
}

View File

@@ -1,27 +0,0 @@
package entity
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/shopspring/decimal"
)
// EventInscribeTransfer is a BRC20 inscribe-transfer event (mirrors the
// brc20_event_inscribe_transfers table).
type EventInscribeTransfer struct {
	Id                int64 // sequential event id
	InscriptionId     ordinals.InscriptionId
	InscriptionNumber int64
	Tick              string // lower-cased tick
	OriginalTick      string // tick with original casing
	TxHash            chainhash.Hash
	BlockHeight       uint64
	TxIndex           uint32
	Timestamp         time.Time
	PkScript          []byte // holder's output script
	SatPoint          ordinals.SatPoint
	OutputIndex       uint32 // output carrying the transfer inscription
	SatsAmount        uint64 // sats in that output
	Amount            decimal.Decimal
}

View File

@@ -1,26 +0,0 @@
package entity
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/shopspring/decimal"
)
// EventMint is a BRC20 mint event (mirrors the brc20_event_mints table).
type EventMint struct {
	Id                int64 // sequential event id
	InscriptionId     ordinals.InscriptionId
	InscriptionNumber int64
	Tick              string // lower-cased tick
	OriginalTick      string // tick with original casing
	TxHash            chainhash.Hash
	BlockHeight       uint64
	TxIndex           uint32
	Timestamp         time.Time
	PkScript          []byte // minter's output script
	SatPoint          ordinals.SatPoint
	Amount            decimal.Decimal
	// ParentId is the parent deploy inscription id, required when minting a
	// self-mint ticker (see schema comment); nil otherwise.
	ParentId *ordinals.InscriptionId
}

View File

@@ -1,30 +0,0 @@
package entity
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/shopspring/decimal"
)
// EventTransferTransfer is a BRC20 transfer-transfer event — the spend of a
// previously inscribed transfer (mirrors the brc20_event_transfer_transfers
// table).
type EventTransferTransfer struct {
	Id                int64 // sequential event id
	InscriptionId     ordinals.InscriptionId
	InscriptionNumber int64
	Tick              string // lower-cased tick
	OriginalTick      string // tick with original casing
	TxHash            chainhash.Hash
	BlockHeight       uint64
	TxIndex           uint32
	Timestamp         time.Time
	FromPkScript      []byte
	FromSatPoint      ordinals.SatPoint
	FromInputIndex    uint32
	ToPkScript        []byte
	ToSatPoint        ordinals.SatPoint
	ToOutputIndex     uint32
	SpentAsFee        bool // true when the inscription was spent as miner fee (no recipient)
	Amount            decimal.Decimal
}

View File

@@ -1,31 +0,0 @@
package entity
import (
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
)
// OriginOld describes an inscription that already existed and is being moved
// by the current transaction.
type OriginOld struct {
	Content     []byte
	OldSatPoint ordinals.SatPoint
	InputIndex  uint32
}

// OriginNew describes an inscription newly revealed by the current
// transaction, with the curse/unbound flags determined during parsing.
type OriginNew struct {
	Inscription    ordinals.Inscription
	Parent         *ordinals.InscriptionId
	Pointer        *uint64
	Fee            uint64
	Cursed         bool
	CursedForBRC20 bool
	Hidden         bool
	Reinscription  bool
	Unbound        bool
}

// Flotsam is an inscription in flight within a transaction, identified by its
// sat offset. NOTE(review): the term mirrors ord's reference implementation —
// confirm semantics of Offset (presumably the offset into the transaction's
// total input sats).
type Flotsam struct {
	Tx            *types.Transaction
	OriginOld     *OriginOld // OriginOld and OriginNew are mutually exclusive
	OriginNew     *OriginNew // OriginOld and OriginNew are mutually exclusive
	Offset        uint64
	InscriptionId ordinals.InscriptionId
}

View File

@@ -1,10 +0,0 @@
package entity
import "github.com/btcsuite/btcd/chaincfg/chainhash"
// IndexedBlock is one fully processed block (mirrors the brc20_indexed_blocks
// table). EventHash commits to this block's events; CumulativeEventHash
// chains hashes across blocks.
type IndexedBlock struct {
	Height              uint64
	Hash                chainhash.Hash
	EventHash           []byte
	CumulativeEventHash []byte
}

View File

@@ -1,15 +0,0 @@
package entity
import (
"time"
"github.com/gaze-network/indexer-network/common"
)
// IndexerState records the client, schema, and event-hash versions the
// indexer was running with (mirrors the brc20_indexer_states table).
type IndexerState struct {
	CreatedAt        time.Time
	ClientVersion    string
	DBVersion        int32
	EventHashVersion int32
	Network          common.Network
}

View File

@@ -1,21 +0,0 @@
package entity
import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
)
// InscriptionTransfer is one movement of a tracked inscription (mirrors the
// brc20_inscription_transfers table).
type InscriptionTransfer struct {
	InscriptionId  ordinals.InscriptionId
	BlockHeight    uint64
	TxIndex        uint32
	TxHash         chainhash.Hash
	Content        []byte // raw inscription content (BRC20 JSON payload)
	FromInputIndex uint32
	OldSatPoint    ordinals.SatPoint // location before this transfer
	NewSatPoint    ordinals.SatPoint // location after this transfer
	NewPkScript    []byte
	NewOutputValue uint64
	SentAsFee      bool   // true when the inscription was spent as miner fee
	TransferCount  uint32 // number of times this inscription has moved, including this one
}

View File

@@ -1,8 +0,0 @@
package entity
// ProcessorStats holds the running ordinals counters snapshotted at a block
// (mirrors the brc20_processor_stats table).
type ProcessorStats struct {
	BlockHeight             uint64
	CursedInscriptionCount  uint64
	BlessedInscriptionCount uint64
	LostSats                uint64
}

View File

@@ -1,25 +0,0 @@
package entity
import (
"time"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/shopspring/decimal"
)
// TickEntry is a tick's deploy-time data plus its latest mutable counters
// (joins brc20_tick_entries with the newest brc20_tick_entry_states row).
type TickEntry struct {
	Tick                string // lower-cased tick
	OriginalTick        string // tick with original casing
	TotalSupply         decimal.Decimal
	Decimals            uint16
	LimitPerMint        decimal.Decimal
	IsSelfMint          bool
	DeployInscriptionId ordinals.InscriptionId
	DeployedAt          time.Time
	DeployedAtHeight    uint64

	MintedAmount decimal.Decimal
	BurnedAmount decimal.Decimal
	// CompletedAt/CompletedAtHeight are nullable in the DB; the zero value
	// here means the mint is not yet completed.
	CompletedAt       time.Time
	CompletedAtHeight uint64
}

View File

@@ -1,285 +0,0 @@
package ordinals
import (
"bytes"
"encoding/binary"
"github.com/btcsuite/btcd/txscript"
"github.com/gaze-network/indexer-network/core/types"
"github.com/samber/lo"
)
// Envelope is one raw inscription envelope (OP_FALSE OP_IF "ord" ... OP_ENDIF)
// found in a transaction input's tapscript, together with parse-quality flags
// recorded for later curse evaluation.
type Envelope struct {
	Inscription           Inscription
	InputIndex            uint32 // Index of input that contains the envelope
	Offset                int    // Number of envelope in the input
	PushNum               bool   // True if envelope contains pushnum opcodes
	Stutter               bool   // True if envelope matches stuttering curse structure
	IncompleteField       bool   // True if payload is incomplete
	DuplicateField        bool   // True if payload contains duplicated field
	UnrecognizedEvenField bool   // True if payload contains unrecognized even field
}
// ParseEnvelopesFromTx extracts every inscription envelope found in the
// tapscripts of the given transaction's input witnesses. Inputs without a
// script-path spend are skipped.
func ParseEnvelopesFromTx(tx *types.Transaction) []*Envelope {
	result := make([]*Envelope, 0)
	for inputIndex, input := range tx.TxIn {
		tokenizer, found := extractTapScript(input.Witness)
		if !found {
			continue
		}
		result = append(result, envelopesFromTapScript(tokenizer, inputIndex)...)
	}
	return result
}
// protocolId is the envelope protocol identifier ("ord") pushed immediately
// after OP_FALSE OP_IF to mark an inscription envelope.
var protocolId = []byte("ord")

// envelopesFromTapScript scans a tapscript for inscription envelopes and
// parses each one found. inputIndex is the transaction input the tapscript
// was extracted from.
//
// Tokenizer errors (malformed scripts) are not fatal: envelopes parsed before
// the error are still returned. Note that ScriptTokenizer.Next returns false
// as soon as an error occurs, so no explicit Err() checks are needed here;
// the previous in-loop check and the duplicated trailing return were dead
// code and have been removed.
func envelopesFromTapScript(tokenizer txscript.ScriptTokenizer, inputIndex int) []*Envelope {
	envelopes := make([]*Envelope, 0)
	var stuttered bool
	for tokenizer.Next() {
		if tokenizer.Opcode() == txscript.OP_FALSE {
			envelope, stutter := envelopeFromTokenizer(tokenizer, inputIndex, len(envelopes), stuttered)
			if envelope != nil {
				envelopes = append(envelopes, envelope)
			} else {
				stuttered = stutter
			}
		}
	}
	return envelopes
}
// envelopeFromTokenizer parses a single inscription envelope whose leading
// OP_FALSE was just consumed by the caller. offset is the number of envelopes
// already found in this input. It returns the parsed envelope, or nil when
// the script at this position is not an envelope; the returned bool reports
// whether the failed parse began with another OP_FALSE (a "stutter"), which
// the caller records so the next envelope can be flagged as stuttered.
func envelopeFromTokenizer(tokenizer txscript.ScriptTokenizer, inputIndex int, offset int, stuttered bool) (*Envelope, bool) {
	tokenizer.Next()
	if tokenizer.Opcode() != txscript.OP_IF {
		return nil, tokenizer.Opcode() == txscript.OP_FALSE
	}
	tokenizer.Next()
	if !bytes.Equal(tokenizer.Data(), protocolId) {
		return nil, tokenizer.Opcode() == txscript.OP_FALSE
	}
	var pushNum bool
	payload := make([][]byte, 0)
	for tokenizer.Next() {
		if tokenizer.Err() != nil {
			return nil, false
		}
		opCode := tokenizer.Opcode()
		if opCode == txscript.OP_ENDIF {
			break
		}
		switch opCode {
		// NOTE(review): ord converts OP_PUSHNUM_N to the byte value N
		// (e.g. OP_10 -> 0x0a). The 0x10..0x16 literals below for
		// OP_10..OP_16 look like a hex/decimal mix-up (0x10 == 16, not 10);
		// confirm against ord 0.14 and fix together with the matching
		// pushNumOpCodes table in the envelope tests.
		case txscript.OP_1NEGATE:
			pushNum = true
			payload = append(payload, []byte{0x81})
		case txscript.OP_1:
			pushNum = true
			payload = append(payload, []byte{0x01})
		case txscript.OP_2:
			pushNum = true
			payload = append(payload, []byte{0x02})
		case txscript.OP_3:
			pushNum = true
			payload = append(payload, []byte{0x03})
		case txscript.OP_4:
			pushNum = true
			payload = append(payload, []byte{0x04})
		case txscript.OP_5:
			pushNum = true
			payload = append(payload, []byte{0x05})
		case txscript.OP_6:
			pushNum = true
			payload = append(payload, []byte{0x06})
		case txscript.OP_7:
			pushNum = true
			payload = append(payload, []byte{0x07})
		case txscript.OP_8:
			pushNum = true
			payload = append(payload, []byte{0x08})
		case txscript.OP_9:
			pushNum = true
			payload = append(payload, []byte{0x09})
		case txscript.OP_10:
			pushNum = true
			payload = append(payload, []byte{0x10})
		case txscript.OP_11:
			pushNum = true
			payload = append(payload, []byte{0x11})
		case txscript.OP_12:
			pushNum = true
			payload = append(payload, []byte{0x12})
		case txscript.OP_13:
			pushNum = true
			payload = append(payload, []byte{0x13})
		case txscript.OP_14:
			pushNum = true
			payload = append(payload, []byte{0x14})
		case txscript.OP_15:
			pushNum = true
			payload = append(payload, []byte{0x15})
		case txscript.OP_16:
			pushNum = true
			payload = append(payload, []byte{0x16})
		case txscript.OP_0:
			// OP_0 is a special case, it is accepted in ord's implementation
			payload = append(payload, []byte{})
		default:
			// Any other opcode must be a data push; a non-push opcode
			// inside the envelope invalidates it.
			data := tokenizer.Data()
			if data == nil {
				return nil, false
			}
			payload = append(payload, data)
		}
	}
	// incomplete envelope: script ended before OP_ENDIF
	if tokenizer.Done() && tokenizer.Opcode() != txscript.OP_ENDIF {
		return nil, false
	}
	// find body (empty data push in even index payload)
	bodyIndex := -1
	for i, value := range payload {
		if i%2 == 0 && len(value) == 0 {
			bodyIndex = i
			break
		}
	}
	var fieldPayloads [][]byte
	var body []byte
	if bodyIndex != -1 {
		fieldPayloads = payload[:bodyIndex]
		body = lo.Flatten(payload[bodyIndex+1:])
	} else {
		fieldPayloads = payload[:]
	}
	// collect (tag, value) pairs; a dangling key with no value marks the
	// payload as incomplete
	var incompleteField bool
	fields := make(Fields)
	for _, chunk := range lo.Chunk(fieldPayloads, 2) {
		if len(chunk) != 2 {
			incompleteField = true
			break
		}
		key := chunk[0]
		value := chunk[1]
		// key cannot be empty, as checked by bodyIndex above
		tag := Tag(key[0])
		fields[tag] = append(fields[tag], value)
	}
	var duplicateField bool
	for _, values := range fields {
		if len(values) > 1 {
			duplicateField = true
			break
		}
	}
	rawContentEncoding := fields.Take(TagContentEncoding)
	rawContentType := fields.Take(TagContentType)
	rawDelegate := fields.Take(TagDelegate)
	rawMetadata := fields.Take(TagMetadata)
	rawMetaprotocol := fields.Take(TagMetaprotocol)
	rawParent := fields.Take(TagParent)
	rawPointer := fields.Take(TagPointer)
	// any leftover even tag is unrecognized and curses the inscription
	unrecognizedEvenField := lo.SomeBy(lo.Keys(fields), func(key Tag) bool {
		return key%2 == 0
	})
	var delegate, parent *InscriptionId
	inscriptionId, err := NewInscriptionIdFromString(string(rawDelegate))
	if err == nil {
		delegate = &inscriptionId
	}
	inscriptionId, err = NewInscriptionIdFromString(string(rawParent))
	if err == nil {
		parent = &inscriptionId
	}
	var pointer *uint64
	// rawPointer fits in a uint64 only when every byte beyond the first 8 is
	// zero (the value is little-endian, so the high bytes must be unset).
	// BUGFIX: the previous condition required the trailing bytes to be
	// NONZERO (`value != 0`), i.e. it accepted exactly the overflowing
	// pointers it was meant to reject.
	if rawPointer != nil && (len(rawPointer) <= 8 || lo.EveryBy(rawPointer[8:], func(value byte) bool {
		return value == 0
	})) {
		// pad zero bytes to 8 bytes
		if len(rawPointer) < 8 {
			rawPointer = append(rawPointer, make([]byte, 8-len(rawPointer))...)
		}
		pointer = lo.ToPtr(binary.LittleEndian.Uint64(rawPointer))
	}
	inscription := Inscription{
		Content:         body,
		ContentEncoding: string(rawContentEncoding),
		ContentType:     string(rawContentType),
		Delegate:        delegate,
		Metadata:        rawMetadata,
		Metaprotocol:    string(rawMetaprotocol),
		Parent:          parent,
		Pointer:         pointer,
	}
	return &Envelope{
		Inscription:           inscription,
		InputIndex:            uint32(inputIndex),
		Offset:                offset,
		PushNum:               pushNum,
		Stutter:               stuttered,
		IncompleteField:       incompleteField,
		DuplicateField:        duplicateField,
		UnrecognizedEvenField: unrecognizedEvenField,
	}, false
}
// Fields maps an envelope tag to the raw values pushed for that tag, in the
// order they appeared in the envelope payload.
type Fields map[Tag][][]byte

// Take removes and returns the value of tag from fields. For chunked tags all
// values are concatenated and the tag is removed entirely; otherwise only the
// first value is consumed, leaving any remaining values in place. It returns
// nil when the tag is absent.
func (fields Fields) Take(tag Tag) []byte {
	values, ok := fields[tag]
	if !ok {
		return nil
	}
	if tag.IsChunked() {
		delete(fields, tag)
		return lo.Flatten(values)
	}
	first := values[0]
	rest := values[1:]
	if len(rest) == 0 {
		delete(fields, tag)
		return first
	}
	fields[tag] = rest
	return first
}
// extractTapScript returns a tokenizer over the tapscript of a script-path
// spend witness, or false when the witness is not a script-path spend.
func extractTapScript(witness [][]byte) (txscript.ScriptTokenizer, bool) {
	stack := removeAnnexFromWitness(witness)
	// A script-path spend carries at least [script, control block].
	if len(stack) < 2 {
		return txscript.ScriptTokenizer{}, false
	}
	// The tapscript is the second-to-last stack element.
	tapScript := stack[len(stack)-2]
	return txscript.MakeScriptTokenizer(0, tapScript), true
}
// removeAnnexFromWitness strips the optional taproot annex (a final element
// starting with TaprootAnnexTag) from the witness stack, if present.
func removeAnnexFromWitness(witness [][]byte) [][]byte {
	if len(witness) < 2 {
		return witness
	}
	last := witness[len(witness)-1]
	if len(last) > 0 && last[0] == txscript.TaprootAnnexTag {
		return witness[:len(witness)-1]
	}
	return witness
}

View File

@@ -1,742 +0,0 @@
package ordinals
import (
"testing"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/core/types"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
)
// TestParseEnvelopesFromTx exercises envelope extraction end-to-end: witness
// handling, field/tag parsing, curse flags, pushnum opcodes, and stuttering.
func TestParseEnvelopesFromTx(t *testing.T) {
	// testTx asserts the envelopes parsed from a full transaction.
	testTx := func(t *testing.T, tx *types.Transaction, expected []*Envelope) {
		t.Helper()
		envelopes := ParseEnvelopesFromTx(tx)
		assert.Equal(t, expected, envelopes)
	}
	// testParseWitness wraps a raw tapscript in a minimal script-path
	// witness ([tapScript, control block]) and asserts the parsed envelopes.
	testParseWitness := func(t *testing.T, tapScript []byte, expected []*Envelope) {
		t.Helper()
		tx := &types.Transaction{
			Version:  2,
			LockTime: 0,
			TxIn: []*types.TxIn{
				{
					Witness: wire.TxWitness{
						tapScript,
						{},
					},
				},
			},
		}
		testTx(t, tx, expected)
	}
	// testEnvelope builds an OP_FALSE OP_IF <payload...> OP_ENDIF envelope
	// from data pushes and asserts the parsed envelopes.
	testEnvelope := func(t *testing.T, payload [][]byte, expected []*Envelope) {
		t.Helper()
		builder := NewPushScriptBuilder().
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF)
		for _, data := range payload {
			builder.AddData(data)
		}
		builder.AddOp(txscript.OP_ENDIF)
		script, err := builder.Script()
		assert.NoError(t, err)
		testParseWitness(
			t,
			script,
			expected,
		)
	}
	t.Run("empty_witness", func(t *testing.T) {
		testTx(t, &types.Transaction{
			Version:  2,
			LockTime: 0,
			TxIn: []*types.TxIn{{
				Witness: wire.TxWitness{},
			}},
		}, []*Envelope{})
	})
	t.Run("ignore_key_path_spends", func(t *testing.T) {
		testTx(t, &types.Transaction{
			Version:  2,
			LockTime: 0,
			TxIn: []*types.TxIn{{
				Witness: wire.TxWitness{
					utils.Must(NewPushScriptBuilder().
						AddOp(txscript.OP_FALSE).
						AddOp(txscript.OP_IF).
						AddData(protocolId).
						AddOp(txscript.OP_ENDIF).
						Script()),
				},
			}},
		}, []*Envelope{})
	})
	t.Run("ignore_key_path_spends_with_annex", func(t *testing.T) {
		testTx(t, &types.Transaction{
			Version:  2,
			LockTime: 0,
			TxIn: []*types.TxIn{{
				Witness: wire.TxWitness{
					utils.Must(NewPushScriptBuilder().
						AddOp(txscript.OP_FALSE).
						AddOp(txscript.OP_IF).
						AddData(protocolId).
						AddOp(txscript.OP_ENDIF).
						Script()),
					[]byte{txscript.TaprootAnnexTag},
				},
			}},
		}, []*Envelope{})
	})
	t.Run("parse_from_tapscript", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddOp(txscript.OP_ENDIF).
				Script()),
			[]*Envelope{{}},
		)
	})
	t.Run("ignore_unparsable_scripts", func(t *testing.T) {
		// a trailing garbage byte after a complete envelope does not
		// invalidate the envelopes already parsed
		script := utils.Must(NewPushScriptBuilder().
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddData(protocolId).
			AddOp(txscript.OP_ENDIF).
			Script())
		script = append(script, 0x01)
		testParseWitness(
			t,
			script,
			[]*Envelope{
				{},
			},
		)
	})
	t.Run("no_inscription", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				Script()),
			[]*Envelope{},
		)
	})
	t.Run("duplicate_field", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagNop.Bytes(),
				{},
				TagNop.Bytes(),
				{},
			},
			[]*Envelope{
				{
					DuplicateField: true,
				},
			},
		)
	})
	t.Run("with_content_type", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagBody.Bytes(),
				[]byte("ord"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("ord"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("with_content_encoding", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagContentEncoding.Bytes(),
				[]byte("br"),
				TagBody.Bytes(),
				[]byte("ord"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:         []byte("ord"),
						ContentType:     "text/plain;charset=utf-8",
						ContentEncoding: "br",
					},
				},
			},
		)
	})
	t.Run("with_unknown_tag", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagNop.Bytes(),
				[]byte("bar"),
				TagBody.Bytes(),
				[]byte("ord"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("ord"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("no_body", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("no_content_type", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagBody.Bytes(),
				[]byte("foo"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content: []byte("foo"),
					},
				},
			},
		)
	})
	t.Run("valid_body_in_multiple_pushes", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagBody.Bytes(),
				[]byte("foo"),
				[]byte("bar"),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("foobar"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("valid_body_in_zero_pushes", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagBody.Bytes(),
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte(""),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("valid_body_in_multiple_empty_pushes", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagBody.Bytes(),
				{},
				{},
				{},
				{},
				{},
				{},
				{},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte(""),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("valid_ignore_trailing", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddData(TagContentType.Bytes()).
				AddData([]byte("text/plain;charset=utf-8")).
				AddData(TagBody.Bytes()).
				AddData([]byte("ord")).
				AddOp(txscript.OP_ENDIF).
				AddOp(txscript.OP_CHECKSIG).
				Script()),
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("ord"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("valid_ignore_preceding", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_CHECKSIG).
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddData(TagContentType.Bytes()).
				AddData([]byte("text/plain;charset=utf-8")).
				AddData(TagBody.Bytes()).
				AddData([]byte("ord")).
				AddOp(txscript.OP_ENDIF).
				Script()),
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("ord"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("multiple_inscriptions_in_a_single_witness", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddData(TagContentType.Bytes()).
				AddData([]byte("text/plain;charset=utf-8")).
				AddData(TagBody.Bytes()).
				AddData([]byte("foo")).
				AddOp(txscript.OP_ENDIF).
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddData(TagContentType.Bytes()).
				AddData([]byte("text/plain;charset=utf-8")).
				AddData(TagBody.Bytes()).
				AddData([]byte("bar")).
				AddOp(txscript.OP_ENDIF).
				Script()),
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("foo"),
						ContentType: "text/plain;charset=utf-8",
					},
				},
				{
					Inscription: Inscription{
						Content:     []byte("bar"),
						ContentType: "text/plain;charset=utf-8",
					},
					Offset: 1,
				},
			},
		)
	})
	t.Run("invalid_utf8_does_not_render_inscription_invalid", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("text/plain;charset=utf-8"),
				TagBody.Bytes(),
				{0b10000000},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte{0b10000000},
						ContentType: "text/plain;charset=utf-8",
					},
				},
			},
		)
	})
	t.Run("no_endif", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				Script()),
			[]*Envelope{},
		)
	})
	t.Run("no_op_false", func(t *testing.T) {
		testParseWitness(
			t,
			utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddOp(txscript.OP_ENDIF).
				Script()),
			[]*Envelope{},
		)
	})
	t.Run("empty_envelope", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{},
			[]*Envelope{},
		)
	})
	t.Run("wrong_protocol_identifier", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				[]byte("foo"),
			},
			[]*Envelope{},
		)
	})
	t.Run("extract_from_second_input", func(t *testing.T) {
		testTx(
			t,
			&types.Transaction{
				Version:  2,
				LockTime: 0,
				TxIn: []*types.TxIn{{}, {
					Witness: wire.TxWitness{
						utils.Must(NewPushScriptBuilder().
							AddOp(txscript.OP_FALSE).
							AddOp(txscript.OP_IF).
							AddData(protocolId).
							AddData(TagContentType.Bytes()).
							AddData([]byte("text/plain;charset=utf-8")).
							AddData(TagBody.Bytes()).
							AddData([]byte("ord")).
							AddOp(txscript.OP_ENDIF).
							Script(),
						),
						{},
					},
				}},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte("ord"),
						ContentType: "text/plain;charset=utf-8",
					},
					InputIndex: 1,
				},
			},
		)
	})
	t.Run("inscribe_png", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagContentType.Bytes(),
				[]byte("image/png"),
				TagBody.Bytes(),
				{0x01, 0x02, 0x03},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Content:     []byte{0x01, 0x02, 0x03},
						ContentType: "image/png",
					},
				},
			},
		)
	})
	t.Run("unknown_odd_fields", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagNop.Bytes(),
				{0x00},
			},
			[]*Envelope{
				{
					Inscription: Inscription{},
				},
			},
		)
	})
	t.Run("unknown_even_fields", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagUnbound.Bytes(),
				{0x00},
			},
			[]*Envelope{
				{
					Inscription:           Inscription{},
					UnrecognizedEvenField: true,
				},
			},
		)
	})
	t.Run("pointer_field_is_recognized", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagPointer.Bytes(),
				{0x01},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Pointer: lo.ToPtr(uint64(1)),
					},
				},
			},
		)
	})
	t.Run("duplicate_pointer_field_makes_inscription_unbound", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagPointer.Bytes(),
				{0x01},
				TagPointer.Bytes(),
				{0x00},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Pointer: lo.ToPtr(uint64(1)),
					},
					DuplicateField:        true,
					UnrecognizedEvenField: true,
				},
			},
		)
	})
	t.Run("incomplete_field", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagNop.Bytes(),
			},
			[]*Envelope{
				{
					Inscription:     Inscription{},
					IncompleteField: true,
				},
			},
		)
	})
	t.Run("metadata_is_parsed_correctly", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagMetadata.Bytes(),
				{},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Metadata: []byte{},
					},
				},
			},
		)
	})
	t.Run("metadata_is_parsed_correctly_from_chunks", func(t *testing.T) {
		testEnvelope(
			t,
			[][]byte{
				protocolId,
				TagMetadata.Bytes(),
				{0x00},
				TagMetadata.Bytes(),
				{0x01},
			},
			[]*Envelope{
				{
					Inscription: Inscription{
						Metadata: []byte{0x00, 0x01},
					},
					DuplicateField: true,
				},
			},
		)
	})
	t.Run("pushnum_opcodes_are_parsed_correctly", func(t *testing.T) {
		// NOTE(review): this table mirrors the parser's mapping, but ord
		// converts OP_PUSHNUM_N to the byte value N (OP_10 -> 0x0a, ...,
		// OP_16 -> 0x10). The 0x10..0x16 entries below look like a
		// hex/decimal mix-up; confirm against ord 0.14 and fix together
		// with the switch in envelopeFromTokenizer.
		pushNumOpCodes := map[byte][]byte{
			txscript.OP_1NEGATE: {0x81},
			txscript.OP_1:       {0x01},
			txscript.OP_2:       {0x02},
			txscript.OP_3:       {0x03},
			txscript.OP_4:       {0x04},
			txscript.OP_5:       {0x05},
			txscript.OP_6:       {0x06},
			txscript.OP_7:       {0x07},
			txscript.OP_8:       {0x08},
			txscript.OP_9:       {0x09},
			txscript.OP_10:      {0x10},
			txscript.OP_11:      {0x11},
			txscript.OP_12:      {0x12},
			txscript.OP_13:      {0x13},
			txscript.OP_14:      {0x14},
			txscript.OP_15:      {0x15},
			txscript.OP_16:      {0x16},
		}
		for opCode, value := range pushNumOpCodes {
			script := utils.Must(NewPushScriptBuilder().
				AddOp(txscript.OP_FALSE).
				AddOp(txscript.OP_IF).
				AddData(protocolId).
				AddData(TagBody.Bytes()).
				AddOp(opCode).
				AddOp(txscript.OP_ENDIF).
				Script())
			testParseWitness(
				t,
				script,
				[]*Envelope{
					{
						Inscription: Inscription{
							Content: value,
						},
						PushNum: true,
					},
				},
			)
		}
	})
	t.Run("stuttering", func(t *testing.T) {
		// an extra OP_FALSE before the envelope marks it as stuttered
		script := utils.Must(NewPushScriptBuilder().
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddData(protocolId).
			AddOp(txscript.OP_ENDIF).
			Script())
		testParseWitness(
			t,
			script,
			[]*Envelope{
				{
					Inscription: Inscription{},
					Stutter:     true,
				},
			},
		)
		script = utils.Must(NewPushScriptBuilder().
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddData(protocolId).
			AddOp(txscript.OP_ENDIF).
			Script())
		testParseWitness(
			t,
			script,
			[]*Envelope{
				{
					Inscription: Inscription{},
					Stutter:     true,
				},
			},
		)
		// an intervening non-push opcode breaks the stutter pattern
		script = utils.Must(NewPushScriptBuilder().
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_AND).
			AddOp(txscript.OP_FALSE).
			AddOp(txscript.OP_IF).
			AddData(protocolId).
			AddOp(txscript.OP_ENDIF).
			Script())
		testParseWitness(
			t,
			script,
			[]*Envelope{
				{
					Inscription: Inscription{},
					Stutter:     false,
				},
			},
		)
	})
}

View File

@@ -1,13 +0,0 @@
package ordinals
import "github.com/gaze-network/indexer-network/common"
// GetJubileeHeight returns the activation height of the ord "Jubilee"
// upgrade for the given network. It panics on unsupported networks.
func GetJubileeHeight(network common.Network) uint64 {
	jubileeHeights := map[common.Network]uint64{
		common.NetworkMainnet: 824544,
		common.NetworkTestnet: 2544192,
	}
	height, ok := jubileeHeights[network]
	if !ok {
		panic("unsupported network")
	}
	return height
}

View File

@@ -1,27 +0,0 @@
package ordinals
import "time"
// Inscription is the parsed payload of an inscription envelope: the body and
// the recognized field tags.
type Inscription struct {
	Content         []byte
	ContentEncoding string
	ContentType     string
	Delegate        *InscriptionId
	Metadata        []byte
	Metaprotocol    string
	Parent          *InscriptionId // in 0.14, inscription has only one parent
	Pointer         *uint64        // value of the pointer field, if present
}
// TODO: refactor ordinals.InscriptionEntry to entity.InscriptionEntry

// InscriptionEntry is the stored record of an inscription: identity, assigned
// numbers, curse status, creation point, and the parsed payload.
type InscriptionEntry struct {
	Id InscriptionId
	// Number -- presumably the ord inscription number (negative when
	// cursed); TODO confirm against the assigning processor.
	Number int64
	// SequenceNumber -- presumably a global creation-order counter;
	// TODO confirm.
	SequenceNumber  uint64
	Cursed          bool
	CursedForBRC20  bool
	CreatedAt       time.Time
	CreatedAtHeight uint64
	Inscription     Inscription
	TransferCount   uint32
}

View File

@@ -1,67 +0,0 @@
package ordinals
import (
"fmt"
"strconv"
"strings"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
)
// InscriptionId identifies an inscription by the hash of the transaction that
// revealed it and an index, rendered as "<txid>i<index>".
type InscriptionId struct {
	TxHash chainhash.Hash
	Index  uint32
}
// String renders the inscription id as "<txid>i<index>".
func (i InscriptionId) String() string {
	return i.TxHash.String() + "i" + strconv.FormatUint(uint64(i.Index), 10)
}
// NewInscriptionId builds an InscriptionId from its two components.
func NewInscriptionId(txHash chainhash.Hash, index uint32) InscriptionId {
	return InscriptionId{TxHash: txHash, Index: index}
}
var ErrInscriptionIdInvalidSeparator = fmt.Errorf("invalid inscription id: must contain exactly one separator")
func NewInscriptionIdFromString(s string) (InscriptionId, error) {
parts := strings.SplitN(s, "i", 2)
if len(parts) != 2 {
return InscriptionId{}, errors.WithStack(ErrInscriptionIdInvalidSeparator)
}
txHash, err := chainhash.NewHashFromStr(parts[0])
if err != nil {
return InscriptionId{}, errors.Wrap(err, "invalid inscription id: cannot parse txHash")
}
index, err := strconv.ParseUint(parts[1], 10, 32)
if err != nil {
return InscriptionId{}, errors.Wrap(err, "invalid inscription id: cannot parse index")
}
return InscriptionId{
TxHash: *txHash,
Index: uint32(index),
}, nil
}
// MarshalJSON implements json.Marshaler, encoding the id as a quoted string.
func (r InscriptionId) MarshalJSON() ([]byte, error) {
	s := r.String()
	out := make([]byte, 0, len(s)+2)
	out = append(out, '"')
	out = append(out, s...)
	out = append(out, '"')
	return out, nil
}
// UnmarshalJSON implements json.Unmarshaler, expecting a quoted
// "<txid>i<index>" string.
func (r *InscriptionId) UnmarshalJSON(data []byte) error {
	// reject anything that is not a quoted string
	if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
		return errors.New("must be string")
	}
	inner := string(data[1 : len(data)-1])
	parsed, err := NewInscriptionIdFromString(inner)
	if err != nil {
		return errors.WithStack(err)
	}
	*r = parsed
	return nil
}

View File

@@ -1,109 +0,0 @@
package ordinals
import (
"testing"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/stretchr/testify/assert"
)
// TestNewInscriptionIdFromString checks parsing of "<txid>i<index>" strings,
// including boundary index values and malformed inputs.
func TestNewInscriptionIdFromString(t *testing.T) {
	tests := []struct {
		name        string
		input       string
		expected    InscriptionId
		shouldError bool
	}{
		{
			name:  "valid inscription id 1",
			input: "1111111111111111111111111111111111111111111111111111111111111111i0",
			expected: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  0,
			},
		},
		{
			name:  "valid inscription id 2",
			input: "1111111111111111111111111111111111111111111111111111111111111111i1",
			expected: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  1,
			},
		},
		{
			// max uint32 index
			name:  "valid inscription id 3",
			input: "1111111111111111111111111111111111111111111111111111111111111111i4294967295",
			expected: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  4294967295,
			},
		},
		{
			name:        "error no separator",
			input:       "abc",
			shouldError: true,
		},
		{
			name:        "error invalid index",
			input:       "xyzixyz",
			shouldError: true,
		},
		{
			name:        "error invalid index",
			input:       "abcixyz",
			shouldError: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			actual, err := NewInscriptionIdFromString(tt.input)
			if tt.shouldError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.expected, actual)
			}
		})
	}
}
// TestInscriptionIdString checks rendering of inscription ids back to the
// "<txid>i<index>" form, the inverse of NewInscriptionIdFromString.
func TestInscriptionIdString(t *testing.T) {
	tests := []struct {
		name     string
		expected string
		input    InscriptionId
	}{
		{
			name:     "valid inscription id 1",
			expected: "1111111111111111111111111111111111111111111111111111111111111111i0",
			input: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  0,
			},
		},
		{
			name:     "valid inscription id 2",
			expected: "1111111111111111111111111111111111111111111111111111111111111111i1",
			input: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  1,
			},
		},
		{
			name:     "valid inscription id 3",
			expected: "1111111111111111111111111111111111111111111111111111111111111111i4294967295",
			input: InscriptionId{
				TxHash: *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
				Index:  4294967295,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.expected, tt.input.String())
		})
	}
}

View File

@@ -1,68 +0,0 @@
package ordinals
import (
"fmt"
"strconv"
"strings"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
)
// SatPoint locates a sat as an outpoint plus an offset, rendered as
// "<txid>:<vout>:<offset>".
type SatPoint struct {
	OutPoint wire.OutPoint
	Offset   uint64
}
// String renders the sat point as "<txid>:<vout>:<offset>".
func (s SatPoint) String() string {
	return s.OutPoint.String() + ":" + strconv.FormatUint(s.Offset, 10)
}
var ErrSatPointInvalidSeparator = fmt.Errorf("invalid sat point: must contain exactly two separators")
func NewSatPointFromString(s string) (SatPoint, error) {
parts := strings.SplitN(s, ":", 3)
if len(parts) != 3 {
return SatPoint{}, errors.WithStack(ErrSatPointInvalidSeparator)
}
txHash, err := chainhash.NewHashFromStr(parts[0])
if err != nil {
return SatPoint{}, errors.Wrap(err, "invalid inscription id: cannot parse txHash")
}
index, err := strconv.ParseUint(parts[1], 10, 32)
if err != nil {
return SatPoint{}, errors.Wrap(err, "invalid inscription id: cannot parse index")
}
offset, err := strconv.ParseUint(parts[2], 10, 64)
if err != nil {
return SatPoint{}, errors.Wrap(err, "invalid sat point: cannot parse offset")
}
return SatPoint{
OutPoint: wire.OutPoint{
Hash: *txHash,
Index: uint32(index),
},
Offset: offset,
}, nil
}
// MarshalJSON implements json.Marshaler, encoding the sat point as a quoted
// string.
func (r SatPoint) MarshalJSON() ([]byte, error) {
	s := r.String()
	out := make([]byte, 0, len(s)+2)
	out = append(out, '"')
	out = append(out, s...)
	out = append(out, '"')
	return out, nil
}
// UnmarshalJSON implements json.Unmarshaler, expecting a quoted
// "<txid>:<vout>:<offset>" string.
func (r *SatPoint) UnmarshalJSON(data []byte) error {
	// reject anything that is not a quoted string
	if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
		return errors.New("must be string")
	}
	inner := string(data[1 : len(data)-1])
	parsed, err := NewSatPointFromString(inner)
	if err != nil {
		return errors.WithStack(err)
	}
	*r = parsed
	return nil
}

View File

@@ -1,89 +0,0 @@
package ordinals
import (
"testing"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/stretchr/testify/assert"
)
// TestNewSatPointFromString checks parsing of "<txid>:<vout>:<offset>"
// strings, including malformed inputs.
func TestNewSatPointFromString(t *testing.T) {
	tests := []struct {
		name        string
		input       string
		expected    SatPoint
		shouldError bool
	}{
		{
			name:  "valid sat point",
			input: "1111111111111111111111111111111111111111111111111111111111111111:1:2",
			expected: SatPoint{
				OutPoint: wire.OutPoint{
					Hash:  *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
					Index: 1,
				},
				Offset: 2,
			},
		},
		{
			name:        "error no separator",
			input:       "abc",
			shouldError: true,
		},
		{
			name:        "error invalid output index",
			input:       "abc:xyz",
			shouldError: true,
		},
		{
			name:        "error no offset",
			input:       "1111111111111111111111111111111111111111111111111111111111111111:1",
			shouldError: true,
		},
		{
			name:        "error invalid offset",
			input:       "1111111111111111111111111111111111111111111111111111111111111111:1:foo",
			shouldError: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			actual, err := NewSatPointFromString(tt.input)
			if tt.shouldError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.expected, actual)
			}
		})
	}
}
// TestSatPointString checks rendering of sat points back to the
// "<txid>:<vout>:<offset>" form, the inverse of NewSatPointFromString.
func TestSatPointString(t *testing.T) {
	tests := []struct {
		name     string
		input    SatPoint
		expected string
	}{
		{
			name: "valid sat point",
			input: SatPoint{
				OutPoint: wire.OutPoint{
					Hash:  *utils.Must(chainhash.NewHashFromStr("1111111111111111111111111111111111111111111111111111111111111111")),
					Index: 1,
				},
				Offset: 2,
			},
			expected: "1111111111111111111111111111111111111111111111111111111111111111:1:2",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.expected, tt.input.String())
		})
	}
}

View File

@@ -1,170 +0,0 @@
package ordinals
import (
"encoding/binary"
"fmt"
"github.com/btcsuite/btcd/txscript"
)
// PushScriptBuilder is a helper to build scripts that requires data pushes to use OP_PUSHDATA* or OP_DATA_* opcodes only.
// Empty data pushes are still encoded as OP_0.
type PushScriptBuilder struct {
	script []byte // script accumulated so far
	err    error  // first error encountered; once set, mutations become no-ops
}

// NewPushScriptBuilder returns an empty builder.
func NewPushScriptBuilder() *PushScriptBuilder {
	return &PushScriptBuilder{}
}
// canonicalDataSize returns the number of bytes the canonical encoding of the
// data will take.
func canonicalDataSize(data []byte) int {
	n := len(data)
	switch {
	case n == 0:
		// encoded as a single OP_0
		return 1
	case n < txscript.OP_PUSHDATA1:
		// OP_DATA_n followed by the data
		return 1 + n
	case n <= 0xff:
		// OP_PUSHDATA1 plus a 1-byte length
		return 2 + n
	case n <= 0xffff:
		// OP_PUSHDATA2 plus a 2-byte length
		return 3 + n
	default:
		// OP_PUSHDATA4 plus a 4-byte length
		return 5 + n
	}
}
// pushDataToBytes encodes data as a canonical push: OP_0 for empty data,
// OP_DATA_n for short pushes, and OP_PUSHDATA1/2/4 with a little-endian
// length prefix for longer ones.
func pushDataToBytes(data []byte) []byte {
	n := len(data)
	if n == 0 {
		return []byte{txscript.OP_0}
	}
	var script []byte
	switch {
	case n < txscript.OP_PUSHDATA1:
		script = append(script, byte(txscript.OP_DATA_1-1+n))
	case n <= 0xff:
		script = append(script, txscript.OP_PUSHDATA1, byte(n))
	case n <= 0xffff:
		lenBuf := make([]byte, 2)
		binary.LittleEndian.PutUint16(lenBuf, uint16(n))
		script = append(script, txscript.OP_PUSHDATA2)
		script = append(script, lenBuf...)
	default:
		lenBuf := make([]byte, 4)
		binary.LittleEndian.PutUint32(lenBuf, uint32(n))
		script = append(script, txscript.OP_PUSHDATA4)
		script = append(script, lenBuf...)
	}
	return append(script, data...)
}
// AddData pushes the passed data to the end of the script, choosing the
// canonical push opcode for the data's length (OP_0 for empty data). The
// script is left unmodified -- and the builder's error is set -- when the
// push would exceed MaxScriptElementSize or grow the script beyond
// MaxScriptSize.
func (b *PushScriptBuilder) AddData(data []byte) *PushScriptBuilder {
	if b.err != nil {
		return b
	}
	// Reject pushes that would grow the script past the canonical limit.
	encodedSize := canonicalDataSize(data)
	if len(b.script)+encodedSize > txscript.MaxScriptSize {
		msg := fmt.Sprintf("adding %d bytes of data would exceed the "+
			"maximum allowed canonical script length of %d",
			encodedSize, txscript.MaxScriptSize)
		b.err = txscript.ErrScriptNotCanonical(msg)
		return b
	}
	// Reject single elements larger than the script engine allows.
	if elementSize := len(data); elementSize > txscript.MaxScriptElementSize {
		msg := fmt.Sprintf("adding a data element of %d bytes would "+
			"exceed the maximum allowed script element size of %d",
			elementSize, txscript.MaxScriptElementSize)
		b.err = txscript.ErrScriptNotCanonical(msg)
		return b
	}
	b.script = append(b.script, pushDataToBytes(data)...)
	return b
}
// AddFullData pushes data without the size checks that AddData performs, so
// it can produce scripts the engine would reject. It exists for testing
// purposes (e.g. regression tests with intentionally oversized pushes).
//
// Use AddData instead.
func (b *PushScriptBuilder) AddFullData(data []byte) *PushScriptBuilder {
	if b.err != nil {
		return b
	}
	encoded := pushDataToBytes(data)
	b.script = append(b.script, encoded...)
	return b
}
// AddOp appends a single opcode. The script is left unmodified -- and the
// builder's error is set -- when the opcode would grow the script beyond
// MaxScriptSize.
func (b *PushScriptBuilder) AddOp(opcode byte) *PushScriptBuilder {
	if b.err != nil {
		return b
	}
	if len(b.script)+1 > txscript.MaxScriptSize {
		msg := fmt.Sprintf("adding an opcode would exceed the maximum "+
			"allowed canonical script length of %d", txscript.MaxScriptSize)
		b.err = txscript.ErrScriptNotCanonical(msg)
		return b
	}
	b.script = append(b.script, opcode)
	return b
}
// AddOps pushes the passed opcodes to the end of the script. The script will
// not be modified if pushing the opcodes would cause the script to exceed the
// maximum allowed script engine size.
func (b *PushScriptBuilder) AddOps(opcodes []byte) *PushScriptBuilder {
	if b.err != nil {
		return b
	}

	// The whole opcode run must fit inside the canonical script size limit.
	if len(opcodes)+len(b.script) > txscript.MaxScriptSize {
		str := fmt.Sprintf("adding opcodes would exceed the maximum "+
			"allowed canonical script length of %d", txscript.MaxScriptSize)
		b.err = txscript.ErrScriptNotCanonical(str)
		return b
	}

	b.script = append(b.script, opcodes...)
	return b
}
// Script returns the currently built script. When any errors occurred while
// building the script, the script will be returned up to the point of the
// first error along with the error.
func (b *PushScriptBuilder) Script() ([]byte, error) {
	return b.script, b.err
}

View File

@@ -1,81 +0,0 @@
package ordinals
// Tags represent data fields in a runestone. Unrecognized odd tags are ignored. Unrecognized even tags produce a cenotaph.
type Tag uint8

var (
	TagBody    = Tag(0)
	TagPointer = Tag(2)
	// TagUnbound is unrecognized
	TagUnbound = Tag(66)

	TagContentType     = Tag(1)
	TagParent          = Tag(3)
	TagMetadata        = Tag(5)
	TagMetaprotocol    = Tag(7)
	TagContentEncoding = Tag(9)
	TagDelegate        = Tag(11)
	// TagNop is unrecognized
	TagNop = Tag(255)
)

// allTags is the set of tags the indexer recognizes (body and the
// explicitly-unrecognized tags excluded).
var allTags = map[Tag]struct{}{
	TagPointer:         {},
	TagContentType:     {},
	TagParent:          {},
	TagMetadata:        {},
	TagMetaprotocol:    {},
	TagContentEncoding: {},
	TagDelegate:        {},
}

// IsValid reports whether t is one of the recognized tags.
func (t Tag) IsValid() bool {
	_, found := allTags[t]
	return found
}

// chunkedTags is the set of tags whose values may be split across
// multiple data pushes.
var chunkedTags = map[Tag]struct{}{
	TagMetadata: {},
}

// IsChunked reports whether t's value may span multiple data pushes.
func (t Tag) IsChunked() bool {
	_, found := chunkedTags[t]
	return found
}

// Bytes returns the tag's encoding as a byte slice.
func (t Tag) Bytes() []byte {
	// The body tag is encoded as an empty data push.
	if t == TagBody {
		return []byte{}
	}
	return []byte{byte(t)}
}

// ParseTag converts a Tag or any built-in integer value into a Tag. It
// panics when input is any other type.
func ParseTag(input interface{}) (Tag, error) {
	switch v := input.(type) {
	case Tag:
		return v, nil
	case int:
		return Tag(v), nil
	case int8:
		return Tag(v), nil
	case int16:
		return Tag(v), nil
	case int32:
		return Tag(v), nil
	case int64:
		return Tag(v), nil
	case uint:
		return Tag(v), nil
	case uint8:
		return Tag(v), nil
	case uint16:
		return Tag(v), nil
	case uint32:
		return Tag(v), nil
	case uint64:
		return Tag(v), nil
	default:
		panic("invalid tag input type")
	}
}

View File

@@ -1,542 +0,0 @@
package postgres
import (
"context"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres/gen"
"github.com/jackc/pgx/v5"
"github.com/samber/lo"
)
// Compile-time assertion that *Repository implements datagateway.BRC20DataGateway.
var _ datagateway.BRC20DataGateway = (*Repository)(nil)
// warning: GetLatestBlock currently returns a types.BlockHeader with only Height and Hash fields populated.
// This is because it is known that all usage of this function only requires these fields. In the future, we may want to populate all fields for type safety.
func (r *Repository) GetLatestBlock(ctx context.Context) (types.BlockHeader, error) {
	block, err := r.queries.GetLatestIndexedBlock(ctx)
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		// No block indexed yet.
		return types.BlockHeader{}, errors.WithStack(errs.NotFound)
	case err != nil:
		return types.BlockHeader{}, errors.Wrap(err, "error during query")
	}

	hash, err := chainhash.NewHashFromStr(block.Hash)
	if err != nil {
		return types.BlockHeader{}, errors.Wrap(err, "failed to parse block hash")
	}

	header := types.BlockHeader{
		Height: int64(block.Height),
		Hash:   *hash,
	}
	return header, nil
}
// GetIndexedBlockByHeight implements datagateway.BRC20DataGateway.
// It returns errs.NotFound when no block was indexed at the given height.
func (r *Repository) GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error) {
	model, err := r.queries.GetIndexedBlockByHeight(ctx, int32(height))
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, errors.WithStack(errs.NotFound)
	case err != nil:
		return nil, errors.Wrap(err, "error during query")
	}

	indexedBlock, err := mapIndexedBlockModelToType(model)
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse indexed block model")
	}
	return &indexedBlock, nil
}
// GetProcessorStats returns the most recently recorded processor stats, or
// errs.NotFound when none have been recorded yet.
func (r *Repository) GetProcessorStats(ctx context.Context) (*entity.ProcessorStats, error) {
	model, err := r.queries.GetLatestProcessorStats(ctx)
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, errors.WithStack(errs.NotFound)
	case err != nil:
		return nil, errors.WithStack(err)
	}

	stats := mapProcessorStatsModelToType(model)
	return &stats, nil
}
// GetInscriptionTransfersInOutPoints fetches inscription transfers whose new
// satpoint lies in any of the given outpoints, grouped by that satpoint.
func (r *Repository) GetInscriptionTransfersInOutPoints(ctx context.Context, outPoints []wire.OutPoint) (map[ordinals.SatPoint][]*entity.InscriptionTransfer, error) {
	// The query takes parallel, index-aligned arrays of tx hashes and
	// output indexes.
	txHashes := make([]string, 0, len(outPoints))
	txOutIndexes := make([]int32, 0, len(outPoints))
	for _, outPoint := range outPoints {
		txHashes = append(txHashes, outPoint.Hash.String())
		txOutIndexes = append(txOutIndexes, int32(outPoint.Index))
	}

	models, err := r.queries.GetInscriptionTransfersInOutPoints(ctx, gen.GetInscriptionTransfersInOutPointsParams{
		TxHashArr:   txHashes,
		TxOutIdxArr: txOutIndexes,
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}

	results := make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
	for _, model := range models {
		transfer, err := mapInscriptionTransferModelToType(model)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		satPoint := transfer.NewSatPoint
		results[satPoint] = append(results[satPoint], &transfer)
	}
	return results, nil
}
// GetInscriptionEntriesByIds maps each found inscription id to its entry.
// Ids without a matching row are absent from the result.
func (r *Repository) GetInscriptionEntriesByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*ordinals.InscriptionEntry, error) {
	idStrs := make([]string, 0, len(ids))
	for _, id := range ids {
		idStrs = append(idStrs, id.String())
	}

	models, err := r.queries.GetInscriptionEntriesByIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	result := make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry, len(models))
	for _, model := range models {
		entry, err := mapInscriptionEntryModelToType(model)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse inscription entry model")
		}
		result[entry.Id] = &entry
	}
	return result, nil
}
// GetInscriptionNumbersByIds maps each found inscription id to its
// inscription number. Ids without a matching row are absent from the result.
func (r *Repository) GetInscriptionNumbersByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]int64, error) {
	idStrs := make([]string, 0, len(ids))
	for _, id := range ids {
		idStrs = append(idStrs, id.String())
	}

	models, err := r.queries.GetInscriptionNumbersByIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	result := make(map[ordinals.InscriptionId]int64, len(models))
	for _, model := range models {
		inscriptionId, err := ordinals.NewInscriptionIdFromString(model.Id)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse inscription id")
		}
		result[inscriptionId] = model.Number
	}
	return result, nil
}
// GetInscriptionParentsByIds maps each inscription id to its parent id.
// Rows without exactly one parent are skipped: zero parents means "no
// parent", and more than one is rejected as a sanity check since ord 0.14
// supports only a single parent.
func (r *Repository) GetInscriptionParentsByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]ordinals.InscriptionId, error) {
	idStrs := make([]string, 0, len(ids))
	for _, id := range ids {
		idStrs = append(idStrs, id.String())
	}

	models, err := r.queries.GetInscriptionParentsByIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	result := make(map[ordinals.InscriptionId]ordinals.InscriptionId)
	for _, model := range models {
		if len(model.Parents) != 1 {
			continue
		}
		inscriptionId, err := ordinals.NewInscriptionIdFromString(model.Id)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse inscription id")
		}
		parentId, err := ordinals.NewInscriptionIdFromString(model.Parents[0])
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse parent id")
		}
		result[inscriptionId] = parentId
	}
	return result, nil
}
// GetLatestEventId returns the highest event id across all four BRC20 event
// tables (deploy, mint, inscribe-transfer, transfer-transfer).
func (r *Repository) GetLatestEventId(ctx context.Context) (int64, error) {
	row, err := r.queries.GetLatestEventIds(ctx)
	if err != nil {
		return 0, errors.WithStack(err)
	}
	// The generated query exposes each aggregate as interface{}. Use the
	// comma-ok assertion so an unexpected dynamic type (e.g. a NULL from an
	// empty table) yields an error instead of a runtime panic.
	rawIds := []interface{}{row.EventDeployID, row.EventMintID, row.EventInscribeTransferID, row.EventTransferTransferID}
	var latest int64
	for i, raw := range rawIds {
		id, ok := raw.(int64)
		if !ok {
			return 0, errors.Errorf("unexpected event id type %T", raw)
		}
		if i == 0 || id > latest {
			latest = id
		}
	}
	return latest, nil
}
// GetBalancesBatchAtHeight resolves the balance of each (pkScript, tick) pair
// as of blockHeight. The result is keyed by pkScript hex, then by tick; pairs
// with no stored balance are absent.
func (r *Repository) GetBalancesBatchAtHeight(ctx context.Context, blockHeight uint64, queries []datagateway.GetBalancesBatchAtHeightQuery) (map[string]map[string]*entity.Balance, error) {
	// The query takes parallel, index-aligned arrays; pre-size them since
	// the final length is known.
	pkScripts := make([]string, 0, len(queries))
	ticks := make([]string, 0, len(queries))
	for _, query := range queries {
		pkScripts = append(pkScripts, query.PkScriptHex)
		ticks = append(ticks, query.Tick)
	}
	models, err := r.queries.GetBalancesBatchAtHeight(ctx, gen.GetBalancesBatchAtHeightParams{
		PkscriptArr: pkScripts,
		TickArr:     ticks,
		BlockHeight: int32(blockHeight),
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	result := make(map[string]map[string]*entity.Balance)
	for _, model := range models {
		balance, err := mapBalanceModelToType(model)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse balance model")
		}
		if _, ok := result[model.Pkscript]; !ok {
			result[model.Pkscript] = make(map[string]*entity.Balance)
		}
		result[model.Pkscript][model.Tick] = &balance
	}
	return result, nil
}
// GetEventInscribeTransfersByInscriptionIds maps each found inscription id to
// its inscribe-transfer event. Ids without a matching row are absent.
func (r *Repository) GetEventInscribeTransfersByInscriptionIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*entity.EventInscribeTransfer, error) {
	idStrs := make([]string, 0, len(ids))
	for _, id := range ids {
		idStrs = append(idStrs, id.String())
	}

	models, err := r.queries.GetEventInscribeTransfersByInscriptionIds(ctx, idStrs)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	result := make(map[ordinals.InscriptionId]*entity.EventInscribeTransfer, len(models))
	for _, model := range models {
		event, err := mapEventInscribeTransferModelToType(model)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse event inscribe transfer model")
		}
		result[event.InscriptionId] = &event
	}
	return result, nil
}
// GetTickEntriesByTicks maps each found tick to its entry. Ticks without a
// matching row are absent from the result.
func (r *Repository) GetTickEntriesByTicks(ctx context.Context, ticks []string) (map[string]*entity.TickEntry, error) {
	models, err := r.queries.GetTickEntriesByTicks(ctx, ticks)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	result := make(map[string]*entity.TickEntry, len(models))
	for _, model := range models {
		entry, err := mapTickEntryModelToType(model)
		if err != nil {
			return nil, errors.Wrap(err, "failed to parse tick entry model")
		}
		result[entry.Tick] = &entry
	}
	return result, nil
}
// CreateIndexedBlock persists one indexed block record.
func (r *Repository) CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error {
	if err := r.queries.CreateIndexedBlock(ctx, mapIndexedBlockTypeToParams(*block)); err != nil {
		return errors.WithStack(err)
	}
	return nil
}
// CreateProcessorStats persists one processor stats record.
func (r *Repository) CreateProcessorStats(ctx context.Context, stats *entity.ProcessorStats) error {
	if err := r.queries.CreateProcessorStats(ctx, mapProcessorStatsTypeToParams(*stats)); err != nil {
		return errors.WithStack(err)
	}
	return nil
}
// batchExecer is the common surface of every sqlc-generated *BatchResults
// type: Exec drains the batch, invoking the callback with each statement's
// index and error.
type batchExecer interface {
	Exec(func(int, error))
}

// execBatch drains a batched insert and returns the per-statement errors
// joined together, wrapped with the same "error during exec" context the
// individual methods previously produced inline.
func execBatch(results batchExecer) error {
	var execErrors []error
	results.Exec(func(_ int, err error) {
		if err != nil {
			execErrors = append(execErrors, err)
		}
	})
	if len(execErrors) > 0 {
		return errors.Wrap(errors.Join(execErrors...), "error during exec")
	}
	return nil
}

// CreateTickEntries batch-inserts the given tick entries, recorded at blockHeight.
func (r *Repository) CreateTickEntries(ctx context.Context, blockHeight uint64, entries []*entity.TickEntry) error {
	entryParams := make([]gen.CreateTickEntriesParams, 0, len(entries))
	for _, entry := range entries {
		params, _, err := mapTickEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map tick entry to create params")
		}
		entryParams = append(entryParams, params)
	}
	return execBatch(r.queries.CreateTickEntries(ctx, entryParams))
}

// CreateTickEntryStates batch-inserts the per-block state rows of the given tick entries.
func (r *Repository) CreateTickEntryStates(ctx context.Context, blockHeight uint64, entryStates []*entity.TickEntry) error {
	entryParams := make([]gen.CreateTickEntryStatesParams, 0, len(entryStates))
	for _, entry := range entryStates {
		// mapTickEntryTypeToParams returns (entry params, state params, err);
		// this method persists the state half.
		_, params, err := mapTickEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map tick entry to create params")
		}
		entryParams = append(entryParams, params)
	}
	return execBatch(r.queries.CreateTickEntryStates(ctx, entryParams))
}

// CreateInscriptionEntries batch-inserts the given inscription entries, recorded at blockHeight.
func (r *Repository) CreateInscriptionEntries(ctx context.Context, blockHeight uint64, entries []*ordinals.InscriptionEntry) error {
	inscriptionEntryParams := make([]gen.CreateInscriptionEntriesParams, 0, len(entries))
	for _, entry := range entries {
		params, _, err := mapInscriptionEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map inscription entry to create params")
		}
		inscriptionEntryParams = append(inscriptionEntryParams, params)
	}
	return execBatch(r.queries.CreateInscriptionEntries(ctx, inscriptionEntryParams))
}

// CreateInscriptionEntryStates batch-inserts the per-block state rows of the given inscription entries.
func (r *Repository) CreateInscriptionEntryStates(ctx context.Context, blockHeight uint64, entryStates []*ordinals.InscriptionEntry) error {
	stateParams := make([]gen.CreateInscriptionEntryStatesParams, 0, len(entryStates))
	for _, entry := range entryStates {
		_, params, err := mapInscriptionEntryTypeToParams(*entry, blockHeight)
		if err != nil {
			return errors.Wrap(err, "cannot map inscription entry to create params")
		}
		stateParams = append(stateParams, params)
	}
	return execBatch(r.queries.CreateInscriptionEntryStates(ctx, stateParams))
}

// CreateInscriptionTransfers batch-inserts the given inscription transfers.
func (r *Repository) CreateInscriptionTransfers(ctx context.Context, transfers []*entity.InscriptionTransfer) error {
	params := lo.Map(transfers, func(transfer *entity.InscriptionTransfer, _ int) gen.CreateInscriptionTransfersParams {
		return mapInscriptionTransferTypeToParams(*transfer)
	})
	return execBatch(r.queries.CreateInscriptionTransfers(ctx, params))
}

// CreateEventDeploys batch-inserts the given deploy events.
func (r *Repository) CreateEventDeploys(ctx context.Context, events []*entity.EventDeploy) error {
	params := make([]gen.CreateEventDeploysParams, 0, len(events))
	for _, event := range events {
		param, err := mapEventDeployTypeToParams(*event)
		if err != nil {
			return errors.Wrap(err, "cannot map event deploy to create params")
		}
		params = append(params, param)
	}
	return execBatch(r.queries.CreateEventDeploys(ctx, params))
}

// CreateEventMints batch-inserts the given mint events.
func (r *Repository) CreateEventMints(ctx context.Context, events []*entity.EventMint) error {
	params := make([]gen.CreateEventMintsParams, 0, len(events))
	for _, event := range events {
		param, err := mapEventMintTypeToParams(*event)
		if err != nil {
			return errors.Wrap(err, "cannot map event mint to create params")
		}
		params = append(params, param)
	}
	return execBatch(r.queries.CreateEventMints(ctx, params))
}

// CreateEventInscribeTransfers batch-inserts the given inscribe-transfer events.
func (r *Repository) CreateEventInscribeTransfers(ctx context.Context, events []*entity.EventInscribeTransfer) error {
	params := make([]gen.CreateEventInscribeTransfersParams, 0, len(events))
	for _, event := range events {
		param, err := mapEventInscribeTransferTypeToParams(*event)
		if err != nil {
			return errors.Wrap(err, "cannot map event transfer to create params")
		}
		params = append(params, param)
	}
	return execBatch(r.queries.CreateEventInscribeTransfers(ctx, params))
}

// CreateEventTransferTransfers batch-inserts the given transfer-transfer events.
func (r *Repository) CreateEventTransferTransfers(ctx context.Context, events []*entity.EventTransferTransfer) error {
	params := make([]gen.CreateEventTransferTransfersParams, 0, len(events))
	for _, event := range events {
		param, err := mapEventTransferTransferTypeToParams(*event)
		if err != nil {
			return errors.Wrap(err, "cannot map event transfer to create params")
		}
		params = append(params, param)
	}
	return execBatch(r.queries.CreateEventTransferTransfers(ctx, params))
}

// CreateBalances batch-inserts the given balance snapshots.
func (r *Repository) CreateBalances(ctx context.Context, balances []*entity.Balance) error {
	params := lo.Map(balances, func(balance *entity.Balance, _ int) gen.CreateBalancesParams {
		return mapBalanceTypeToParams(*balance)
	})
	return execBatch(r.queries.CreateBalances(ctx, params))
}
// The Delete*SinceHeight methods below remove rows recorded at or after the
// given block height (exact boundary semantics follow the underlying SQL
// queries). errors.Wrap returns nil when the wrapped error is nil, so each
// method can be a single expression.

// DeleteIndexedBlocksSinceHeight rolls back indexed block records.
func (r *Repository) DeleteIndexedBlocksSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteIndexedBlocksSinceHeight(ctx, int32(height)), "error during exec")
}

// DeleteProcessorStatsSinceHeight rolls back processor stats records.
func (r *Repository) DeleteProcessorStatsSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteProcessorStatsSinceHeight(ctx, int32(height)), "error during exec")
}

// DeleteTickEntriesSinceHeight rolls back tick entries.
func (r *Repository) DeleteTickEntriesSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteTickEntriesSinceHeight(ctx, int32(height)), "error during exec")
}

// DeleteTickEntryStatesSinceHeight rolls back tick entry states.
func (r *Repository) DeleteTickEntryStatesSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteTickEntryStatesSinceHeight(ctx, int32(height)), "error during exec")
}

// DeleteEventDeploysSinceHeight rolls back deploy events.
func (r *Repository) DeleteEventDeploysSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteEventDeploysSinceHeight(ctx, int32(height)), "error during exec")
}

// DeleteEventMintsSinceHeight rolls back mint events.
func (r *Repository) DeleteEventMintsSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteEventMintsSinceHeight(ctx, int32(height)), "error during exec")
}

// DeleteEventInscribeTransfersSinceHeight rolls back inscribe-transfer events.
func (r *Repository) DeleteEventInscribeTransfersSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteEventInscribeTransfersSinceHeight(ctx, int32(height)), "error during exec")
}

// DeleteEventTransferTransfersSinceHeight rolls back transfer-transfer events.
func (r *Repository) DeleteEventTransferTransfersSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteEventTransferTransfersSinceHeight(ctx, int32(height)), "error during exec")
}

// DeleteBalancesSinceHeight rolls back balance snapshots.
func (r *Repository) DeleteBalancesSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteBalancesSinceHeight(ctx, int32(height)), "error during exec")
}

// DeleteInscriptionEntriesSinceHeight rolls back inscription entries.
func (r *Repository) DeleteInscriptionEntriesSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteInscriptionEntriesSinceHeight(ctx, int32(height)), "error during exec")
}

// DeleteInscriptionEntryStatesSinceHeight rolls back inscription entry states.
func (r *Repository) DeleteInscriptionEntryStatesSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteInscriptionEntryStatesSinceHeight(ctx, int32(height)), "error during exec")
}

// DeleteInscriptionTransfersSinceHeight rolls back inscription transfers.
func (r *Repository) DeleteInscriptionTransfersSinceHeight(ctx context.Context, height uint64) error {
	return errors.Wrap(r.queries.DeleteInscriptionTransfersSinceHeight(ctx, int32(height)), "error during exec")
}

View File

@@ -1,684 +0,0 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: batch.go
package gen
import (
"context"
"errors"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"
)
var (
	// ErrBatchAlreadyClosed is reported for every remaining statement when a
	// batch's Close is called before Exec has finished draining it.
	ErrBatchAlreadyClosed = errors.New("batch already closed")
)
const createBalances = `-- name: CreateBalances :batchexec
INSERT INTO "brc20_balances" ("pkscript", "block_height", "tick", "overall_balance", "available_balance") VALUES ($1, $2, $3, $4, $5)
`

// CreateBalancesBatchResults tracks an in-flight pgx batch of balance inserts.
type CreateBalancesBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

type CreateBalancesParams struct {
	Pkscript         string
	BlockHeight      int32
	Tick             string
	OverallBalance   pgtype.Numeric
	AvailableBalance pgtype.Numeric
}

// CreateBalances queues one insert per params entry and sends them as a
// single pgx batch; call Exec on the result to drain it.
func (q *Queries) CreateBalances(ctx context.Context, arg []CreateBalancesParams) *CreateBalancesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Pkscript,
			a.BlockHeight,
			a.Tick,
			a.OverallBalance,
			a.AvailableBalance,
		}
		batch.Queue(createBalances, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateBalancesBatchResults{br, len(arg), false}
}

// Exec invokes f once per queued statement with its index and error, then
// closes the underlying batch results.
func (b *CreateBalancesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch as closed and releases the underlying results.
func (b *CreateBalancesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createEventDeploys = `-- name: CreateEventDeploys :batchexec
INSERT INTO "brc20_event_deploys" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "total_supply", "decimals", "limit_per_mint", "is_self_mint") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
`

// CreateEventDeploysBatchResults tracks an in-flight pgx batch of deploy-event inserts.
type CreateEventDeploysBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

type CreateEventDeploysParams struct {
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	TotalSupply       pgtype.Numeric
	Decimals          int16
	LimitPerMint      pgtype.Numeric
	IsSelfMint        bool
}

// CreateEventDeploys queues one insert per params entry and sends them as a single pgx batch.
func (q *Queries) CreateEventDeploys(ctx context.Context, arg []CreateEventDeploysParams) *CreateEventDeploysBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.Pkscript,
			a.Satpoint,
			a.TotalSupply,
			a.Decimals,
			a.LimitPerMint,
			a.IsSelfMint,
		}
		batch.Queue(createEventDeploys, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventDeploysBatchResults{br, len(arg), false}
}

// Exec invokes f once per queued statement with its index and error, then closes the batch.
func (b *CreateEventDeploysBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch as closed and releases the underlying results.
func (b *CreateEventDeploysBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createEventInscribeTransfers = `-- name: CreateEventInscribeTransfers :batchexec
INSERT INTO "brc20_event_inscribe_transfers" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "output_index", "sats_amount", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
`

// CreateEventInscribeTransfersBatchResults tracks an in-flight pgx batch of inscribe-transfer-event inserts.
type CreateEventInscribeTransfersBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

type CreateEventInscribeTransfersParams struct {
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	OutputIndex       int32
	SatsAmount        int64
	Amount            pgtype.Numeric
}

// CreateEventInscribeTransfers queues one insert per params entry and sends them as a single pgx batch.
func (q *Queries) CreateEventInscribeTransfers(ctx context.Context, arg []CreateEventInscribeTransfersParams) *CreateEventInscribeTransfersBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.Pkscript,
			a.Satpoint,
			a.OutputIndex,
			a.SatsAmount,
			a.Amount,
		}
		batch.Queue(createEventInscribeTransfers, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventInscribeTransfersBatchResults{br, len(arg), false}
}

// Exec invokes f once per queued statement with its index and error, then closes the batch.
func (b *CreateEventInscribeTransfersBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch as closed and releases the underlying results.
func (b *CreateEventInscribeTransfersBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createEventMints = `-- name: CreateEventMints :batchexec
INSERT INTO "brc20_event_mints" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "pkscript", "satpoint", "amount", "parent_id") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
`

// CreateEventMintsBatchResults tracks an in-flight pgx batch of mint-event inserts.
type CreateEventMintsBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

type CreateEventMintsParams struct {
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	Amount            pgtype.Numeric
	ParentID          pgtype.Text
}

// CreateEventMints queues one insert per params entry and sends them as a single pgx batch.
func (q *Queries) CreateEventMints(ctx context.Context, arg []CreateEventMintsParams) *CreateEventMintsBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.Pkscript,
			a.Satpoint,
			a.Amount,
			a.ParentID,
		}
		batch.Queue(createEventMints, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventMintsBatchResults{br, len(arg), false}
}

// Exec invokes f once per queued statement with its index and error, then closes the batch.
func (b *CreateEventMintsBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch as closed and releases the underlying results.
func (b *CreateEventMintsBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createEventTransferTransfers = `-- name: CreateEventTransferTransfers :batchexec
INSERT INTO "brc20_event_transfer_transfers" ("inscription_id", "inscription_number", "tick", "original_tick", "tx_hash", "block_height", "tx_index", "timestamp", "from_pkscript", "from_satpoint", "from_input_index", "to_pkscript", "to_satpoint", "to_output_index", "spent_as_fee", "amount") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
`

// CreateEventTransferTransfersBatchResults tracks an in-flight pgx batch of transfer-transfer-event inserts.
type CreateEventTransferTransfersBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

type CreateEventTransferTransfersParams struct {
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	FromPkscript      string
	FromSatpoint      string
	FromInputIndex    int32
	ToPkscript        string
	ToSatpoint        string
	ToOutputIndex     int32
	SpentAsFee        bool
	Amount            pgtype.Numeric
}

// CreateEventTransferTransfers queues one insert per params entry and sends them as a single pgx batch.
func (q *Queries) CreateEventTransferTransfers(ctx context.Context, arg []CreateEventTransferTransfersParams) *CreateEventTransferTransfersBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.InscriptionID,
			a.InscriptionNumber,
			a.Tick,
			a.OriginalTick,
			a.TxHash,
			a.BlockHeight,
			a.TxIndex,
			a.Timestamp,
			a.FromPkscript,
			a.FromSatpoint,
			a.FromInputIndex,
			a.ToPkscript,
			a.ToSatpoint,
			a.ToOutputIndex,
			a.SpentAsFee,
			a.Amount,
		}
		batch.Queue(createEventTransferTransfers, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateEventTransferTransfersBatchResults{br, len(arg), false}
}

// Exec invokes f once per queued statement with its index and error, then closes the batch.
func (b *CreateEventTransferTransfersBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch as closed and releases the underlying results.
func (b *CreateEventTransferTransfersBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createInscriptionEntries = `-- name: CreateInscriptionEntries :batchexec
INSERT INTO "brc20_inscription_entries" ("id", "number", "sequence_number", "delegate", "metadata", "metaprotocol", "parents", "pointer", "content", "content_encoding", "content_type", "cursed", "cursed_for_brc20", "created_at", "created_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
`

// CreateInscriptionEntriesBatchResults tracks an in-flight pgx batch of inscription-entry inserts.
type CreateInscriptionEntriesBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

type CreateInscriptionEntriesParams struct {
	Id              string
	Number          int64
	SequenceNumber  int64
	Delegate        pgtype.Text
	Metadata        []byte
	Metaprotocol    pgtype.Text
	Parents         []string
	Pointer         pgtype.Int8
	Content         []byte
	ContentEncoding pgtype.Text
	ContentType     pgtype.Text
	Cursed          bool
	CursedForBrc20  bool
	CreatedAt       pgtype.Timestamp
	CreatedAtHeight int32
}

// CreateInscriptionEntries queues one insert per params entry and sends them as a single pgx batch.
func (q *Queries) CreateInscriptionEntries(ctx context.Context, arg []CreateInscriptionEntriesParams) *CreateInscriptionEntriesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Id,
			a.Number,
			a.SequenceNumber,
			a.Delegate,
			a.Metadata,
			a.Metaprotocol,
			a.Parents,
			a.Pointer,
			a.Content,
			a.ContentEncoding,
			a.ContentType,
			a.Cursed,
			a.CursedForBrc20,
			a.CreatedAt,
			a.CreatedAtHeight,
		}
		batch.Queue(createInscriptionEntries, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateInscriptionEntriesBatchResults{br, len(arg), false}
}

// Exec invokes f once per queued statement with its index and error, then closes the batch.
func (b *CreateInscriptionEntriesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch as closed and releases the underlying results.
func (b *CreateInscriptionEntriesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createInscriptionEntryStates = `-- name: CreateInscriptionEntryStates :batchexec
INSERT INTO "brc20_inscription_entry_states" ("id", "block_height", "transfer_count") VALUES ($1, $2, $3)
`

// CreateInscriptionEntryStatesBatchResults tracks an in-flight pgx batch of inscription-entry-state inserts.
type CreateInscriptionEntryStatesBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}

type CreateInscriptionEntryStatesParams struct {
	Id            string
	BlockHeight   int32
	TransferCount int32
}

// CreateInscriptionEntryStates queues one insert per params entry and sends them as a single pgx batch.
func (q *Queries) CreateInscriptionEntryStates(ctx context.Context, arg []CreateInscriptionEntryStatesParams) *CreateInscriptionEntryStatesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Id,
			a.BlockHeight,
			a.TransferCount,
		}
		batch.Queue(createInscriptionEntryStates, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateInscriptionEntryStatesBatchResults{br, len(arg), false}
}

// Exec invokes f once per queued statement with its index and error, then closes the batch.
func (b *CreateInscriptionEntryStatesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}

// Close marks the batch as closed and releases the underlying results.
func (b *CreateInscriptionEntryStatesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createInscriptionTransfers = `-- name: CreateInscriptionTransfers :batchexec
INSERT INTO "brc20_inscription_transfers" ("inscription_id", "block_height", "tx_index", "tx_hash", "from_input_index", "old_satpoint_tx_hash", "old_satpoint_out_idx", "old_satpoint_offset", "new_satpoint_tx_hash", "new_satpoint_out_idx", "new_satpoint_offset", "new_pkscript", "new_output_value", "sent_as_fee", "transfer_count") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
`
// CreateInscriptionTransfersBatchResults tracks the pending results of a
// CreateInscriptionTransfers batch.
type CreateInscriptionTransfersBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}
// CreateInscriptionTransfersParams holds one row for the
// "brc20_inscription_transfers" insert. The old_satpoint_* columns are
// nullable (pgtype wrappers): the first transfer of an inscription has no
// previous satpoint.
type CreateInscriptionTransfersParams struct {
	InscriptionID     string
	BlockHeight       int32
	TxIndex           int32
	TxHash            string
	FromInputIndex    int32
	OldSatpointTxHash pgtype.Text
	OldSatpointOutIdx pgtype.Int4
	OldSatpointOffset pgtype.Int8
	NewSatpointTxHash pgtype.Text
	NewSatpointOutIdx pgtype.Int4
	NewSatpointOffset pgtype.Int8
	NewPkscript       string
	NewOutputValue    int64
	SentAsFee         bool
	TransferCount     int32
}
// CreateInscriptionTransfers queues one insert per element of arg and sends
// the whole batch in a single round trip; drain with Exec, then Close.
func (q *Queries) CreateInscriptionTransfers(ctx context.Context, arg []CreateInscriptionTransfersParams) *CreateInscriptionTransfersBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.InscriptionID,
			a.BlockHeight,
			a.TxIndex,
			a.TxHash,
			a.FromInputIndex,
			a.OldSatpointTxHash,
			a.OldSatpointOutIdx,
			a.OldSatpointOffset,
			a.NewSatpointTxHash,
			a.NewSatpointOutIdx,
			a.NewSatpointOffset,
			a.NewPkscript,
			a.NewOutputValue,
			a.SentAsFee,
			a.TransferCount,
		}
		batch.Queue(createInscriptionTransfers, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateInscriptionTransfersBatchResults{br, len(arg), false}
}
// Exec reports each queued statement's error to f (when non-nil), or
// ErrBatchAlreadyClosed if Close was called first.
func (b *CreateInscriptionTransfersBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}
// Close marks the batch as closed and releases the underlying batch results.
func (b *CreateInscriptionTransfersBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createTickEntries = `-- name: CreateTickEntries :batchexec
INSERT INTO "brc20_tick_entries" ("tick", "original_tick", "total_supply", "decimals", "limit_per_mint", "is_self_mint", "deploy_inscription_id", "deployed_at", "deployed_at_height") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
`
// CreateTickEntriesBatchResults tracks the pending results of a
// CreateTickEntries batch.
type CreateTickEntriesBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}
// CreateTickEntriesParams holds one row for the "brc20_tick_entries" insert.
type CreateTickEntriesParams struct {
	Tick                string
	OriginalTick        string
	TotalSupply         pgtype.Numeric
	Decimals            int16
	LimitPerMint        pgtype.Numeric
	IsSelfMint          bool
	DeployInscriptionID string
	DeployedAt          pgtype.Timestamp
	DeployedAtHeight    int32
}
// CreateTickEntries queues one insert per element of arg and sends the whole
// batch in a single round trip; drain with Exec, then Close.
func (q *Queries) CreateTickEntries(ctx context.Context, arg []CreateTickEntriesParams) *CreateTickEntriesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Tick,
			a.OriginalTick,
			a.TotalSupply,
			a.Decimals,
			a.LimitPerMint,
			a.IsSelfMint,
			a.DeployInscriptionID,
			a.DeployedAt,
			a.DeployedAtHeight,
		}
		batch.Queue(createTickEntries, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateTickEntriesBatchResults{br, len(arg), false}
}
// Exec reports each queued statement's error to f (when non-nil), or
// ErrBatchAlreadyClosed if Close was called first.
func (b *CreateTickEntriesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}
// Close marks the batch as closed and releases the underlying batch results.
func (b *CreateTickEntriesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}
const createTickEntryStates = `-- name: CreateTickEntryStates :batchexec
INSERT INTO "brc20_tick_entry_states" ("tick", "block_height", "minted_amount", "burned_amount", "completed_at", "completed_at_height") VALUES ($1, $2, $3, $4, $5, $6)
`
// CreateTickEntryStatesBatchResults tracks the pending results of a
// CreateTickEntryStates batch.
type CreateTickEntryStatesBatchResults struct {
	br     pgx.BatchResults
	tot    int
	closed bool
}
// CreateTickEntryStatesParams holds one row for the
// "brc20_tick_entry_states" insert; completed_at/completed_at_height are
// nullable (pgtype wrappers) for ticks that have not minted out yet.
type CreateTickEntryStatesParams struct {
	Tick              string
	BlockHeight       int32
	MintedAmount      pgtype.Numeric
	BurnedAmount      pgtype.Numeric
	CompletedAt       pgtype.Timestamp
	CompletedAtHeight pgtype.Int4
}
// CreateTickEntryStates queues one insert per element of arg and sends the
// whole batch in a single round trip; drain with Exec, then Close.
func (q *Queries) CreateTickEntryStates(ctx context.Context, arg []CreateTickEntryStatesParams) *CreateTickEntryStatesBatchResults {
	batch := &pgx.Batch{}
	for _, a := range arg {
		vals := []interface{}{
			a.Tick,
			a.BlockHeight,
			a.MintedAmount,
			a.BurnedAmount,
			a.CompletedAt,
			a.CompletedAtHeight,
		}
		batch.Queue(createTickEntryStates, vals...)
	}
	br := q.db.SendBatch(ctx, batch)
	return &CreateTickEntryStatesBatchResults{br, len(arg), false}
}
// Exec reports each queued statement's error to f (when non-nil), or
// ErrBatchAlreadyClosed if Close was called first.
func (b *CreateTickEntryStatesBatchResults) Exec(f func(int, error)) {
	defer b.br.Close()
	for t := 0; t < b.tot; t++ {
		if b.closed {
			if f != nil {
				f(t, ErrBatchAlreadyClosed)
			}
			continue
		}
		_, err := b.br.Exec()
		if f != nil {
			f(t, err)
		}
	}
}
// Close marks the batch as closed and releases the underlying batch results.
func (b *CreateTickEntryStatesBatchResults) Close() error {
	b.closed = true
	return b.br.Close()
}

View File

@@ -1,593 +0,0 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: data.sql
package gen
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
const createIndexedBlock = `-- name: CreateIndexedBlock :exec
INSERT INTO "brc20_indexed_blocks" ("height", "hash", "event_hash", "cumulative_event_hash") VALUES ($1, $2, $3, $4)
`
// CreateIndexedBlockParams holds one row for the "brc20_indexed_blocks"
// insert; hashes are stored as strings (hex-encoded by the caller).
type CreateIndexedBlockParams struct {
	Height              int32
	Hash                string
	EventHash           string
	CumulativeEventHash string
}
// CreateIndexedBlock records one indexed block.
func (q *Queries) CreateIndexedBlock(ctx context.Context, arg CreateIndexedBlockParams) error {
	_, err := q.db.Exec(ctx, createIndexedBlock,
		arg.Height,
		arg.Hash,
		arg.EventHash,
		arg.CumulativeEventHash,
	)
	return err
}
const createProcessorStats = `-- name: CreateProcessorStats :exec
INSERT INTO "brc20_processor_stats" ("block_height", "cursed_inscription_count", "blessed_inscription_count", "lost_sats") VALUES ($1, $2, $3, $4)
`
// CreateProcessorStatsParams holds one row for the "brc20_processor_stats"
// insert.
type CreateProcessorStatsParams struct {
	BlockHeight             int32
	CursedInscriptionCount  int32
	BlessedInscriptionCount int32
	LostSats                int64
}
// CreateProcessorStats records the processor counters at a block height.
func (q *Queries) CreateProcessorStats(ctx context.Context, arg CreateProcessorStatsParams) error {
	_, err := q.db.Exec(ctx, createProcessorStats,
		arg.BlockHeight,
		arg.CursedInscriptionCount,
		arg.BlessedInscriptionCount,
		arg.LostSats,
	)
	return err
}
// The Delete*SinceHeight queries below remove all rows at or above a given
// block height; they are used together to roll the indexer back during a
// chain reorganization.
const deleteBalancesSinceHeight = `-- name: DeleteBalancesSinceHeight :exec
DELETE FROM "brc20_balances" WHERE "block_height" >= $1
`
// DeleteBalancesSinceHeight removes balances recorded at or above blockHeight.
func (q *Queries) DeleteBalancesSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteBalancesSinceHeight, blockHeight)
	return err
}
const deleteEventDeploysSinceHeight = `-- name: DeleteEventDeploysSinceHeight :exec
DELETE FROM "brc20_event_deploys" WHERE "block_height" >= $1
`
// DeleteEventDeploysSinceHeight removes deploy events at or above blockHeight.
func (q *Queries) DeleteEventDeploysSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteEventDeploysSinceHeight, blockHeight)
	return err
}
const deleteEventInscribeTransfersSinceHeight = `-- name: DeleteEventInscribeTransfersSinceHeight :exec
DELETE FROM "brc20_event_inscribe_transfers" WHERE "block_height" >= $1
`
// DeleteEventInscribeTransfersSinceHeight removes inscribe-transfer events at
// or above blockHeight.
func (q *Queries) DeleteEventInscribeTransfersSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteEventInscribeTransfersSinceHeight, blockHeight)
	return err
}
const deleteEventMintsSinceHeight = `-- name: DeleteEventMintsSinceHeight :exec
DELETE FROM "brc20_event_mints" WHERE "block_height" >= $1
`
// DeleteEventMintsSinceHeight removes mint events at or above blockHeight.
func (q *Queries) DeleteEventMintsSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteEventMintsSinceHeight, blockHeight)
	return err
}
const deleteEventTransferTransfersSinceHeight = `-- name: DeleteEventTransferTransfersSinceHeight :exec
DELETE FROM "brc20_event_transfer_transfers" WHERE "block_height" >= $1
`
// DeleteEventTransferTransfersSinceHeight removes transfer-transfer events at
// or above blockHeight.
func (q *Queries) DeleteEventTransferTransfersSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteEventTransferTransfersSinceHeight, blockHeight)
	return err
}
const deleteIndexedBlocksSinceHeight = `-- name: DeleteIndexedBlocksSinceHeight :exec
DELETE FROM "brc20_indexed_blocks" WHERE "height" >= $1
`
// DeleteIndexedBlocksSinceHeight removes indexed blocks at or above height.
func (q *Queries) DeleteIndexedBlocksSinceHeight(ctx context.Context, height int32) error {
	_, err := q.db.Exec(ctx, deleteIndexedBlocksSinceHeight, height)
	return err
}
const deleteInscriptionEntriesSinceHeight = `-- name: DeleteInscriptionEntriesSinceHeight :exec
DELETE FROM "brc20_inscription_entries" WHERE "created_at_height" >= $1
`
// DeleteInscriptionEntriesSinceHeight removes inscription entries created at
// or above createdAtHeight.
func (q *Queries) DeleteInscriptionEntriesSinceHeight(ctx context.Context, createdAtHeight int32) error {
	_, err := q.db.Exec(ctx, deleteInscriptionEntriesSinceHeight, createdAtHeight)
	return err
}
const deleteInscriptionEntryStatesSinceHeight = `-- name: DeleteInscriptionEntryStatesSinceHeight :exec
DELETE FROM "brc20_inscription_entry_states" WHERE "block_height" >= $1
`
// DeleteInscriptionEntryStatesSinceHeight removes inscription entry states at
// or above blockHeight.
func (q *Queries) DeleteInscriptionEntryStatesSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteInscriptionEntryStatesSinceHeight, blockHeight)
	return err
}
const deleteInscriptionTransfersSinceHeight = `-- name: DeleteInscriptionTransfersSinceHeight :exec
DELETE FROM "brc20_inscription_transfers" WHERE "block_height" >= $1
`
// DeleteInscriptionTransfersSinceHeight removes inscription transfers at or
// above blockHeight.
func (q *Queries) DeleteInscriptionTransfersSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteInscriptionTransfersSinceHeight, blockHeight)
	return err
}
const deleteProcessorStatsSinceHeight = `-- name: DeleteProcessorStatsSinceHeight :exec
DELETE FROM "brc20_processor_stats" WHERE "block_height" >= $1
`
// DeleteProcessorStatsSinceHeight removes processor stats at or above
// blockHeight.
func (q *Queries) DeleteProcessorStatsSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteProcessorStatsSinceHeight, blockHeight)
	return err
}
const deleteTickEntriesSinceHeight = `-- name: DeleteTickEntriesSinceHeight :exec
DELETE FROM "brc20_tick_entries" WHERE "deployed_at_height" >= $1
`
// DeleteTickEntriesSinceHeight removes tick entries deployed at or above
// deployedAtHeight.
func (q *Queries) DeleteTickEntriesSinceHeight(ctx context.Context, deployedAtHeight int32) error {
	_, err := q.db.Exec(ctx, deleteTickEntriesSinceHeight, deployedAtHeight)
	return err
}
const deleteTickEntryStatesSinceHeight = `-- name: DeleteTickEntryStatesSinceHeight :exec
DELETE FROM "brc20_tick_entry_states" WHERE "block_height" >= $1
`
// DeleteTickEntryStatesSinceHeight removes tick entry states at or above
// blockHeight.
func (q *Queries) DeleteTickEntryStatesSinceHeight(ctx context.Context, blockHeight int32) error {
	_, err := q.db.Exec(ctx, deleteTickEntryStatesSinceHeight, blockHeight)
	return err
}
const getBalancesBatchAtHeight = `-- name: GetBalancesBatchAtHeight :many
SELECT DISTINCT ON ("brc20_balances"."pkscript", "brc20_balances"."tick") brc20_balances.pkscript, brc20_balances.block_height, brc20_balances.tick, brc20_balances.overall_balance, brc20_balances.available_balance FROM "brc20_balances"
INNER JOIN (
SELECT
unnest($1::text[]) AS "pkscript",
unnest($2::text[]) AS "tick"
) "queries" ON "brc20_balances"."pkscript" = "queries"."pkscript" AND "brc20_balances"."tick" = "queries"."tick" AND "brc20_balances"."block_height" <= $3
ORDER BY "brc20_balances"."pkscript", "brc20_balances"."tick", "block_height" DESC
`
// GetBalancesBatchAtHeightParams pairs pkscripts with ticks positionally:
// PkscriptArr[i] is queried together with TickArr[i].
type GetBalancesBatchAtHeightParams struct {
	PkscriptArr []string
	TickArr     []string
	BlockHeight int32
}
// GetBalancesBatchAtHeight returns, for each (pkscript, tick) pair, the
// latest balance row at or below BlockHeight. Pairs with no matching row
// simply produce no result.
func (q *Queries) GetBalancesBatchAtHeight(ctx context.Context, arg GetBalancesBatchAtHeightParams) ([]Brc20Balance, error) {
	rows, err := q.db.Query(ctx, getBalancesBatchAtHeight, arg.PkscriptArr, arg.TickArr, arg.BlockHeight)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Brc20Balance
	for rows.Next() {
		var i Brc20Balance
		if err := rows.Scan(
			&i.Pkscript,
			&i.BlockHeight,
			&i.Tick,
			&i.OverallBalance,
			&i.AvailableBalance,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const getEventInscribeTransfersByInscriptionIds = `-- name: GetEventInscribeTransfersByInscriptionIds :many
SELECT id, inscription_id, inscription_number, tick, original_tick, tx_hash, block_height, tx_index, timestamp, pkscript, satpoint, output_index, sats_amount, amount FROM "brc20_event_inscribe_transfers" WHERE "inscription_id" = ANY($1::text[])
`
// GetEventInscribeTransfersByInscriptionIds returns all inscribe-transfer
// events whose inscription id is in inscriptionIds.
func (q *Queries) GetEventInscribeTransfersByInscriptionIds(ctx context.Context, inscriptionIds []string) ([]Brc20EventInscribeTransfer, error) {
	rows, err := q.db.Query(ctx, getEventInscribeTransfersByInscriptionIds, inscriptionIds)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Brc20EventInscribeTransfer
	for rows.Next() {
		var i Brc20EventInscribeTransfer
		if err := rows.Scan(
			&i.Id,
			&i.InscriptionID,
			&i.InscriptionNumber,
			&i.Tick,
			&i.OriginalTick,
			&i.TxHash,
			&i.BlockHeight,
			&i.TxIndex,
			&i.Timestamp,
			&i.Pkscript,
			&i.Satpoint,
			&i.OutputIndex,
			&i.SatsAmount,
			&i.Amount,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const getIndexedBlockByHeight = `-- name: GetIndexedBlockByHeight :one
SELECT height, hash, event_hash, cumulative_event_hash FROM "brc20_indexed_blocks" WHERE "height" = $1
`
// GetIndexedBlockByHeight returns the indexed block at the given height;
// pgx.ErrNoRows is returned (via Scan) when no such block exists.
func (q *Queries) GetIndexedBlockByHeight(ctx context.Context, height int32) (Brc20IndexedBlock, error) {
	row := q.db.QueryRow(ctx, getIndexedBlockByHeight, height)
	var i Brc20IndexedBlock
	err := row.Scan(
		&i.Height,
		&i.Hash,
		&i.EventHash,
		&i.CumulativeEventHash,
	)
	return i, err
}
const getInscriptionEntriesByIds = `-- name: GetInscriptionEntriesByIds :many
WITH "states" AS (
-- select latest state
SELECT DISTINCT ON ("id") id, block_height, transfer_count FROM "brc20_inscription_entry_states" WHERE "id" = ANY($1::text[]) ORDER BY "id", "block_height" DESC
)
SELECT brc20_inscription_entries.id, number, sequence_number, delegate, metadata, metaprotocol, parents, pointer, content, content_encoding, content_type, cursed, cursed_for_brc20, created_at, created_at_height, states.id, block_height, transfer_count FROM "brc20_inscription_entries"
LEFT JOIN "states" ON "brc20_inscription_entries"."id" = "states"."id"
WHERE "brc20_inscription_entries"."id" = ANY($1::text[])
`
// GetInscriptionEntriesByIdsRow joins an inscription entry with its latest
// state; the Id_2/BlockHeight/TransferCount columns come from the LEFT JOIN
// and are NULL (invalid pgtype values) when no state row exists.
type GetInscriptionEntriesByIdsRow struct {
	Id              string
	Number          int64
	SequenceNumber  int64
	Delegate        pgtype.Text
	Metadata        []byte
	Metaprotocol    pgtype.Text
	Parents         []string
	Pointer         pgtype.Int8
	Content         []byte
	ContentEncoding pgtype.Text
	ContentType     pgtype.Text
	Cursed          bool
	CursedForBrc20  bool
	CreatedAt       pgtype.Timestamp
	CreatedAtHeight int32
	Id_2            pgtype.Text
	BlockHeight     pgtype.Int4
	TransferCount   pgtype.Int4
}
// GetInscriptionEntriesByIds returns the entries (with latest state, if any)
// for the given inscription ids.
func (q *Queries) GetInscriptionEntriesByIds(ctx context.Context, inscriptionIds []string) ([]GetInscriptionEntriesByIdsRow, error) {
	rows, err := q.db.Query(ctx, getInscriptionEntriesByIds, inscriptionIds)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetInscriptionEntriesByIdsRow
	for rows.Next() {
		var i GetInscriptionEntriesByIdsRow
		if err := rows.Scan(
			&i.Id,
			&i.Number,
			&i.SequenceNumber,
			&i.Delegate,
			&i.Metadata,
			&i.Metaprotocol,
			&i.Parents,
			&i.Pointer,
			&i.Content,
			&i.ContentEncoding,
			&i.ContentType,
			&i.Cursed,
			&i.CursedForBrc20,
			&i.CreatedAt,
			&i.CreatedAtHeight,
			&i.Id_2,
			&i.BlockHeight,
			&i.TransferCount,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const getInscriptionNumbersByIds = `-- name: GetInscriptionNumbersByIds :many
SELECT id, number FROM "brc20_inscription_entries" WHERE "id" = ANY($1::text[])
`
// GetInscriptionNumbersByIdsRow pairs an inscription id with its number.
type GetInscriptionNumbersByIdsRow struct {
	Id     string
	Number int64
}
// GetInscriptionNumbersByIds returns (id, number) pairs for the given
// inscription ids; ids with no entry are absent from the result.
func (q *Queries) GetInscriptionNumbersByIds(ctx context.Context, inscriptionIds []string) ([]GetInscriptionNumbersByIdsRow, error) {
	rows, err := q.db.Query(ctx, getInscriptionNumbersByIds, inscriptionIds)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetInscriptionNumbersByIdsRow
	for rows.Next() {
		var i GetInscriptionNumbersByIdsRow
		if err := rows.Scan(&i.Id, &i.Number); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const getInscriptionParentsByIds = `-- name: GetInscriptionParentsByIds :many
SELECT id, parents FROM "brc20_inscription_entries" WHERE "id" = ANY($1::text[])
`
// GetInscriptionParentsByIdsRow pairs an inscription id with its parent ids.
type GetInscriptionParentsByIdsRow struct {
	Id      string
	Parents []string
}
// GetInscriptionParentsByIds returns (id, parents) pairs for the given
// inscription ids; ids with no entry are absent from the result.
func (q *Queries) GetInscriptionParentsByIds(ctx context.Context, inscriptionIds []string) ([]GetInscriptionParentsByIdsRow, error) {
	rows, err := q.db.Query(ctx, getInscriptionParentsByIds, inscriptionIds)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetInscriptionParentsByIdsRow
	for rows.Next() {
		var i GetInscriptionParentsByIdsRow
		if err := rows.Scan(&i.Id, &i.Parents); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const getInscriptionTransfersInOutPoints = `-- name: GetInscriptionTransfersInOutPoints :many
SELECT it.inscription_id, it.block_height, it.tx_index, it.tx_hash, it.from_input_index, it.old_satpoint_tx_hash, it.old_satpoint_out_idx, it.old_satpoint_offset, it.new_satpoint_tx_hash, it.new_satpoint_out_idx, it.new_satpoint_offset, it.new_pkscript, it.new_output_value, it.sent_as_fee, it.transfer_count, "ie"."content" FROM (
SELECT
unnest($1::text[]) AS "tx_hash",
unnest($2::int[]) AS "tx_out_idx"
) "inputs"
INNER JOIN "brc20_inscription_transfers" it ON "inputs"."tx_hash" = "it"."new_satpoint_tx_hash" AND "inputs"."tx_out_idx" = "it"."new_satpoint_out_idx"
LEFT JOIN "brc20_inscription_entries" ie ON "it"."inscription_id" = "ie"."id"
`
// GetInscriptionTransfersInOutPointsParams pairs tx hashes with output
// indexes positionally: TxHashArr[i] is matched with TxOutIdxArr[i].
type GetInscriptionTransfersInOutPointsParams struct {
	TxHashArr   []string
	TxOutIdxArr []int32
}
// GetInscriptionTransfersInOutPointsRow is a transfer row joined with the
// inscription's content (NULL/nil when the entry is missing).
type GetInscriptionTransfersInOutPointsRow struct {
	InscriptionID     string
	BlockHeight       int32
	TxIndex           int32
	TxHash            string
	FromInputIndex    int32
	OldSatpointTxHash pgtype.Text
	OldSatpointOutIdx pgtype.Int4
	OldSatpointOffset pgtype.Int8
	NewSatpointTxHash pgtype.Text
	NewSatpointOutIdx pgtype.Int4
	NewSatpointOffset pgtype.Int8
	NewPkscript       string
	NewOutputValue    int64
	SentAsFee         bool
	TransferCount     int32
	Content           []byte
}
// GetInscriptionTransfersInOutPoints returns the inscription transfers whose
// new satpoint lands in any of the given (tx hash, output index) outpoints.
func (q *Queries) GetInscriptionTransfersInOutPoints(ctx context.Context, arg GetInscriptionTransfersInOutPointsParams) ([]GetInscriptionTransfersInOutPointsRow, error) {
	rows, err := q.db.Query(ctx, getInscriptionTransfersInOutPoints, arg.TxHashArr, arg.TxOutIdxArr)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetInscriptionTransfersInOutPointsRow
	for rows.Next() {
		var i GetInscriptionTransfersInOutPointsRow
		if err := rows.Scan(
			&i.InscriptionID,
			&i.BlockHeight,
			&i.TxIndex,
			&i.TxHash,
			&i.FromInputIndex,
			&i.OldSatpointTxHash,
			&i.OldSatpointOutIdx,
			&i.OldSatpointOffset,
			&i.NewSatpointTxHash,
			&i.NewSatpointOutIdx,
			&i.NewSatpointOffset,
			&i.NewPkscript,
			&i.NewOutputValue,
			&i.SentAsFee,
			&i.TransferCount,
			&i.Content,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const getLatestEventIds = `-- name: GetLatestEventIds :one
WITH "latest_deploy_id" AS (
SELECT "id" FROM "brc20_event_deploys" ORDER BY "id" DESC LIMIT 1
),
"latest_mint_id" AS (
SELECT "id" FROM "brc20_event_mints" ORDER BY "id" DESC LIMIT 1
),
"latest_inscribe_transfer_id" AS (
SELECT "id" FROM "brc20_event_inscribe_transfers" ORDER BY "id" DESC LIMIT 1
),
"latest_transfer_transfer_id" AS (
SELECT "id" FROM "brc20_event_transfer_transfers" ORDER BY "id" DESC LIMIT 1
)
SELECT
COALESCE((SELECT "id" FROM "latest_deploy_id"), -1) AS "event_deploy_id",
COALESCE((SELECT "id" FROM "latest_mint_id"), -1) AS "event_mint_id",
COALESCE((SELECT "id" FROM "latest_inscribe_transfer_id"), -1) AS "event_inscribe_transfer_id",
COALESCE((SELECT "id" FROM "latest_transfer_transfer_id"), -1) AS "event_transfer_transfer_id"
`
// GetLatestEventIdsRow carries the highest id of each event table, or -1 for
// an empty table. Fields are interface{} because sqlc cannot infer a concrete
// type through the COALESCE with -1.
type GetLatestEventIdsRow struct {
	EventDeployID           interface{}
	EventMintID             interface{}
	EventInscribeTransferID interface{}
	EventTransferTransferID interface{}
}
// GetLatestEventIds returns the latest id from each of the four event tables
// in a single query.
func (q *Queries) GetLatestEventIds(ctx context.Context) (GetLatestEventIdsRow, error) {
	row := q.db.QueryRow(ctx, getLatestEventIds)
	var i GetLatestEventIdsRow
	err := row.Scan(
		&i.EventDeployID,
		&i.EventMintID,
		&i.EventInscribeTransferID,
		&i.EventTransferTransferID,
	)
	return i, err
}
const getLatestIndexedBlock = `-- name: GetLatestIndexedBlock :one
SELECT height, hash, event_hash, cumulative_event_hash FROM "brc20_indexed_blocks" ORDER BY "height" DESC LIMIT 1
`
// GetLatestIndexedBlock returns the highest indexed block; pgx.ErrNoRows is
// returned (via Scan) when the table is empty.
func (q *Queries) GetLatestIndexedBlock(ctx context.Context) (Brc20IndexedBlock, error) {
	row := q.db.QueryRow(ctx, getLatestIndexedBlock)
	var i Brc20IndexedBlock
	err := row.Scan(
		&i.Height,
		&i.Hash,
		&i.EventHash,
		&i.CumulativeEventHash,
	)
	return i, err
}
const getLatestProcessorStats = `-- name: GetLatestProcessorStats :one
SELECT block_height, cursed_inscription_count, blessed_inscription_count, lost_sats FROM "brc20_processor_stats" ORDER BY "block_height" DESC LIMIT 1
`
// GetLatestProcessorStats returns the stats row at the highest block height;
// pgx.ErrNoRows is returned (via Scan) when the table is empty.
func (q *Queries) GetLatestProcessorStats(ctx context.Context) (Brc20ProcessorStat, error) {
	row := q.db.QueryRow(ctx, getLatestProcessorStats)
	var i Brc20ProcessorStat
	err := row.Scan(
		&i.BlockHeight,
		&i.CursedInscriptionCount,
		&i.BlessedInscriptionCount,
		&i.LostSats,
	)
	return i, err
}
const getTickEntriesByTicks = `-- name: GetTickEntriesByTicks :many
WITH "states" AS (
-- select latest state
SELECT DISTINCT ON ("tick") tick, block_height, minted_amount, burned_amount, completed_at, completed_at_height FROM "brc20_tick_entry_states" WHERE "tick" = ANY($1::text[]) ORDER BY "tick", "block_height" DESC
)
SELECT brc20_tick_entries.tick, original_tick, total_supply, decimals, limit_per_mint, is_self_mint, deploy_inscription_id, deployed_at, deployed_at_height, states.tick, block_height, minted_amount, burned_amount, completed_at, completed_at_height FROM "brc20_tick_entries"
LEFT JOIN "states" ON "brc20_tick_entries"."tick" = "states"."tick"
WHERE "brc20_tick_entries"."tick" = ANY($1::text[])
`
// GetTickEntriesByTicksRow joins a tick entry with its latest state; the
// Tick_2 and following columns come from the LEFT JOIN and are NULL
// (invalid pgtype values) when no state row exists.
type GetTickEntriesByTicksRow struct {
	Tick                string
	OriginalTick        string
	TotalSupply         pgtype.Numeric
	Decimals            int16
	LimitPerMint        pgtype.Numeric
	IsSelfMint          bool
	DeployInscriptionID string
	DeployedAt          pgtype.Timestamp
	DeployedAtHeight    int32
	Tick_2              pgtype.Text
	BlockHeight         pgtype.Int4
	MintedAmount        pgtype.Numeric
	BurnedAmount        pgtype.Numeric
	CompletedAt         pgtype.Timestamp
	CompletedAtHeight   pgtype.Int4
}
// GetTickEntriesByTicks returns the entries (with latest state, if any) for
// the given ticks.
func (q *Queries) GetTickEntriesByTicks(ctx context.Context, ticks []string) ([]GetTickEntriesByTicksRow, error) {
	rows, err := q.db.Query(ctx, getTickEntriesByTicks, ticks)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTickEntriesByTicksRow
	for rows.Next() {
		var i GetTickEntriesByTicksRow
		if err := rows.Scan(
			&i.Tick,
			&i.OriginalTick,
			&i.TotalSupply,
			&i.Decimals,
			&i.LimitPerMint,
			&i.IsSelfMint,
			&i.DeployInscriptionID,
			&i.DeployedAt,
			&i.DeployedAtHeight,
			&i.Tick_2,
			&i.BlockHeight,
			&i.MintedAmount,
			&i.BurnedAmount,
			&i.CompletedAt,
			&i.CompletedAtHeight,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

View File

@@ -1,33 +0,0 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
)
// DBTX is the minimal pgx execution surface Queries needs; connections,
// pools, and transactions all satisfy it.
type DBTX interface {
	Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
	Query(context.Context, string, ...interface{}) (pgx.Rows, error)
	QueryRow(context.Context, string, ...interface{}) pgx.Row
	SendBatch(context.Context, *pgx.Batch) pgx.BatchResults
}
// New returns a Queries bound to the given DBTX.
func New(db DBTX) *Queries {
	return &Queries{db: db}
}
// Queries exposes the generated query methods over a DBTX.
type Queries struct {
	db DBTX
}
// WithTx returns a copy of Queries that executes against tx instead of the
// original DBTX.
func (q *Queries) WithTx(tx pgx.Tx) *Queries {
	return &Queries{
		db: tx,
	}
}

View File

@@ -1,49 +0,0 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: info.sql
package gen
import (
"context"
)
const createIndexerState = `-- name: CreateIndexerState :exec
INSERT INTO brc20_indexer_states (client_version, network, db_version, event_hash_version) VALUES ($1, $2, $3, $4)
`
// CreateIndexerStateParams holds one row for the "brc20_indexer_states"
// insert; created_at/id are filled by the database.
type CreateIndexerStateParams struct {
	ClientVersion    string
	Network          string
	DbVersion        int32
	EventHashVersion int32
}
// CreateIndexerState records the indexer's version metadata.
func (q *Queries) CreateIndexerState(ctx context.Context, arg CreateIndexerStateParams) error {
	_, err := q.db.Exec(ctx, createIndexerState,
		arg.ClientVersion,
		arg.Network,
		arg.DbVersion,
		arg.EventHashVersion,
	)
	return err
}
const getLatestIndexerState = `-- name: GetLatestIndexerState :one
SELECT id, client_version, network, db_version, event_hash_version, created_at FROM brc20_indexer_states ORDER BY created_at DESC LIMIT 1
`
// GetLatestIndexerState returns the most recently created indexer state;
// pgx.ErrNoRows is returned (via Scan) when the table is empty.
func (q *Queries) GetLatestIndexerState(ctx context.Context) (Brc20IndexerState, error) {
	row := q.db.QueryRow(ctx, getLatestIndexerState)
	var i Brc20IndexerState
	err := row.Scan(
		&i.Id,
		&i.ClientVersion,
		&i.Network,
		&i.DbVersion,
		&i.EventHashVersion,
		&i.CreatedAt,
	)
	return i, err
}

View File

@@ -1,174 +0,0 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"github.com/jackc/pgx/v5/pgtype"
)
// Brc20Balance is a row of "brc20_balances": a (pkscript, tick) balance
// snapshot at a block height.
type Brc20Balance struct {
	Pkscript         string
	BlockHeight      int32
	Tick             string
	OverallBalance   pgtype.Numeric
	AvailableBalance pgtype.Numeric
}
// Brc20EventDeploy is a row of "brc20_event_deploys" (a BRC-20 deploy event).
type Brc20EventDeploy struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	TotalSupply       pgtype.Numeric
	Decimals          int16
	LimitPerMint      pgtype.Numeric
	IsSelfMint        bool
}
// Brc20EventInscribeTransfer is a row of "brc20_event_inscribe_transfers".
type Brc20EventInscribeTransfer struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	OutputIndex       int32
	SatsAmount        int64
	Amount            pgtype.Numeric
}
// Brc20EventMint is a row of "brc20_event_mints" (a BRC-20 mint event).
type Brc20EventMint struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	Pkscript          string
	Satpoint          string
	Amount            pgtype.Numeric
	ParentID          pgtype.Text
}
// Brc20EventTransferTransfer is a row of "brc20_event_transfer_transfers"
// (the spend of a transfer inscription).
type Brc20EventTransferTransfer struct {
	Id                int64
	InscriptionID     string
	InscriptionNumber int64
	Tick              string
	OriginalTick      string
	TxHash            string
	BlockHeight       int32
	TxIndex           int32
	Timestamp         pgtype.Timestamp
	FromPkscript      string
	FromSatpoint      string
	FromInputIndex    int32
	ToPkscript        string
	ToSatpoint        string
	ToOutputIndex     int32
	SpentAsFee        bool
	Amount            pgtype.Numeric
}
// Brc20IndexedBlock is a row of "brc20_indexed_blocks".
type Brc20IndexedBlock struct {
	Height              int32
	Hash                string
	EventHash           string
	CumulativeEventHash string
}
// Brc20IndexerState is a row of "brc20_indexer_states" (indexer version
// metadata).
type Brc20IndexerState struct {
	Id               int64
	ClientVersion    string
	Network          string
	DbVersion        int32
	EventHashVersion int32
	CreatedAt        pgtype.Timestamptz
}
// Brc20InscriptionEntry is a row of "brc20_inscription_entries".
type Brc20InscriptionEntry struct {
	Id              string
	Number          int64
	SequenceNumber  int64
	Delegate        pgtype.Text
	Metadata        []byte
	Metaprotocol    pgtype.Text
	Parents         []string
	Pointer         pgtype.Int8
	Content         []byte
	ContentEncoding pgtype.Text
	ContentType     pgtype.Text
	Cursed          bool
	CursedForBrc20  bool
	CreatedAt       pgtype.Timestamp
	CreatedAtHeight int32
}
// Brc20InscriptionEntryState is a row of "brc20_inscription_entry_states".
type Brc20InscriptionEntryState struct {
	Id            string
	BlockHeight   int32
	TransferCount int32
}
// Brc20InscriptionTransfer is a row of "brc20_inscription_transfers"; the
// old_satpoint_* columns are nullable for an inscription's first transfer.
type Brc20InscriptionTransfer struct {
	InscriptionID     string
	BlockHeight       int32
	TxIndex           int32
	TxHash            string
	FromInputIndex    int32
	OldSatpointTxHash pgtype.Text
	OldSatpointOutIdx pgtype.Int4
	OldSatpointOffset pgtype.Int8
	NewSatpointTxHash pgtype.Text
	NewSatpointOutIdx pgtype.Int4
	NewSatpointOffset pgtype.Int8
	NewPkscript       string
	NewOutputValue    int64
	SentAsFee         bool
	TransferCount     int32
}
// Brc20ProcessorStat is a row of "brc20_processor_stats".
type Brc20ProcessorStat struct {
	BlockHeight             int32
	CursedInscriptionCount  int32
	BlessedInscriptionCount int32
	LostSats                int64
}
// Brc20TickEntry is a row of "brc20_tick_entries" (immutable deploy data).
type Brc20TickEntry struct {
	Tick                string
	OriginalTick        string
	TotalSupply         pgtype.Numeric
	Decimals            int16
	LimitPerMint        pgtype.Numeric
	IsSelfMint          bool
	DeployInscriptionID string
	DeployedAt          pgtype.Timestamp
	DeployedAtHeight    int32
}
// Brc20TickEntryState is a row of "brc20_tick_entry_states" (mutable,
// per-height state of a tick).
type Brc20TickEntryState struct {
	Tick              string
	BlockHeight       int32
	MintedAmount      pgtype.Numeric
	BurnedAmount      pgtype.Numeric
	CompletedAt       pgtype.Timestamp
	CompletedAtHeight pgtype.Int4
}

View File

@@ -1,33 +0,0 @@
package postgres
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/jackc/pgx/v5"
)
var _ datagateway.IndexerInfoDataGateway = (*Repository)(nil)
// GetLatestIndexerState returns the most recently created indexer state,
// mapped to the domain entity. An empty table yields errs.NotFound (wrapped
// with a stack trace).
func (r *Repository) GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error) {
	model, err := r.queries.GetLatestIndexerState(ctx)
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return entity.IndexerState{}, errors.WithStack(errs.NotFound)
	case err != nil:
		return entity.IndexerState{}, errors.Wrap(err, "error during query")
	}
	return mapIndexerStatesModelToType(model), nil
}
// CreateIndexerState persists a new indexer state row built from the given
// domain entity.
func (r *Repository) CreateIndexerState(ctx context.Context, state entity.IndexerState) error {
	err := r.queries.CreateIndexerState(ctx, mapIndexerStatesTypeToParams(state))
	if err != nil {
		return errors.Wrap(err, "error during exec")
	}
	return nil
}

View File

@@ -1,614 +0,0 @@
package postgres
import (
"encoding/hex"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres/gen"
"github.com/jackc/pgx/v5/pgtype"
"github.com/samber/lo"
"github.com/shopspring/decimal"
)
// decimalFromNumeric converts a pgtype.Numeric into a decimal.NullDecimal.
// NULL, NaN, and +/-Inf inputs all map to an invalid (null) decimal.
func decimalFromNumeric(src pgtype.Numeric) decimal.NullDecimal {
	unusable := !src.Valid || src.NaN || src.InfinityModifier != pgtype.Finite
	if unusable {
		return decimal.NullDecimal{}
	}
	return decimal.NewNullDecimal(decimal.NewFromBigInt(src.Int, src.Exp))
}
// numericFromDecimal converts a decimal.Decimal into a valid, finite
// pgtype.Numeric (coefficient plus base-10 exponent).
func numericFromDecimal(src decimal.Decimal) pgtype.Numeric {
	return pgtype.Numeric{
		Int:              src.Coefficient(),
		Exp:              src.Exponent(),
		NaN:              false,
		InfinityModifier: pgtype.Finite,
		Valid:            true,
	}
}
// numericFromNullDecimal converts a decimal.NullDecimal into a
// pgtype.Numeric; an invalid (null) input maps to the zero (NULL) Numeric.
func numericFromNullDecimal(src decimal.NullDecimal) pgtype.Numeric {
	if src.Valid {
		return numericFromDecimal(src.Decimal)
	}
	return pgtype.Numeric{}
}
// mapIndexerStatesModelToType converts a generated indexer-state row into the
// domain entity. A NULL created_at column maps to the zero time.Time.
func mapIndexerStatesModelToType(src gen.Brc20IndexerState) entity.IndexerState {
	var createdAt time.Time
	if src.CreatedAt.Valid {
		createdAt = src.CreatedAt.Time
	}
	return entity.IndexerState{
		ClientVersion: src.ClientVersion,
		Network:       common.Network(src.Network),
		// DbVersion/EventHashVersion are already int32 in the generated
		// model; the previous int32(...) conversions were redundant.
		DBVersion:        src.DbVersion,
		EventHashVersion: src.EventHashVersion,
		CreatedAt:        createdAt,
	}
}
// mapIndexerStatesTypeToParams builds the insert parameters for an indexer
// state from the domain entity; created_at is filled by the database.
func mapIndexerStatesTypeToParams(src entity.IndexerState) gen.CreateIndexerStateParams {
	params := gen.CreateIndexerStateParams{}
	params.ClientVersion = src.ClientVersion
	params.Network = string(src.Network)
	params.DbVersion = int32(src.DBVersion)
	params.EventHashVersion = int32(src.EventHashVersion)
	return params
}
// mapIndexedBlockModelToType decodes a stored indexed-block row into the
// domain entity, parsing the block hash and the hex-encoded event hashes.
func mapIndexedBlockModelToType(src gen.Brc20IndexedBlock) (entity.IndexedBlock, error) {
	blockHash, err := chainhash.NewHashFromStr(src.Hash)
	if err != nil {
		return entity.IndexedBlock{}, errors.Wrap(err, "invalid block hash")
	}
	eventHash, err := hex.DecodeString(src.EventHash)
	if err != nil {
		return entity.IndexedBlock{}, errors.Wrap(err, "invalid event hash")
	}
	cumulative, err := hex.DecodeString(src.CumulativeEventHash)
	if err != nil {
		return entity.IndexedBlock{}, errors.Wrap(err, "invalid cumulative event hash")
	}
	result := entity.IndexedBlock{
		Height:              uint64(src.Height),
		Hash:                *blockHash,
		EventHash:           eventHash,
		CumulativeEventHash: cumulative,
	}
	return result, nil
}
// mapIndexedBlockTypeToParams encodes an indexed-block entity into insert
// parameters, hex-encoding both event hashes.
func mapIndexedBlockTypeToParams(src entity.IndexedBlock) gen.CreateIndexedBlockParams {
	params := gen.CreateIndexedBlockParams{}
	params.Height = int32(src.Height)
	params.Hash = src.Hash.String()
	params.EventHash = hex.EncodeToString(src.EventHash)
	params.CumulativeEventHash = hex.EncodeToString(src.CumulativeEventHash)
	return params
}
// mapProcessorStatsModelToType widens a stored stats row into the unsigned
// domain entity.
func mapProcessorStatsModelToType(src gen.Brc20ProcessorStat) entity.ProcessorStats {
	stats := entity.ProcessorStats{}
	stats.BlockHeight = uint64(src.BlockHeight)
	stats.CursedInscriptionCount = uint64(src.CursedInscriptionCount)
	stats.BlessedInscriptionCount = uint64(src.BlessedInscriptionCount)
	stats.LostSats = uint64(src.LostSats)
	return stats
}
// mapProcessorStatsTypeToParams narrows the unsigned domain stats into the
// signed column types used by the insert.
func mapProcessorStatsTypeToParams(src entity.ProcessorStats) gen.CreateProcessorStatsParams {
	params := gen.CreateProcessorStatsParams{}
	params.BlockHeight = int32(src.BlockHeight)
	params.CursedInscriptionCount = int32(src.CursedInscriptionCount)
	params.BlessedInscriptionCount = int32(src.BlessedInscriptionCount)
	params.LostSats = int64(src.LostSats)
	return params
}
// mapTickEntryModelToType converts a tick-entry row into an
// entity.TickEntry. Nullable completion columns fall back to their zero
// values when not set; an error is returned when the stored deploy
// inscription id cannot be parsed.
func mapTickEntryModelToType(src gen.GetTickEntriesByTicksRow) (entity.TickEntry, error) {
	deployInscriptionId, err := ordinals.NewInscriptionIdFromString(src.DeployInscriptionID)
	if err != nil {
		return entity.TickEntry{}, errors.Wrap(err, "invalid deployInscriptionId")
	}
	completedAt := lo.Ternary(src.CompletedAt.Valid, src.CompletedAt.Time, time.Time{})
	completedAtHeight := uint64(0)
	if src.CompletedAtHeight.Valid {
		completedAtHeight = uint64(src.CompletedAtHeight.Int32)
	}
	entry := entity.TickEntry{
		Tick:                src.Tick,
		OriginalTick:        src.OriginalTick,
		TotalSupply:         decimalFromNumeric(src.TotalSupply).Decimal,
		Decimals:            uint16(src.Decimals),
		LimitPerMint:        decimalFromNumeric(src.LimitPerMint).Decimal,
		IsSelfMint:          src.IsSelfMint,
		DeployInscriptionId: deployInscriptionId,
		DeployedAt:          src.DeployedAt.Time,
		DeployedAtHeight:    uint64(src.DeployedAtHeight),
		MintedAmount:        decimalFromNumeric(src.MintedAmount).Decimal,
		BurnedAmount:        decimalFromNumeric(src.BurnedAmount).Decimal,
		CompletedAt:         completedAt,
		CompletedAtHeight:   completedAtHeight,
	}
	return entry, nil
}
// mapTickEntryTypeToParams converts an entity.TickEntry into the immutable
// tick-entry insert parameters and the per-block tick-entry state
// parameters for the given block height.
func mapTickEntryTypeToParams(src entity.TickEntry, blockHeight uint64) (gen.CreateTickEntriesParams, gen.CreateTickEntryStatesParams, error) {
	entryParams := gen.CreateTickEntriesParams{
		Tick:                src.Tick,
		OriginalTick:        src.OriginalTick,
		TotalSupply:         numericFromDecimal(src.TotalSupply),
		Decimals:            int16(src.Decimals),
		LimitPerMint:        numericFromDecimal(src.LimitPerMint),
		IsSelfMint:          src.IsSelfMint,
		DeployInscriptionID: src.DeployInscriptionId.String(),
		DeployedAt:          pgtype.Timestamp{Time: src.DeployedAt, Valid: true},
		DeployedAtHeight:    int32(src.DeployedAtHeight),
	}
	stateParams := gen.CreateTickEntryStatesParams{
		Tick:              src.Tick,
		BlockHeight:       int32(blockHeight),
		CompletedAt:       pgtype.Timestamp{Time: src.CompletedAt, Valid: !src.CompletedAt.IsZero()},
		CompletedAtHeight: pgtype.Int4{Int32: int32(src.CompletedAtHeight), Valid: src.CompletedAtHeight != 0},
		MintedAmount:      numericFromDecimal(src.MintedAmount),
		BurnedAmount:      numericFromDecimal(src.BurnedAmount),
	}
	return entryParams, stateParams, nil
}
// mapInscriptionEntryModelToType converts a joined inscription-entry row into
// an ordinals.InscriptionEntry. String-encoded inscription ids (entry,
// delegate, parent) are parsed, and nullable columns fall back to their zero
// values.
//
// Returns an error if any stored inscription id cannot be parsed.
func mapInscriptionEntryModelToType(src gen.GetInscriptionEntriesByIdsRow) (ordinals.InscriptionEntry, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.Id)
	if err != nil {
		return ordinals.InscriptionEntry{}, errors.Wrap(err, "invalid inscription id")
	}
	// delegate and parent are optional; they stay nil unless present in the row.
	var delegate, parent *ordinals.InscriptionId
	if src.Delegate.Valid {
		delegateValue, err := ordinals.NewInscriptionIdFromString(src.Delegate.String)
		if err != nil {
			return ordinals.InscriptionEntry{}, errors.Wrap(err, "invalid delegate id")
		}
		delegate = &delegateValue
	}
	// ord 0.14.0 supports only one parent, so only the first stored parent is used.
	if len(src.Parents) > 0 {
		parentValue, err := ordinals.NewInscriptionIdFromString(src.Parents[0])
		if err != nil {
			return ordinals.InscriptionEntry{}, errors.Wrap(err, "invalid parent id")
		}
		parent = &parentValue
	}
	inscription := ordinals.Inscription{
		Content:         src.Content,
		ContentEncoding: lo.Ternary(src.ContentEncoding.Valid, src.ContentEncoding.String, ""),
		ContentType:     lo.Ternary(src.ContentType.Valid, src.ContentType.String, ""),
		Delegate:        delegate,
		Metadata:        src.Metadata,
		Metaprotocol:    lo.Ternary(src.Metaprotocol.Valid, src.Metaprotocol.String, ""),
		Parent:          parent,
		Pointer:         lo.Ternary(src.Pointer.Valid, lo.ToPtr(uint64(src.Pointer.Int64)), nil),
	}
	return ordinals.InscriptionEntry{
		Id:              inscriptionId,
		Number:          src.Number,
		SequenceNumber:  uint64(src.SequenceNumber),
		Cursed:          src.Cursed,
		CursedForBRC20:  src.CursedForBrc20,
		CreatedAt:       lo.Ternary(src.CreatedAt.Valid, src.CreatedAt.Time, time.Time{}),
		CreatedAtHeight: uint64(src.CreatedAtHeight),
		Inscription:     inscription,
		TransferCount:   lo.Ternary(src.TransferCount.Valid, uint32(src.TransferCount.Int32), 0),
	}, nil
}
// mapInscriptionEntryTypeToParams converts an ordinals.InscriptionEntry into
// the immutable inscription-entry insert parameters and the per-block
// inscription-entry state parameters for the given block height. Optional
// fields (empty strings, nil pointers, zero times) are stored as NULL.
func mapInscriptionEntryTypeToParams(src ordinals.InscriptionEntry, blockHeight uint64) (gen.CreateInscriptionEntriesParams, gen.CreateInscriptionEntryStatesParams, error) {
	// Empty strings map to invalid (NULL) pgtype.Text values.
	var delegate, metaprotocol, contentEncoding, contentType pgtype.Text
	if src.Inscription.Delegate != nil {
		delegate = pgtype.Text{String: src.Inscription.Delegate.String(), Valid: true}
	}
	if src.Inscription.Metaprotocol != "" {
		metaprotocol = pgtype.Text{String: src.Inscription.Metaprotocol, Valid: true}
	}
	if src.Inscription.ContentEncoding != "" {
		contentEncoding = pgtype.Text{String: src.Inscription.ContentEncoding, Valid: true}
	}
	if src.Inscription.ContentType != "" {
		contentType = pgtype.Text{String: src.Inscription.ContentType, Valid: true}
	}
	// The entity holds at most one parent; store it as a single-element array.
	var parents []string
	if src.Inscription.Parent != nil {
		parents = append(parents, src.Inscription.Parent.String())
	}
	var pointer pgtype.Int8
	if src.Inscription.Pointer != nil {
		pointer = pgtype.Int8{Int64: int64(*src.Inscription.Pointer), Valid: true}
	}
	return gen.CreateInscriptionEntriesParams{
		Id:              src.Id.String(),
		Number:          src.Number,
		SequenceNumber:  int64(src.SequenceNumber),
		Delegate:        delegate,
		Metadata:        src.Inscription.Metadata,
		Metaprotocol:    metaprotocol,
		Parents:         parents,
		Pointer:         pointer,
		Content:         src.Inscription.Content,
		ContentEncoding: contentEncoding,
		ContentType:     contentType,
		Cursed:          src.Cursed,
		CursedForBrc20:  src.CursedForBRC20,
		CreatedAt:       lo.Ternary(!src.CreatedAt.IsZero(), pgtype.Timestamp{Time: src.CreatedAt, Valid: true}, pgtype.Timestamp{}),
		CreatedAtHeight: int32(src.CreatedAtHeight),
	}, gen.CreateInscriptionEntryStatesParams{
		Id:            src.Id.String(),
		BlockHeight:   int32(blockHeight),
		TransferCount: int32(src.TransferCount),
	}, nil
}
// mapInscriptionTransferModelToType converts an inscription-transfer row into
// an entity.InscriptionTransfer. The old/new satpoints are reconstructed from
// their nullable hash/outIdx/offset column triples: a transfer with a zero
// old satpoint has no stored old satpoint columns (the inscription was just
// revealed), and all three columns of a triple must be set together.
//
// Returns an error on malformed hashes, inconsistent satpoint columns, or a
// non-hex pkscript.
func mapInscriptionTransferModelToType(src gen.GetInscriptionTransfersInOutPointsRow) (entity.InscriptionTransfer, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid inscription id")
	}
	txHash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid tx hash")
	}
	// Zero satpoints mean "not present"; only filled in when the hash column is set.
	var oldSatPoint, newSatPoint ordinals.SatPoint
	if src.OldSatpointTxHash.Valid {
		if !src.OldSatpointOutIdx.Valid || !src.OldSatpointOffset.Valid {
			return entity.InscriptionTransfer{}, errors.New("old satpoint out idx and offset must exist if hash exists")
		}
		txHash, err := chainhash.NewHashFromStr(src.OldSatpointTxHash.String)
		if err != nil {
			return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid old satpoint tx hash")
		}
		oldSatPoint = ordinals.SatPoint{
			OutPoint: wire.OutPoint{
				Hash:  *txHash,
				Index: uint32(src.OldSatpointOutIdx.Int32),
			},
			Offset: uint64(src.OldSatpointOffset.Int64),
		}
	}
	if src.NewSatpointTxHash.Valid {
		if !src.NewSatpointOutIdx.Valid || !src.NewSatpointOffset.Valid {
			return entity.InscriptionTransfer{}, errors.New("new satpoint out idx and offset must exist if hash exists")
		}
		txHash, err := chainhash.NewHashFromStr(src.NewSatpointTxHash.String)
		if err != nil {
			return entity.InscriptionTransfer{}, errors.Wrap(err, "invalid new satpoint tx hash")
		}
		newSatPoint = ordinals.SatPoint{
			OutPoint: wire.OutPoint{
				Hash:  *txHash,
				Index: uint32(src.NewSatpointOutIdx.Int32),
			},
			Offset: uint64(src.NewSatpointOffset.Int64),
		}
	}
	newPkScript, err := hex.DecodeString(src.NewPkscript)
	if err != nil {
		return entity.InscriptionTransfer{}, errors.Wrap(err, "failed to parse pkscript")
	}
	return entity.InscriptionTransfer{
		InscriptionId:  inscriptionId,
		BlockHeight:    uint64(src.BlockHeight),
		TxIndex:        uint32(src.TxIndex),
		TxHash:         *txHash,
		FromInputIndex: uint32(src.FromInputIndex),
		Content:        src.Content,
		OldSatPoint:    oldSatPoint,
		NewSatPoint:    newSatPoint,
		NewPkScript:    newPkScript,
		NewOutputValue: uint64(src.NewOutputValue),
		SentAsFee:      src.SentAsFee,
		TransferCount:  uint32(src.TransferCount),
	}, nil
}
// mapInscriptionTransferTypeToParams converts an entity.InscriptionTransfer
// into insert parameters. A zero-valued old/new satpoint is stored as a NULL
// hash/outIdx/offset column triple.
func mapInscriptionTransferTypeToParams(src entity.InscriptionTransfer) gen.CreateInscriptionTransfersParams {
	var (
		oldSatpointTxHash pgtype.Text
		oldSatpointOutIdx pgtype.Int4
		oldSatpointOffset pgtype.Int8
	)
	if src.OldSatPoint != (ordinals.SatPoint{}) {
		oldSatpointTxHash = pgtype.Text{String: src.OldSatPoint.OutPoint.Hash.String(), Valid: true}
		oldSatpointOutIdx = pgtype.Int4{Int32: int32(src.OldSatPoint.OutPoint.Index), Valid: true}
		oldSatpointOffset = pgtype.Int8{Int64: int64(src.OldSatPoint.Offset), Valid: true}
	}
	var (
		newSatpointTxHash pgtype.Text
		newSatpointOutIdx pgtype.Int4
		newSatpointOffset pgtype.Int8
	)
	if src.NewSatPoint != (ordinals.SatPoint{}) {
		newSatpointTxHash = pgtype.Text{String: src.NewSatPoint.OutPoint.Hash.String(), Valid: true}
		newSatpointOutIdx = pgtype.Int4{Int32: int32(src.NewSatPoint.OutPoint.Index), Valid: true}
		newSatpointOffset = pgtype.Int8{Int64: int64(src.NewSatPoint.Offset), Valid: true}
	}
	return gen.CreateInscriptionTransfersParams{
		InscriptionID:     src.InscriptionId.String(),
		BlockHeight:       int32(src.BlockHeight),
		TxIndex:           int32(src.TxIndex),
		TxHash:            src.TxHash.String(),
		FromInputIndex:    int32(src.FromInputIndex),
		OldSatpointTxHash: oldSatpointTxHash,
		OldSatpointOutIdx: oldSatpointOutIdx,
		OldSatpointOffset: oldSatpointOffset,
		NewSatpointTxHash: newSatpointTxHash,
		NewSatpointOutIdx: newSatpointOutIdx,
		NewSatpointOffset: newSatpointOffset,
		NewPkscript:       hex.EncodeToString(src.NewPkScript),
		NewOutputValue:    int64(src.NewOutputValue),
		SentAsFee:         src.SentAsFee,
		TransferCount:     int32(src.TransferCount),
	}
}
// mapEventDeployModelToType converts a deploy-event row into an
// entity.EventDeploy, parsing the stored inscription id, tx hash, pkscript
// and satpoint. Returns an error when any of those fields cannot be parsed.
func mapEventDeployModelToType(src gen.Brc20EventDeploy) (entity.EventDeploy, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "invalid inscription id")
	}
	txHash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "invalid tx hash")
	}
	pkScript, err := hex.DecodeString(src.Pkscript)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "failed to parse pkscript")
	}
	satPoint, err := ordinals.NewSatPointFromString(src.Satpoint)
	if err != nil {
		return entity.EventDeploy{}, errors.Wrap(err, "cannot parse satpoint")
	}
	return entity.EventDeploy{
		Id:                src.Id,
		InscriptionId:     inscriptionId,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *txHash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		PkScript:          pkScript,
		SatPoint:          satPoint,
		TotalSupply:       decimalFromNumeric(src.TotalSupply).Decimal,
		Decimals:          uint16(src.Decimals),
		LimitPerMint:      decimalFromNumeric(src.LimitPerMint).Decimal,
		IsSelfMint:        src.IsSelfMint,
	}, nil
}
// mapEventDeployTypeToParams converts an entity.EventDeploy into insert
// parameters. A zero Timestamp is stored as NULL.
func mapEventDeployTypeToParams(src entity.EventDeploy) (gen.CreateEventDeploysParams, error) {
	timestamp := pgtype.Timestamp{Time: src.Timestamp, Valid: !src.Timestamp.IsZero()}
	params := gen.CreateEventDeploysParams{
		InscriptionID:     src.InscriptionId.String(),
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            src.TxHash.String(),
		BlockHeight:       int32(src.BlockHeight),
		TxIndex:           int32(src.TxIndex),
		Timestamp:         timestamp,
		Pkscript:          hex.EncodeToString(src.PkScript),
		Satpoint:          src.SatPoint.String(),
		TotalSupply:       numericFromDecimal(src.TotalSupply),
		Decimals:          int16(src.Decimals),
		LimitPerMint:      numericFromDecimal(src.LimitPerMint),
		IsSelfMint:        src.IsSelfMint,
	}
	return params, nil
}
// mapEventMintModelToType converts a mint-event row into an entity.EventMint,
// parsing the stored inscription id, tx hash, pkscript, satpoint and the
// optional parent inscription id. Returns an error when any of those fields
// cannot be parsed.
func mapEventMintModelToType(src gen.Brc20EventMint) (entity.EventMint, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "invalid inscription id")
	}
	txHash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "invalid tx hash")
	}
	pkScript, err := hex.DecodeString(src.Pkscript)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "failed to parse pkscript")
	}
	satPoint, err := ordinals.NewSatPointFromString(src.Satpoint)
	if err != nil {
		return entity.EventMint{}, errors.Wrap(err, "cannot parse satpoint")
	}
	// parentId is optional; it stays nil when the column is NULL.
	var parentId *ordinals.InscriptionId
	if src.ParentID.Valid {
		parentIdValue, err := ordinals.NewInscriptionIdFromString(src.ParentID.String)
		if err != nil {
			return entity.EventMint{}, errors.Wrap(err, "invalid parent id")
		}
		parentId = &parentIdValue
	}
	return entity.EventMint{
		Id:                src.Id,
		InscriptionId:     inscriptionId,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *txHash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		PkScript:          pkScript,
		SatPoint:          satPoint,
		Amount:            decimalFromNumeric(src.Amount).Decimal,
		ParentId:          parentId,
	}, nil
}
// mapEventMintTypeToParams converts an entity.EventMint into insert
// parameters. A zero Timestamp and a nil ParentId are stored as NULL.
func mapEventMintTypeToParams(src entity.EventMint) (gen.CreateEventMintsParams, error) {
	timestamp := pgtype.Timestamp{Time: src.Timestamp, Valid: !src.Timestamp.IsZero()}
	var parentId pgtype.Text
	if id := src.ParentId; id != nil {
		parentId = pgtype.Text{String: id.String(), Valid: true}
	}
	params := gen.CreateEventMintsParams{
		InscriptionID:     src.InscriptionId.String(),
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            src.TxHash.String(),
		BlockHeight:       int32(src.BlockHeight),
		TxIndex:           int32(src.TxIndex),
		Timestamp:         timestamp,
		Pkscript:          hex.EncodeToString(src.PkScript),
		Satpoint:          src.SatPoint.String(),
		Amount:            numericFromDecimal(src.Amount),
		ParentID:          parentId,
	}
	return params, nil
}
// mapEventInscribeTransferModelToType converts an inscribe-transfer event row
// into an entity.EventInscribeTransfer, parsing the stored inscription id,
// tx hash, pkscript and satpoint. Returns an error when any of those fields
// cannot be parsed.
func mapEventInscribeTransferModelToType(src gen.Brc20EventInscribeTransfer) (entity.EventInscribeTransfer, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse inscription id")
	}
	txHash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse hash")
	}
	pkScript, err := hex.DecodeString(src.Pkscript)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse pkScript")
	}
	satPoint, err := ordinals.NewSatPointFromString(src.Satpoint)
	if err != nil {
		return entity.EventInscribeTransfer{}, errors.Wrap(err, "cannot parse satPoint")
	}
	return entity.EventInscribeTransfer{
		Id:                src.Id,
		InscriptionId:     inscriptionId,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *txHash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		PkScript:          pkScript,
		SatPoint:          satPoint,
		OutputIndex:       uint32(src.OutputIndex),
		SatsAmount:        uint64(src.SatsAmount),
		Amount:            decimalFromNumeric(src.Amount).Decimal,
	}, nil
}
// mapEventInscribeTransferTypeToParams converts an
// entity.EventInscribeTransfer into insert parameters. A zero Timestamp is
// stored as NULL.
func mapEventInscribeTransferTypeToParams(src entity.EventInscribeTransfer) (gen.CreateEventInscribeTransfersParams, error) {
	timestamp := pgtype.Timestamp{Time: src.Timestamp, Valid: !src.Timestamp.IsZero()}
	params := gen.CreateEventInscribeTransfersParams{
		InscriptionID:     src.InscriptionId.String(),
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            src.TxHash.String(),
		BlockHeight:       int32(src.BlockHeight),
		TxIndex:           int32(src.TxIndex),
		Timestamp:         timestamp,
		Pkscript:          hex.EncodeToString(src.PkScript),
		Satpoint:          src.SatPoint.String(),
		OutputIndex:       int32(src.OutputIndex),
		SatsAmount:        int64(src.SatsAmount),
		Amount:            numericFromDecimal(src.Amount),
	}
	return params, nil
}
// mapEventTransferTransferModelToType converts a transfer-transfer event row
// into an entity.EventTransferTransfer, parsing the stored inscription id,
// tx hash, and both the source and destination pkscripts/satpoints. Returns
// an error when any of those fields cannot be parsed.
func mapEventTransferTransferModelToType(src gen.Brc20EventTransferTransfer) (entity.EventTransferTransfer, error) {
	inscriptionId, err := ordinals.NewInscriptionIdFromString(src.InscriptionID)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse inscription id")
	}
	txHash, err := chainhash.NewHashFromStr(src.TxHash)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse hash")
	}
	fromPkScript, err := hex.DecodeString(src.FromPkscript)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse fromPkScript")
	}
	fromSatPoint, err := ordinals.NewSatPointFromString(src.FromSatpoint)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse fromSatPoint")
	}
	toPkScript, err := hex.DecodeString(src.ToPkscript)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse toPkScript")
	}
	toSatPoint, err := ordinals.NewSatPointFromString(src.ToSatpoint)
	if err != nil {
		return entity.EventTransferTransfer{}, errors.Wrap(err, "cannot parse toSatPoint")
	}
	return entity.EventTransferTransfer{
		Id:                src.Id,
		InscriptionId:     inscriptionId,
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            *txHash,
		BlockHeight:       uint64(src.BlockHeight),
		TxIndex:           uint32(src.TxIndex),
		Timestamp:         src.Timestamp.Time,
		FromPkScript:      fromPkScript,
		FromSatPoint:      fromSatPoint,
		FromInputIndex:    uint32(src.FromInputIndex),
		ToPkScript:        toPkScript,
		ToSatPoint:        toSatPoint,
		ToOutputIndex:     uint32(src.ToOutputIndex),
		SpentAsFee:        src.SpentAsFee,
		Amount:            decimalFromNumeric(src.Amount).Decimal,
	}, nil
}
// mapEventTransferTransferTypeToParams converts an
// entity.EventTransferTransfer into insert parameters. A zero Timestamp is
// stored as NULL.
func mapEventTransferTransferTypeToParams(src entity.EventTransferTransfer) (gen.CreateEventTransferTransfersParams, error) {
	timestamp := pgtype.Timestamp{Time: src.Timestamp, Valid: !src.Timestamp.IsZero()}
	params := gen.CreateEventTransferTransfersParams{
		InscriptionID:     src.InscriptionId.String(),
		InscriptionNumber: src.InscriptionNumber,
		Tick:              src.Tick,
		OriginalTick:      src.OriginalTick,
		TxHash:            src.TxHash.String(),
		BlockHeight:       int32(src.BlockHeight),
		TxIndex:           int32(src.TxIndex),
		Timestamp:         timestamp,
		FromPkscript:      hex.EncodeToString(src.FromPkScript),
		FromSatpoint:      src.FromSatPoint.String(),
		FromInputIndex:    int32(src.FromInputIndex),
		ToPkscript:        hex.EncodeToString(src.ToPkScript),
		ToSatpoint:        src.ToSatPoint.String(),
		ToOutputIndex:     int32(src.ToOutputIndex),
		SpentAsFee:        src.SpentAsFee,
		Amount:            numericFromDecimal(src.Amount),
	}
	return params, nil
}
// mapBalanceModelToType converts a balance row into an entity.Balance,
// decoding the hex-encoded pkscript. Returns an error when the pkscript is
// not valid hex.
func mapBalanceModelToType(src gen.Brc20Balance) (entity.Balance, error) {
	pkScript, err := hex.DecodeString(src.Pkscript)
	if err != nil {
		return entity.Balance{}, errors.Wrap(err, "failed to parse pkscript")
	}
	balance := entity.Balance{
		PkScript:         pkScript,
		Tick:             src.Tick,
		BlockHeight:      uint64(src.BlockHeight),
		OverallBalance:   decimalFromNumeric(src.OverallBalance).Decimal,
		AvailableBalance: decimalFromNumeric(src.AvailableBalance).Decimal,
	}
	return balance, nil
}
// mapBalanceTypeToParams converts an entity.Balance into insert parameters,
// hex-encoding the pkscript for storage.
func mapBalanceTypeToParams(src entity.Balance) gen.CreateBalancesParams {
	params := gen.CreateBalancesParams{
		Pkscript:         hex.EncodeToString(src.PkScript),
		Tick:             src.Tick,
		BlockHeight:      int32(src.BlockHeight),
		OverallBalance:   numericFromDecimal(src.OverallBalance),
		AvailableBalance: numericFromDecimal(src.AvailableBalance),
	}
	return params
}

View File

@@ -1,20 +0,0 @@
package postgres
import (
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/brc20/internal/repository/postgres/gen"
"github.com/jackc/pgx/v5"
)
// Repository implements the BRC20 data gateways on top of PostgreSQL.
type Repository struct {
	db      postgres.DB  // underlying database handle
	queries *gen.Queries // generated query wrapper bound to db (or to tx inside a transaction)
	tx      pgx.Tx       // non-nil only while a transaction opened by begin() is in progress
}
// NewRepository creates a Repository backed by the given database handle.
func NewRepository(db postgres.DB) *Repository {
	repo := &Repository{
		db:      db,
		queries: gen.New(db),
	}
	return repo
}

View File

@@ -1,62 +0,0 @@
package postgres
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/jackc/pgx/v5"
)
// ErrTxAlreadyExists is returned when a transaction is started on a
// repository that already holds an open transaction.
var ErrTxAlreadyExists = errors.New("Transaction already exists. Call Commit() or Rollback() first.")

// begin opens a new database transaction and returns a repository whose
// queries run inside it. The caller must finish it with Commit() or
// Rollback().
func (r *Repository) begin(ctx context.Context) (*Repository, error) {
	if r.tx != nil {
		return nil, errors.WithStack(ErrTxAlreadyExists)
	}
	tx, err := r.db.Begin(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to begin transaction")
	}
	txRepo := &Repository{
		db:      r.db,
		queries: r.queries.WithTx(tx),
		tx:      tx,
	}
	return txRepo, nil
}
// BeginBRC20Tx starts a database transaction and returns a BRC20 data
// gateway whose operations run inside it.
func (r *Repository) BeginBRC20Tx(ctx context.Context) (datagateway.BRC20DataGatewayWithTx, error) {
	txRepo, err := r.begin(ctx)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return txRepo, nil
}
// Commit commits the open transaction, if any. Calling Commit on a
// repository without an open transaction is a no-op.
func (r *Repository) Commit(ctx context.Context) error {
	if r.tx == nil {
		return nil
	}
	if err := r.tx.Commit(ctx); err != nil {
		return errors.Wrap(err, "failed to commit transaction")
	}
	r.tx = nil
	return nil
}
// Rollback rolls back the open transaction, if any. A transaction that was
// already closed (e.g. committed) is treated as a no-op, so Rollback is safe
// to defer unconditionally after BeginBRC20Tx.
func (r *Repository) Rollback(ctx context.Context) error {
	if r.tx == nil {
		return nil
	}
	switch err := r.tx.Rollback(ctx); {
	case err == nil:
		logger.DebugContext(ctx, "rolled back transaction")
	case !errors.Is(err, pgx.ErrTxClosed):
		// Real rollback failure: keep r.tx set and surface the error.
		return errors.Wrap(err, "failed to rollback transaction")
	}
	r.tx = nil
	return nil
}

View File

@@ -1,238 +0,0 @@
package brc20
import (
"context"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/gaze-network/indexer-network/pkg/btcclient"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/lru"
)
// Make sure to implement the Bitcoin Processor interface
var _ indexer.Processor[*types.Block] = (*Processor)(nil)

// Processor indexes BRC20 state from Bitcoin blocks. It accumulates per-block
// and per-batch state in memory and flushes it to the data gateways.
type Processor struct {
	brc20Dg       datagateway.BRC20DataGateway      // persistence for BRC20/inscription state
	indexerInfoDg datagateway.IndexerInfoDataGateway // persistence for indexer version/network state
	btcClient     btcclient.Contract
	network       common.Network
	cleanupFuncs  []func(context.Context) error // run by Shutdown(), in order
	// block states
	flotsamsSentAsFee []*entity.Flotsam
	blockReward       uint64
	// processor stats (loaded from storage by VerifyStates())
	cursedInscriptionCount  uint64
	blessedInscriptionCount uint64
	lostSats                uint64
	// cache
	outPointValueCache *lru.Cache[wire.OutPoint, uint64]
	// flush buffers - inscription states
	newInscriptionTransfers   []*entity.InscriptionTransfer
	newInscriptionEntries     map[ordinals.InscriptionId]*ordinals.InscriptionEntry
	newInscriptionEntryStates map[ordinals.InscriptionId]*ordinals.InscriptionEntry
	// flush buffers - brc20 states
	newTickEntries            map[string]*entity.TickEntry
	newTickEntryStates        map[string]*entity.TickEntry
	newEventDeploys           []*entity.EventDeploy
	newEventMints             []*entity.EventMint
	newEventInscribeTransfers []*entity.EventInscribeTransfer
	newEventTransferTransfers []*entity.EventTransferTransfer
	newBalances               map[string]map[string]*entity.Balance // pkscript -> tick -> balance
	eventHashString           string
}
// TODO: move this to config
// outPointValueCacheSize bounds the LRU cache of outpoint -> sats value.
const outPointValueCacheSize = 100000

// NewProcessor creates a BRC20 Processor with empty block state and flush
// buffers. The stats fields are zeroed here and loaded from storage by
// VerifyStates(); cleanupFuncs are invoked by Shutdown().
func NewProcessor(brc20Dg datagateway.BRC20DataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, btcClient btcclient.Contract, network common.Network, cleanupFuncs []func(context.Context) error) (*Processor, error) {
	outPointValueCache, err := lru.New[wire.OutPoint, uint64](outPointValueCacheSize)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create outPointValueCache")
	}
	return &Processor{
		brc20Dg:                   brc20Dg,
		indexerInfoDg:             indexerInfoDg,
		btcClient:                 btcClient,
		network:                   network,
		cleanupFuncs:              cleanupFuncs,
		flotsamsSentAsFee:         make([]*entity.Flotsam, 0),
		blockReward:               0,
		cursedInscriptionCount:    0, // to be initialized by p.VerifyStates()
		blessedInscriptionCount:   0, // to be initialized by p.VerifyStates()
		lostSats:                  0, // to be initialized by p.VerifyStates()
		outPointValueCache:        outPointValueCache,
		newInscriptionTransfers:   make([]*entity.InscriptionTransfer, 0),
		newInscriptionEntries:     make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry),
		newInscriptionEntryStates: make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry),
		newTickEntries:            make(map[string]*entity.TickEntry),
		newTickEntryStates:        make(map[string]*entity.TickEntry),
		newEventDeploys:           make([]*entity.EventDeploy, 0),
		newEventMints:             make([]*entity.EventMint, 0),
		newEventInscribeTransfers: make([]*entity.EventInscribeTransfer, 0),
		newEventTransferTransfers: make([]*entity.EventTransferTransfer, 0),
		newBalances:               make(map[string]map[string]*entity.Balance),
	}, nil
}
// VerifyStates implements indexer.Processor.
//
// It checks that the stored indexer state (db version, event hash version,
// network) matches what this build expects — creating the state row on first
// run — and then loads the persisted processor stats into memory, falling
// back to this network's starting block when no stats exist yet.
func (p *Processor) VerifyStates(ctx context.Context) error {
	indexerState, err := p.indexerInfoDg.GetLatestIndexerState(ctx)
	if err != nil && !errors.Is(err, errs.NotFound) {
		return errors.Wrap(err, "failed to get latest indexer state")
	}
	// if not found, create indexer state
	if errors.Is(err, errs.NotFound) {
		if err := p.indexerInfoDg.CreateIndexerState(ctx, entity.IndexerState{
			ClientVersion:    ClientVersion,
			DBVersion:        DBVersion,
			EventHashVersion: EventHashVersion,
			Network:          p.network,
		}); err != nil {
			return errors.Wrap(err, "failed to set indexer state")
		}
	} else {
		if indexerState.DBVersion != DBVersion {
			return errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, DBVersion)
		}
		if indexerState.EventHashVersion != EventHashVersion {
			// Fixed: the format string previously had a single %d for two
			// arguments and referenced the wrong module's database.
			return errors.Wrapf(errs.ConflictSetting, "event hash version mismatch: current version is %d, expected version is %d. Please reset brc20's db first.", indexerState.EventHashVersion, EventHashVersion)
		}
		if indexerState.Network != p.network {
			// Fixed: Network values are string-based, so %s (not %d) is the correct verb.
			return errors.Wrapf(errs.ConflictSetting, "network mismatch: latest indexed network is %s, configured network is %s. If you want to change the network, please reset the database", indexerState.Network, p.network)
		}
	}
	stats, err := p.brc20Dg.GetProcessorStats(ctx)
	if err != nil {
		if !errors.Is(err, errs.NotFound) {
			// Fixed: message previously said "failed to count cursed inscriptions".
			return errors.Wrap(err, "failed to get processor stats")
		}
		// No stats persisted yet: start counting from the module's starting block.
		stats = &entity.ProcessorStats{
			BlockHeight:             uint64(startingBlockHeader[p.network].Height),
			CursedInscriptionCount:  0,
			BlessedInscriptionCount: 0,
			LostSats:                0,
		}
	}
	p.cursedInscriptionCount = stats.CursedInscriptionCount
	p.blessedInscriptionCount = stats.BlessedInscriptionCount
	p.lostSats = stats.LostSats
	return nil
}
// CurrentBlock implements indexer.Processor.
//
// Returns the latest indexed block header, or this network's starting block
// header when nothing has been indexed yet.
func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
	blockHeader, err := p.brc20Dg.GetLatestBlock(ctx)
	if err == nil {
		return blockHeader, nil
	}
	if errors.Is(err, errs.NotFound) {
		return startingBlockHeader[p.network], nil
	}
	return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block")
}
// GetIndexedBlock implements indexer.Processor.
//
// Looks up the indexed block at the given height and returns its header.
func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
	indexed, err := p.brc20Dg.GetIndexedBlockByHeight(ctx, height)
	if err != nil {
		return types.BlockHeader{}, errors.Wrap(err, "failed to get indexed block")
	}
	header := types.BlockHeader{
		Height: int64(indexed.Height),
		Hash:   indexed.Hash,
	}
	return header, nil
}
// Name implements indexer.Processor.
//
// Returns the module identifier used to register this processor.
func (p *Processor) Name() string {
	const moduleName = "brc20"
	return moduleName
}
// RevertData implements indexer.Processor.
//
// Deletes all indexed BRC20 data at or above block height `from` inside a
// single transaction, so a reorg can be reprocessed from that height. The
// deferred Rollback is a no-op once Commit succeeds.
func (p *Processor) RevertData(ctx context.Context, from int64) error {
	brc20DgTx, err := p.brc20Dg.BeginBRC20Tx(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to begin transaction")
	}
	defer func() {
		if err := brc20DgTx.Rollback(ctx); err != nil {
			logger.WarnContext(ctx, "failed to rollback transaction",
				slogx.Error(err),
				slogx.String("event", "rollback_brc20_insertion"),
			)
		}
	}()
	// Delete every table's rows for heights >= from; all deletes must succeed
	// before the transaction is committed.
	if err := brc20DgTx.DeleteIndexedBlocksSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete indexed blocks")
	}
	if err := brc20DgTx.DeleteProcessorStatsSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete processor stats")
	}
	if err := brc20DgTx.DeleteTickEntriesSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete ticks")
	}
	if err := brc20DgTx.DeleteTickEntryStatesSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete tick states")
	}
	if err := brc20DgTx.DeleteEventDeploysSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete deploy events")
	}
	if err := brc20DgTx.DeleteEventMintsSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete mint events")
	}
	if err := brc20DgTx.DeleteEventInscribeTransfersSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete inscribe transfer events")
	}
	if err := brc20DgTx.DeleteEventTransferTransfersSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete transfer transfer events")
	}
	if err := brc20DgTx.DeleteBalancesSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete balances")
	}
	if err := brc20DgTx.DeleteInscriptionEntriesSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete inscription entries")
	}
	if err := brc20DgTx.DeleteInscriptionEntryStatesSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete inscription entry states")
	}
	if err := brc20DgTx.DeleteInscriptionTransfersSinceHeight(ctx, uint64(from)); err != nil {
		return errors.Wrap(err, "failed to delete inscription transfers")
	}
	if err := brc20DgTx.Commit(ctx); err != nil {
		return errors.Wrap(err, "failed to commit transaction")
	}
	return nil
}
// Shutdown runs every registered cleanup function and joins any failures
// into a single returned error (nil when all succeed).
func (p *Processor) Shutdown(ctx context.Context) error {
	var cleanupErrs []error
	for _, cleanup := range p.cleanupFuncs {
		if err := cleanup(ctx); err != nil {
			cleanupErrs = append(cleanupErrs, err)
		}
	}
	return errors.WithStack(errors.Join(cleanupErrs...))
}

View File

@@ -1,448 +0,0 @@
package brc20
import (
"bytes"
"context"
"encoding/hex"
"strings"
"time"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/brc20"
"github.com/gaze-network/indexer-network/modules/brc20/internal/datagateway"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/samber/lo"
"github.com/shopspring/decimal"
)
func (p *Processor) processBRC20States(ctx context.Context, transfers []*entity.InscriptionTransfer, blockHeader types.BlockHeader) error {
payloads := make([]*brc20.Payload, 0)
ticks := make(map[string]struct{})
for _, transfer := range transfers {
if transfer.Content == nil {
// skip empty content
continue
}
payload, err := brc20.ParsePayload(transfer)
if err != nil {
// skip invalid payloads
continue
}
payloads = append(payloads, payload)
ticks[payload.Tick] = struct{}{}
}
if len(payloads) == 0 {
// skip if no valid payloads
return nil
}
// TODO: concurrently fetch from db to optimize speed
tickEntries, err := p.brc20Dg.GetTickEntriesByTicks(ctx, lo.Keys(ticks))
if err != nil {
return errors.Wrap(err, "failed to get inscription entries by ids")
}
// preload required data to reduce individual data fetching during process
inscriptionIds := make([]ordinals.InscriptionId, 0)
inscriptionIdsToFetchParent := make([]ordinals.InscriptionId, 0)
inscriptionIdsToFetchEventInscribeTransfer := make([]ordinals.InscriptionId, 0)
balancesToFetch := make([]datagateway.GetBalancesBatchAtHeightQuery, 0) // pkscript -> tick -> struct{}
for _, payload := range payloads {
inscriptionIds = append(inscriptionIds, payload.Transfer.InscriptionId)
if payload.Op == brc20.OperationMint {
// preload parent id to validate mint events with self mint
if entry := tickEntries[payload.Tick]; entry.IsSelfMint {
inscriptionIdsToFetchParent = append(inscriptionIdsToFetchParent, payload.Transfer.InscriptionId)
}
}
if payload.Op == brc20.OperationTransfer {
if payload.Transfer.OldSatPoint == (ordinals.SatPoint{}) {
// preload balance to validate inscribe transfer event
balancesToFetch = append(balancesToFetch, datagateway.GetBalancesBatchAtHeightQuery{
PkScriptHex: hex.EncodeToString(payload.Transfer.NewPkScript),
Tick: payload.Tick,
})
} else {
// preload inscribe-transfer events to validate transfer-transfer event
inscriptionIdsToFetchEventInscribeTransfer = append(inscriptionIdsToFetchEventInscribeTransfer, payload.Transfer.InscriptionId)
}
}
}
inscriptionIdsToNumber, err := p.getInscriptionNumbersByIds(ctx, lo.Uniq(inscriptionIds))
if err != nil {
return errors.Wrap(err, "failed to get inscription numbers by ids")
}
inscriptionIdsToParent, err := p.getInscriptionParentsByIds(ctx, lo.Uniq(inscriptionIdsToFetchParent))
if err != nil {
return errors.Wrap(err, "failed to get inscription parents by ids")
}
latestEventId, err := p.brc20Dg.GetLatestEventId(ctx)
if err != nil {
return errors.Wrap(err, "failed to get latest event id")
}
// pkscript -> tick -> balance
balances, err := p.brc20Dg.GetBalancesBatchAtHeight(ctx, uint64(blockHeader.Height-1), balancesToFetch)
if err != nil {
return errors.Wrap(err, "failed to get balances batch at height")
}
eventInscribeTransfers, err := p.brc20Dg.GetEventInscribeTransfersByInscriptionIds(ctx, lo.Uniq(inscriptionIdsToFetchEventInscribeTransfer))
if err != nil {
return errors.Wrap(err, "failed to get event inscribe transfers by inscription ids")
}
newTickEntries := make(map[string]*entity.TickEntry)
newTickEntryStates := make(map[string]*entity.TickEntry)
newEventDeploys := make([]*entity.EventDeploy, 0)
newEventMints := make([]*entity.EventMint, 0)
newEventInscribeTransfers := make([]*entity.EventInscribeTransfer, 0)
newEventTransferTransfers := make([]*entity.EventTransferTransfer, 0)
newBalances := make(map[string]map[string]*entity.Balance)
var eventHashBuilder strings.Builder
handleEventDeploy := func(payload *brc20.Payload, tickEntry *entity.TickEntry) {
if payload.Transfer.TransferCount > 1 {
// skip used deploy inscriptions
return
}
if tickEntry != nil {
// skip deploy inscriptions for duplicate ticks
return
}
newEntry := &entity.TickEntry{
Tick: payload.Tick,
OriginalTick: payload.OriginalTick,
TotalSupply: payload.Max,
Decimals: payload.Dec,
LimitPerMint: payload.Lim,
IsSelfMint: payload.SelfMint,
DeployInscriptionId: payload.Transfer.InscriptionId,
DeployedAt: blockHeader.Timestamp,
DeployedAtHeight: payload.Transfer.BlockHeight,
MintedAmount: decimal.Zero,
BurnedAmount: decimal.Zero,
CompletedAt: time.Time{},
CompletedAtHeight: 0,
}
newTickEntries[payload.Tick] = newEntry
newTickEntryStates[payload.Tick] = newEntry
// update entries for other operations in same block
tickEntries[payload.Tick] = newEntry
event := &entity.EventDeploy{
Id: latestEventId + 1,
InscriptionId: payload.Transfer.InscriptionId,
InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
Tick: payload.Tick,
OriginalTick: payload.OriginalTick,
TxHash: payload.Transfer.TxHash,
BlockHeight: payload.Transfer.BlockHeight,
TxIndex: payload.Transfer.TxIndex,
Timestamp: blockHeader.Timestamp,
PkScript: payload.Transfer.NewPkScript,
SatPoint: payload.Transfer.NewSatPoint,
TotalSupply: payload.Max,
Decimals: payload.Dec,
LimitPerMint: payload.Lim,
IsSelfMint: payload.SelfMint,
}
newEventDeploys = append(newEventDeploys, event)
latestEventId++
eventHashBuilder.WriteString(getEventDeployString(event) + eventHashSeparator)
}
handleEventMint := func(payload *brc20.Payload, tickEntry *entity.TickEntry) {
if payload.Transfer.TransferCount > 1 {
// skip used mint inscriptions that are already used
return
}
if tickEntry == nil {
// skip mint inscriptions for non-existent ticks
return
}
if -payload.Amt.Exponent() > int32(tickEntry.Decimals) {
// skip mint inscriptions with decimals greater than allowed
return
}
if tickEntry.MintedAmount.GreaterThanOrEqual(tickEntry.TotalSupply) {
// skip mint inscriptions for ticks with completed mints
return
}
if payload.Amt.GreaterThan(tickEntry.LimitPerMint) {
// skip mint inscriptions with amount greater than limit per mint
return
}
mintableAmount := tickEntry.TotalSupply.Sub(tickEntry.MintedAmount)
if payload.Amt.GreaterThan(mintableAmount) {
payload.Amt = mintableAmount
}
var parentId *ordinals.InscriptionId
if tickEntry.IsSelfMint {
parentIdValue, ok := inscriptionIdsToParent[payload.Transfer.InscriptionId]
if !ok {
// skip mint inscriptions for self mint ticks without parent inscription
return
}
if parentIdValue != tickEntry.DeployInscriptionId {
// skip mint inscriptions for self mint ticks with invalid parent inscription
return
}
parentId = &parentIdValue
}
tickEntry.MintedAmount = tickEntry.MintedAmount.Add(payload.Amt)
// mark as completed if this mint completes the total supply
if tickEntry.MintedAmount.GreaterThanOrEqual(tickEntry.TotalSupply) {
tickEntry.CompletedAt = blockHeader.Timestamp
tickEntry.CompletedAtHeight = payload.Transfer.BlockHeight
}
newTickEntryStates[payload.Tick] = tickEntry
event := &entity.EventMint{
Id: latestEventId + 1,
InscriptionId: payload.Transfer.InscriptionId,
InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
Tick: payload.Tick,
OriginalTick: payload.OriginalTick,
TxHash: payload.Transfer.TxHash,
BlockHeight: payload.Transfer.BlockHeight,
TxIndex: payload.Transfer.TxIndex,
Timestamp: blockHeader.Timestamp,
PkScript: payload.Transfer.NewPkScript,
SatPoint: payload.Transfer.NewSatPoint,
Amount: payload.Amt,
ParentId: parentId,
}
newEventMints = append(newEventMints, event)
latestEventId++
eventHashBuilder.WriteString(getEventMintString(event, tickEntry.Decimals) + eventHashSeparator)
}
handleEventInscribeTransfer := func(payload *brc20.Payload, tickEntry *entity.TickEntry) {
// inscribe transfer event
pkScriptHex := hex.EncodeToString(payload.Transfer.NewPkScript)
balance, ok := balances[pkScriptHex][payload.Tick]
if !ok {
balance = &entity.Balance{
PkScript: payload.Transfer.NewPkScript,
Tick: payload.Tick,
BlockHeight: uint64(blockHeader.Height - 1),
OverallBalance: decimal.Zero, // defaults balance to zero if not found
AvailableBalance: decimal.Zero,
}
}
if payload.Amt.GreaterThan(balance.AvailableBalance) {
// skip inscribe transfer event if amount exceeds available balance
return
}
// update balance state
balance.BlockHeight = uint64(blockHeader.Height)
balance.AvailableBalance = balance.AvailableBalance.Sub(payload.Amt)
if _, ok := balances[pkScriptHex]; !ok {
balances[pkScriptHex] = make(map[string]*entity.Balance)
}
balances[pkScriptHex][payload.Tick] = balance
if _, ok := newBalances[pkScriptHex]; !ok {
newBalances[pkScriptHex] = make(map[string]*entity.Balance)
}
newBalances[pkScriptHex][payload.Tick] = &entity.Balance{}
event := &entity.EventInscribeTransfer{
Id: latestEventId + 1,
InscriptionId: payload.Transfer.InscriptionId,
InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
Tick: payload.Tick,
OriginalTick: payload.OriginalTick,
TxHash: payload.Transfer.TxHash,
BlockHeight: payload.Transfer.BlockHeight,
TxIndex: payload.Transfer.TxIndex,
Timestamp: blockHeader.Timestamp,
PkScript: payload.Transfer.NewPkScript,
SatPoint: payload.Transfer.NewSatPoint,
OutputIndex: payload.Transfer.NewSatPoint.OutPoint.Index,
SatsAmount: payload.Transfer.NewOutputValue,
Amount: payload.Amt,
}
latestEventId++
eventInscribeTransfers[payload.Transfer.InscriptionId] = event
newEventInscribeTransfers = append(newEventInscribeTransfers, event)
eventHashBuilder.WriteString(getEventInscribeTransferString(event, tickEntry.Decimals) + eventHashSeparator)
}
handleEventTransferTransferAsFee := func(payload *brc20.Payload, tickEntry *entity.TickEntry, inscribeTransfer *entity.EventInscribeTransfer) {
// return balance to sender
fromPkScriptHex := hex.EncodeToString(inscribeTransfer.PkScript)
fromBalance, ok := balances[fromPkScriptHex][payload.Tick]
if !ok {
fromBalance = &entity.Balance{
PkScript: inscribeTransfer.PkScript,
Tick: payload.Tick,
BlockHeight: uint64(blockHeader.Height),
OverallBalance: decimal.Zero, // defaults balance to zero if not found
AvailableBalance: decimal.Zero,
}
}
fromBalance.BlockHeight = uint64(blockHeader.Height)
fromBalance.AvailableBalance = fromBalance.AvailableBalance.Add(payload.Amt)
if _, ok := balances[fromPkScriptHex]; !ok {
balances[fromPkScriptHex] = make(map[string]*entity.Balance)
}
balances[fromPkScriptHex][payload.Tick] = fromBalance
if _, ok := newBalances[fromPkScriptHex]; !ok {
newBalances[fromPkScriptHex] = make(map[string]*entity.Balance)
}
newBalances[fromPkScriptHex][payload.Tick] = fromBalance
event := &entity.EventTransferTransfer{
Id: latestEventId + 1,
InscriptionId: payload.Transfer.InscriptionId,
InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
Tick: payload.Tick,
OriginalTick: payload.OriginalTick,
TxHash: payload.Transfer.TxHash,
BlockHeight: payload.Transfer.BlockHeight,
TxIndex: payload.Transfer.TxIndex,
Timestamp: blockHeader.Timestamp,
FromPkScript: inscribeTransfer.PkScript,
FromSatPoint: inscribeTransfer.SatPoint,
FromInputIndex: payload.Transfer.FromInputIndex,
ToPkScript: payload.Transfer.NewPkScript,
ToSatPoint: payload.Transfer.NewSatPoint,
ToOutputIndex: payload.Transfer.NewSatPoint.OutPoint.Index,
SpentAsFee: true,
Amount: payload.Amt,
}
newEventTransferTransfers = append(newEventTransferTransfers, event)
eventHashBuilder.WriteString(getEventTransferTransferString(event, tickEntry.Decimals) + eventHashSeparator)
}
handleEventTransferTransferNormal := func(payload *brc20.Payload, tickEntry *entity.TickEntry, inscribeTransfer *entity.EventInscribeTransfer) {
// subtract balance from sender
fromPkScriptHex := hex.EncodeToString(inscribeTransfer.PkScript)
fromBalance, ok := balances[fromPkScriptHex][payload.Tick]
if !ok {
// skip transfer transfer event if from balance does not exist
return
}
fromBalance.BlockHeight = uint64(blockHeader.Height)
fromBalance.OverallBalance = fromBalance.OverallBalance.Sub(payload.Amt)
if _, ok := balances[fromPkScriptHex]; !ok {
balances[fromPkScriptHex] = make(map[string]*entity.Balance)
}
balances[fromPkScriptHex][payload.Tick] = fromBalance
if _, ok := newBalances[fromPkScriptHex]; !ok {
newBalances[fromPkScriptHex] = make(map[string]*entity.Balance)
}
newBalances[fromPkScriptHex][payload.Tick] = fromBalance
// add balance to receiver
if bytes.Equal(payload.Transfer.NewPkScript, []byte{0x6a}) {
// burn if sent to OP_RETURN
tickEntry.BurnedAmount = tickEntry.BurnedAmount.Add(payload.Amt)
tickEntries[payload.Tick] = tickEntry
newTickEntryStates[payload.Tick] = tickEntry
} else {
toPkScriptHex := hex.EncodeToString(payload.Transfer.NewPkScript)
toBalance, ok := balances[toPkScriptHex][payload.Tick]
if !ok {
toBalance = &entity.Balance{
PkScript: payload.Transfer.NewPkScript,
Tick: payload.Tick,
BlockHeight: uint64(blockHeader.Height),
OverallBalance: decimal.Zero, // defaults balance to zero if not found
AvailableBalance: decimal.Zero,
}
}
toBalance.BlockHeight = uint64(blockHeader.Height)
toBalance.OverallBalance = toBalance.OverallBalance.Add(payload.Amt)
toBalance.AvailableBalance = toBalance.AvailableBalance.Add(payload.Amt)
if _, ok := balances[toPkScriptHex]; !ok {
balances[toPkScriptHex] = make(map[string]*entity.Balance)
}
balances[toPkScriptHex][payload.Tick] = toBalance
if _, ok := newBalances[toPkScriptHex]; !ok {
newBalances[toPkScriptHex] = make(map[string]*entity.Balance)
}
newBalances[toPkScriptHex][payload.Tick] = toBalance
}
event := &entity.EventTransferTransfer{
Id: latestEventId + 1,
InscriptionId: payload.Transfer.InscriptionId,
InscriptionNumber: inscriptionIdsToNumber[payload.Transfer.InscriptionId],
Tick: payload.Tick,
OriginalTick: payload.OriginalTick,
TxHash: payload.Transfer.TxHash,
BlockHeight: payload.Transfer.BlockHeight,
TxIndex: payload.Transfer.TxIndex,
Timestamp: blockHeader.Timestamp,
FromPkScript: inscribeTransfer.PkScript,
FromSatPoint: inscribeTransfer.SatPoint,
FromInputIndex: payload.Transfer.FromInputIndex,
ToPkScript: payload.Transfer.NewPkScript,
ToSatPoint: payload.Transfer.NewSatPoint,
ToOutputIndex: payload.Transfer.NewSatPoint.OutPoint.Index,
SpentAsFee: false,
Amount: payload.Amt,
}
newEventTransferTransfers = append(newEventTransferTransfers, event)
eventHashBuilder.WriteString(getEventTransferTransferString(event, tickEntry.Decimals) + eventHashSeparator)
}
for _, payload := range payloads {
tickEntry := tickEntries[payload.Tick]
if payload.Transfer.SentAsFee && payload.Transfer.OldSatPoint == (ordinals.SatPoint{}) {
// skip inscriptions inscribed as fee
continue
}
switch payload.Op {
case brc20.OperationDeploy:
handleEventDeploy(payload, tickEntry)
case brc20.OperationMint:
handleEventMint(payload, tickEntry)
case brc20.OperationTransfer:
if payload.Transfer.TransferCount > 2 {
// skip used transfer inscriptions
continue
}
if tickEntry == nil {
// skip transfer inscriptions for non-existent ticks
continue
}
if -payload.Amt.Exponent() > int32(tickEntry.Decimals) {
// skip transfer inscriptions with decimals greater than allowed
continue
}
if payload.Transfer.OldSatPoint == (ordinals.SatPoint{}) {
handleEventInscribeTransfer(payload, tickEntry)
} else {
// transfer transfer event
inscribeTransfer, ok := eventInscribeTransfers[payload.Transfer.InscriptionId]
if !ok {
// skip transfer transfer event if prior inscribe transfer event does not exist
continue
}
if payload.Transfer.SentAsFee {
handleEventTransferTransferAsFee(payload, tickEntry, inscribeTransfer)
} else {
handleEventTransferTransferNormal(payload, tickEntry, inscribeTransfer)
}
}
}
}
p.newTickEntries = newTickEntries
p.newTickEntryStates = newTickEntryStates
p.newEventDeploys = newEventDeploys
p.newEventMints = newEventMints
p.newEventInscribeTransfers = newEventInscribeTransfers
p.newEventTransferTransfers = newEventTransferTransfers
p.newBalances = newBalances
p.eventHashString = eventHashBuilder.String()
return nil
}

View File

@@ -1,584 +0,0 @@
package brc20
import (
"context"
"encoding/json"
"slices"
"sync"
"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/samber/lo"
"golang.org/x/sync/errgroup"
)
// processInscriptionTx indexes all inscription activity in a single transaction:
// it parses new inscription envelopes from the tx, tracks previously inscribed
// sats entering via the inputs, and assigns every resulting floating
// inscription ("flotsam") to its new satpoint on the outputs (or to the
// fee/lost pool if its offset falls past the last output).
//
// transfersInOutPoints maps each outpoint to the inscription transfers
// currently sitting on it; it is mutated downstream (updateInscriptionLocation)
// so that later transactions in the same block observe moves made here.
// outpointValues caches output values for this block and is extended with this
// transaction's own outputs.
func (p *Processor) processInscriptionTx(ctx context.Context, tx *types.Transaction, blockHeader types.BlockHeader, transfersInOutPoints map[wire.OutPoint]map[ordinals.SatPoint][]*entity.InscriptionTransfer, outpointValues map[wire.OutPoint]uint64) error {
	ctx = logger.WithContext(ctx, slogx.String("tx_hash", tx.TxHash.String()))
	envelopes := ordinals.ParseEnvelopesFromTx(tx)
	inputOutPoints := lo.Map(tx.TxIn, func(txIn *types.TxIn, _ int) wire.OutPoint {
		return wire.OutPoint{
			Hash:  txIn.PreviousOutTxHash,
			Index: txIn.PreviousOutIndex,
		}
	})
	// cache outpoint values for future blocks
	for outIndex, txOut := range tx.TxOut {
		outPoint := wire.OutPoint{
			Hash:  tx.TxHash,
			Index: uint32(outIndex),
		}
		p.outPointValueCache.Add(outPoint, uint64(txOut.Value))
		outpointValues[outPoint] = uint64(txOut.Value)
	}
	outPointsWithTransfers := lo.Keys(transfersInOutPoints)
	txContainsTransfers := len(lo.Intersect(inputOutPoints, outPointsWithTransfers)) > 0
	// a coinbase tx is identified by its first input spending the zero hash
	isCoinbase := tx.TxIn[0].PreviousOutTxHash.IsEqual(&chainhash.Hash{})
	if len(envelopes) == 0 && !txContainsTransfers && !isCoinbase {
		// no inscription activity, skip
		return nil
	}
	floatingInscriptions := make([]*entity.Flotsam, 0)
	totalInputValue := uint64(0)
	totalOutputValue := lo.SumBy(tx.TxOut, func(txOut *types.TxOut) uint64 { return uint64(txOut.Value) })
	// inscribeOffsets tracks, per input-sat offset, the first inscription seen
	// there and how many inscriptions share it (used for reinscription curses)
	inscribeOffsets := make(map[uint64]*struct {
		inscriptionId ordinals.InscriptionId
		count         int
	})
	// idCounter numbers new inscription ids within this tx (InscriptionId.Index)
	idCounter := uint32(0)
	for i, input := range tx.TxIn {
		// skip coinbase inputs since there can't be an inscription in coinbase
		if input.PreviousOutTxHash.IsEqual(&chainhash.Hash{}) {
			totalInputValue += p.getBlockSubsidy(uint64(tx.BlockHeight))
			continue
		}
		inputOutPoint := wire.OutPoint{
			Hash:  input.PreviousOutTxHash,
			Index: input.PreviousOutIndex,
		}
		inputValue := outpointValues[inputOutPoint]
		// collect inscriptions already residing on this spent outpoint; their
		// new offset is relative to the accumulated input value so far
		transfersInOutPoint := transfersInOutPoints[inputOutPoint]
		for satPoint, transfers := range transfersInOutPoint {
			offset := totalInputValue + satPoint.Offset
			for _, transfer := range transfers {
				floatingInscriptions = append(floatingInscriptions, &entity.Flotsam{
					Offset:        offset,
					InscriptionId: transfer.InscriptionId,
					Tx:            tx,
					OriginOld: &entity.OriginOld{
						OldSatPoint: satPoint,
						Content:     transfer.Content,
						InputIndex:  uint32(i),
					},
				})
				if _, ok := inscribeOffsets[offset]; !ok {
					inscribeOffsets[offset] = &struct {
						inscriptionId ordinals.InscriptionId
						count         int
					}{transfer.InscriptionId, 0}
				}
				inscribeOffsets[offset].count++
			}
		}
		// offset on output to inscribe new inscriptions from this input
		offset := totalInputValue
		totalInputValue += inputValue
		envelopesInInput := lo.Filter(envelopes, func(envelope *ordinals.Envelope, _ int) bool {
			return envelope.InputIndex == uint32(i)
		})
		for _, envelope := range envelopesInInput {
			inscriptionId := ordinals.InscriptionId{
				TxHash: tx.TxHash,
				Index:  idCounter,
			}
			// curse rules: any of the conditions below makes the inscription
			// cursed (negative number); BRC-20 keeps its own stricter flag
			var cursed, cursedForBRC20 bool
			if envelope.UnrecognizedEvenField || // unrecognized even field
				envelope.DuplicateField || // duplicate field
				envelope.IncompleteField || // incomplete field
				envelope.InputIndex != 0 || // not first input
				envelope.Offset != 0 || // not first envelope in input
				envelope.Inscription.Pointer != nil || // contains pointer
				envelope.PushNum || // contains pushnum opcodes
				envelope.Stutter { // contains stuttering curse structure
				cursed = true
				cursedForBRC20 = true
			}
			if initial, ok := inscribeOffsets[offset]; !cursed && ok {
				if initial.count > 1 {
					cursed = true // reinscription
					cursedForBRC20 = true
				} else {
					initialInscriptionEntry, err := p.getInscriptionEntryById(ctx, initial.inscriptionId)
					if err != nil {
						return errors.Wrapf(err, "failed to get inscription entry id %s", initial.inscriptionId)
					}
					if !initialInscriptionEntry.Cursed {
						cursed = true // reinscription curse if initial inscription is not cursed
					}
					if !initialInscriptionEntry.CursedForBRC20 {
						cursedForBRC20 = true
					}
				}
			}
			// inscriptions are no longer cursed after jubilee, but BRC20 still considers them as cursed
			if cursed && uint64(tx.BlockHeight) >= ordinals.GetJubileeHeight(p.network) {
				cursed = false
			}
			// zero-value input or malformed envelope makes the inscription unbound
			unbound := inputValue == 0 || envelope.UnrecognizedEvenField
			// a valid pointer relocates the inscription within this tx's outputs
			if envelope.Inscription.Pointer != nil && *envelope.Inscription.Pointer < totalOutputValue {
				offset = *envelope.Inscription.Pointer
			}
			floatingInscriptions = append(floatingInscriptions, &entity.Flotsam{
				Offset:        offset,
				InscriptionId: inscriptionId,
				Tx:            tx,
				OriginNew: &entity.OriginNew{
					Reinscription:  inscribeOffsets[offset] != nil,
					Cursed:         cursed,
					CursedForBRC20: cursedForBRC20,
					Fee:            0,
					Hidden:         false, // we don't care about this field for brc20
					Parent:         envelope.Inscription.Parent,
					Pointer:        envelope.Inscription.Pointer,
					Unbound:        unbound,
					Inscription:    envelope.Inscription,
				},
			})
			if _, ok := inscribeOffsets[offset]; !ok {
				inscribeOffsets[offset] = &struct {
					inscriptionId ordinals.InscriptionId
					count         int
				}{inscriptionId, 0}
			}
			inscribeOffsets[offset].count++
			idCounter++
		}
	}
	// parents must exist in floatingInscriptions to be valid
	potentialParents := make(map[ordinals.InscriptionId]struct{})
	for _, flotsam := range floatingInscriptions {
		potentialParents[flotsam.InscriptionId] = struct{}{}
	}
	for _, flotsam := range floatingInscriptions {
		if flotsam.OriginNew != nil && flotsam.OriginNew.Parent != nil {
			if _, ok := potentialParents[*flotsam.OriginNew.Parent]; !ok {
				// parent not found, ignore parent
				flotsam.OriginNew.Parent = nil
			}
		}
	}
	// calculate fee for each new inscription (total tx fee split evenly;
	// idCounter > 0 is guaranteed here because OriginNew flotsams only exist
	// when idCounter was incremented)
	for _, flotsam := range floatingInscriptions {
		if flotsam.OriginNew != nil {
			flotsam.OriginNew.Fee = (totalInputValue - totalOutputValue) / uint64(idCounter)
		}
	}
	// if tx is coinbase, add inscriptions sent as fee to outputs of this tx
	ownInscriptionCount := len(floatingInscriptions)
	if isCoinbase {
		floatingInscriptions = append(floatingInscriptions, p.flotsamsSentAsFee...)
	}
	// sort floatingInscriptions by offset
	slices.SortFunc(floatingInscriptions, func(i, j *entity.Flotsam) int {
		return int(i.Offset) - int(j.Offset)
	})
	outputValue := uint64(0)
	curIncrIdx := 0
	// newLocations := make(map[ordinals.SatPoint][]*Flotsam)
	type location struct {
		satPoint  ordinals.SatPoint
		flotsam   *entity.Flotsam
		sentAsFee bool
	}
	newLocations := make([]*location, 0)
	// outputToSumValue[i] is the cumulative value through output i (exclusive end)
	outputToSumValue := make([]uint64, 0, len(tx.TxOut))
	for outIndex, txOut := range tx.TxOut {
		end := outputValue + uint64(txOut.Value)
		// process all inscriptions that are supposed to be inscribed in this output
		for curIncrIdx < len(floatingInscriptions) && floatingInscriptions[curIncrIdx].Offset < end {
			newSatPoint := ordinals.SatPoint{
				OutPoint: wire.OutPoint{
					Hash:  tx.TxHash,
					Index: uint32(outIndex),
				},
				Offset: floatingInscriptions[curIncrIdx].Offset - outputValue,
			}
			// newLocations[newSatPoint] = append(newLocations[newSatPoint], floatingInscriptions[curIncrIdx])
			newLocations = append(newLocations, &location{
				satPoint:  newSatPoint,
				flotsam:   floatingInscriptions[curIncrIdx],
				sentAsFee: isCoinbase && curIncrIdx >= ownInscriptionCount, // if curIncrIdx >= ownInscriptionCount, then current inscription came from p.flotsamsSentAsFee
			})
			curIncrIdx++
		}
		outputValue = end
		outputToSumValue = append(outputToSumValue, outputValue)
	}
	for _, loc := range newLocations {
		satPoint := loc.satPoint
		flotsam := loc.flotsam
		sentAsFee := loc.sentAsFee
		// TODO: not sure if we still need to handle pointer here, it's already handled above.
		if flotsam.OriginNew != nil && flotsam.OriginNew.Pointer != nil {
			pointer := *flotsam.OriginNew.Pointer
			for outIndex, outputValue := range outputToSumValue {
				start := uint64(0)
				if outIndex > 0 {
					start = outputToSumValue[outIndex-1]
				}
				end := outputValue
				if start <= pointer && pointer < end {
					satPoint.Offset = pointer - start
					break
				}
			}
		}
		if err := p.updateInscriptionLocation(ctx, satPoint, flotsam, sentAsFee, tx, blockHeader, transfersInOutPoints); err != nil {
			return errors.Wrap(err, "failed to update inscription location")
		}
	}
	// handle leftover flotsams (flotsams with offset over total output value)
	if isCoinbase {
		// if there are leftover inscriptions in coinbase, they are lost permanently
		for _, flotsam := range floatingInscriptions[curIncrIdx:] {
			newSatPoint := ordinals.SatPoint{
				OutPoint: wire.OutPoint{},
				Offset:   p.lostSats + flotsam.Offset - totalOutputValue,
			}
			if err := p.updateInscriptionLocation(ctx, newSatPoint, flotsam, false, tx, blockHeader, transfersInOutPoints); err != nil {
				return errors.Wrap(err, "failed to update inscription location")
			}
		}
		p.lostSats += p.blockReward - totalOutputValue
	} else {
		// if there are leftover inscriptions in non-coinbase tx, they are stored in p.flotsamsSentAsFee for processing in this block's coinbase tx
		for _, flotsam := range floatingInscriptions[curIncrIdx:] {
			flotsam.Offset = p.blockReward + flotsam.Offset - totalOutputValue
			p.flotsamsSentAsFee = append(p.flotsamsSentAsFee, flotsam)
		}
		// add fees to block reward
		// NOTE(review): this ASSIGNS instead of accumulating — p.blockReward is
		// set to the block subsidy in Process() and ord's reference updater
		// does `reward += fee`; with `=` the subsidy and earlier txs' fees are
		// dropped from subsequent leftover-offset calculations. Verify whether
		// `p.blockReward += totalInputValue - totalOutputValue` was intended.
		p.blockReward = totalInputValue - totalOutputValue
	}
	return nil
}
// updateInscriptionLocation records the new location of a floating inscription
// after processing a transaction: it appends an InscriptionTransfer to the
// flush buffer, updates the in-memory entry state, and extends the
// transfersInOutPoints cache so later transactions in the same block see the
// move.
//
// Exactly one of flotsam.OriginOld (an existing inscription moving) or
// flotsam.OriginNew (an inscription freshly created in this tx) must be set;
// any other state is a programming error and panics.
func (p *Processor) updateInscriptionLocation(ctx context.Context, newSatPoint ordinals.SatPoint, flotsam *entity.Flotsam, sentAsFee bool, tx *types.Transaction, blockHeader types.BlockHeader, transfersInOutPoints map[wire.OutPoint]map[ordinals.SatPoint][]*entity.InscriptionTransfer) error {
	// NOTE(review): for inscriptions lost in coinbase, newSatPoint.OutPoint is
	// the zero outpoint (Index 0), so this reads tx.TxOut[0] — confirm that the
	// pkScript/value recorded for the "lost" case is intentional.
	txOut := tx.TxOut[newSatPoint.OutPoint.Index]
	if flotsam.OriginOld != nil {
		// existing inscription moving to a new satpoint
		entry, err := p.getInscriptionEntryById(ctx, flotsam.InscriptionId)
		if err != nil {
			return errors.Wrapf(err, "failed to get inscription entry id %s", flotsam.InscriptionId)
		}
		entry.TransferCount++
		transfer := &entity.InscriptionTransfer{
			InscriptionId:  flotsam.InscriptionId,
			BlockHeight:    uint64(flotsam.Tx.BlockHeight), // use flotsam's tx to track tx that initiated the transfer
			TxIndex:        flotsam.Tx.Index,               // use flotsam's tx to track tx that initiated the transfer
			TxHash:         flotsam.Tx.TxHash,
			Content:        flotsam.OriginOld.Content,
			FromInputIndex: flotsam.OriginOld.InputIndex,
			OldSatPoint:    flotsam.OriginOld.OldSatPoint,
			NewSatPoint:    newSatPoint,
			NewPkScript:    txOut.PkScript,
			NewOutputValue: uint64(txOut.Value),
			SentAsFee:      sentAsFee,
			TransferCount:  entry.TransferCount,
		}
		// track transfers even if transfer count exceeds 2 (because we need to check for reinscriptions)
		p.newInscriptionTransfers = append(p.newInscriptionTransfers, transfer)
		p.newInscriptionEntryStates[entry.Id] = entry
		// add new transfer to transfersInOutPoints cache
		if _, ok := transfersInOutPoints[newSatPoint.OutPoint]; !ok {
			transfersInOutPoints[newSatPoint.OutPoint] = make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
		}
		transfersInOutPoints[newSatPoint.OutPoint][newSatPoint] = append(transfersInOutPoints[newSatPoint.OutPoint][newSatPoint], transfer)
		return nil
	}
	if flotsam.OriginNew != nil {
		// brand-new inscription: assign its number (negative for cursed) and
		// global sequence number, then record its first "transfer"
		origin := flotsam.OriginNew
		var inscriptionNumber int64
		sequenceNumber := p.cursedInscriptionCount + p.blessedInscriptionCount
		if origin.Cursed {
			inscriptionNumber = -int64(p.cursedInscriptionCount + 1)
			p.cursedInscriptionCount++
		} else {
			inscriptionNumber = int64(p.blessedInscriptionCount)
			p.blessedInscriptionCount++
		}
		// if not valid brc20 inscription, delete content to save space
		if !isBRC20Inscription(origin.Inscription) {
			origin.Inscription.Content = nil
			origin.Inscription.ContentType = ""
			origin.Inscription.ContentEncoding = ""
		}
		transfer := &entity.InscriptionTransfer{
			InscriptionId:  flotsam.InscriptionId,
			BlockHeight:    uint64(flotsam.Tx.BlockHeight), // use flotsam's tx to track tx that initiated the transfer
			TxIndex:        flotsam.Tx.Index,               // use flotsam's tx to track tx that initiated the transfer
			TxHash:         flotsam.Tx.TxHash,
			Content:        origin.Inscription.Content,
			FromInputIndex: 0, // unused
			OldSatPoint:    ordinals.SatPoint{},
			NewSatPoint:    newSatPoint,
			NewPkScript:    txOut.PkScript,
			NewOutputValue: uint64(txOut.Value),
			SentAsFee:      sentAsFee,
			TransferCount:  1, // count inscription as first transfer
		}
		entry := &ordinals.InscriptionEntry{
			Id:              flotsam.InscriptionId,
			Number:          inscriptionNumber,
			SequenceNumber:  sequenceNumber,
			Cursed:          origin.Cursed,
			CursedForBRC20:  origin.CursedForBRC20,
			CreatedAt:       blockHeader.Timestamp,
			CreatedAtHeight: uint64(blockHeader.Height),
			Inscription:     origin.Inscription,
			TransferCount:   1, // count inscription as first transfer
		}
		p.newInscriptionTransfers = append(p.newInscriptionTransfers, transfer)
		p.newInscriptionEntries[entry.Id] = entry
		p.newInscriptionEntryStates[entry.Id] = entry
		// add new transfer to transfersInOutPoints cache
		if _, ok := transfersInOutPoints[newSatPoint.OutPoint]; !ok {
			transfersInOutPoints[newSatPoint.OutPoint] = make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
		}
		transfersInOutPoints[newSatPoint.OutPoint][newSatPoint] = append(transfersInOutPoints[newSatPoint.OutPoint][newSatPoint], transfer)
		return nil
	}
	panic("unreachable")
}
// brc20Inscription is a minimal projection of an inscription's JSON content,
// used only to read the protocol tag ("p") when detecting BRC-20 payloads.
type brc20Inscription struct {
	P string `json:"p"`
}
// isBRC20Inscription reports whether the inscription content looks like a
// BRC-20 operation: the content type must be exactly "application/json" or
// "text/plain", and the body must be JSON whose "p" field equals "brc-20".
func isBRC20Inscription(inscription ordinals.Inscription) bool {
	// only these two exact content types are accepted
	switch inscription.ContentType {
	case "application/json", "text/plain":
	default:
		return false
	}
	if inscription.Content == nil {
		return false
	}
	// attempt to parse content as json
	var payload brc20Inscription
	if err := json.Unmarshal(inscription.Content, &payload); err != nil {
		return false
	}
	return payload.P == "brc-20"
}
// getOutPointValues returns the value (in satoshis) of each given outpoint,
// serving from the in-memory LRU cache when possible and fetching the rest
// from the Bitcoin client concurrently (one request per distinct tx hash).
// Coinbase inputs (zero hash) are skipped and absent from the result.
func (p *Processor) getOutPointValues(ctx context.Context, outPoints []wire.OutPoint) (map[wire.OutPoint]uint64, error) {
	// try to get from cache if exists
	cacheValues := p.outPointValueCache.MGet(outPoints)
	result := make(map[wire.OutPoint]uint64)
	outPointsToFetch := make([]wire.OutPoint, 0)
	for i, outPoint := range outPoints {
		if outPoint.Hash == (chainhash.Hash{}) {
			// skip coinbase input
			continue
		}
		// NOTE(review): a cached value of 0 is indistinguishable from a cache
		// miss here, so genuinely zero-value outputs are refetched every time —
		// harmless for correctness, but worth confirming this is intended.
		if cacheValues[i] != 0 {
			result[outPoint] = cacheValues[i]
		} else {
			outPointsToFetch = append(outPointsToFetch, outPoint)
		}
	}
	eg, ectx := errgroup.WithContext(ctx)
	// deduplicate by tx hash so each transaction is fetched at most once
	txHashes := make(map[chainhash.Hash]struct{})
	for _, outPoint := range outPointsToFetch {
		txHashes[outPoint.Hash] = struct{}{}
	}
	txOutsByHash := make(map[chainhash.Hash][]*types.TxOut)
	var mutex sync.Mutex
	for txHash := range txHashes {
		txHash := txHash // capture loop variable for the goroutine (pre-Go 1.22 semantics)
		eg.Go(func() error {
			txOuts, err := p.btcClient.GetTransactionOutputs(ectx, txHash)
			if err != nil {
				return errors.Wrapf(err, "failed to get transaction outputs for hash %s", txHash)
			}
			// update cache (mutex guards both txOutsByHash and the shared cache writes)
			mutex.Lock()
			defer mutex.Unlock()
			txOutsByHash[txHash] = txOuts
			for i, txOut := range txOuts {
				p.outPointValueCache.Add(wire.OutPoint{Hash: txHash, Index: uint32(i)}, uint64(txOut.Value))
			}
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, errors.WithStack(err)
	}
	// fill in fetched values; entries still at 0 were not served from cache
	for i := range outPoints {
		if outPoints[i].Hash == (chainhash.Hash{}) {
			// skip coinbase input
			continue
		}
		if result[outPoints[i]] == 0 {
			result[outPoints[i]] = uint64(txOutsByHash[outPoints[i].Hash][outPoints[i].Index].Value)
		}
	}
	return result, nil
}
// getInscriptionTransfersInOutPoints returns all known inscription transfers
// residing in the given outpoints, grouped by outpoint and then satpoint.
// Transfers created earlier in the current block (still in the flush buffer
// p.newInscriptionTransfers) take priority; only outpoints with no buffered
// transfer are fetched from the database.
func (p *Processor) getInscriptionTransfersInOutPoints(ctx context.Context, outPoints []wire.OutPoint) (map[wire.OutPoint]map[ordinals.SatPoint][]*entity.InscriptionTransfer, error) {
	// index the flush buffer once instead of rescanning it per outpoint
	// (the previous nested loop was O(len(outPoints) * len(buffer)))
	buffered := make(map[wire.OutPoint]map[ordinals.SatPoint][]*entity.InscriptionTransfer)
	for _, transfer := range p.newInscriptionTransfers {
		outPoint := transfer.NewSatPoint.OutPoint
		if _, ok := buffered[outPoint]; !ok {
			buffered[outPoint] = make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
		}
		buffered[outPoint][transfer.NewSatPoint] = append(buffered[outPoint][transfer.NewSatPoint], transfer)
	}
	result := make(map[wire.OutPoint]map[ordinals.SatPoint][]*entity.InscriptionTransfer)
	outPointsToFetch := make([]wire.OutPoint, 0)
	for _, outPoint := range outPoints {
		transfersBySatPoint, ok := buffered[outPoint]
		if !ok {
			// not in the buffer; fall back to the database below
			outPointsToFetch = append(outPointsToFetch, outPoint)
			continue
		}
		if _, ok := result[outPoint]; !ok {
			result[outPoint] = make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
		}
		for satPoint, transfers := range transfersBySatPoint {
			result[outPoint][satPoint] = append(result[outPoint][satPoint], transfers...)
		}
	}
	transfers, err := p.brc20Dg.GetInscriptionTransfersInOutPoints(ctx, outPointsToFetch)
	if err != nil {
		// message fixed: this queries transfers by outpoints, not "inscriptions by outpoint"
		return nil, errors.Wrap(err, "failed to get inscription transfers by outpoints")
	}
	for satPoint, transferList := range transfers {
		if _, ok := result[satPoint.OutPoint]; !ok {
			result[satPoint.OutPoint] = make(map[ordinals.SatPoint][]*entity.InscriptionTransfer)
		}
		result[satPoint.OutPoint][satPoint] = append(result[satPoint.OutPoint][satPoint], transferList...)
	}
	return result, nil
}
// getInscriptionEntryById returns the inscription entry for the given id,
// preferring unflushed in-memory state over the database (via
// getInscriptionEntriesByIds). Returns a wrapped errs.NotFound when no entry
// exists for the id, so callers can check with errors.Is.
func (p *Processor) getInscriptionEntryById(ctx context.Context, id ordinals.InscriptionId) (*ordinals.InscriptionEntry, error) {
	inscriptions, err := p.getInscriptionEntriesByIds(ctx, []ordinals.InscriptionId{id})
	if err != nil {
		// message fixed: the lookup is by inscription id, not "by outpoint" (copy-paste error)
		return nil, errors.Wrapf(err, "failed to get inscription entry by id %s", id)
	}
	inscription, ok := inscriptions[id]
	if !ok {
		return nil, errors.Wrap(errs.NotFound, "inscription not found")
	}
	return inscription, nil
}
// getInscriptionEntriesByIds returns inscription entries for the given ids,
// serving from the current block's unflushed state (p.newInscriptionEntryStates)
// first and querying the database only for the remainder. Ids with no entry
// anywhere are simply absent from the result map.
func (p *Processor) getInscriptionEntriesByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]*ordinals.InscriptionEntry, error) {
	// try to get from in-memory state if exists
	result := make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry, len(ids))
	idsToFetch := make([]ordinals.InscriptionId, 0)
	for _, id := range ids {
		if inscriptionEntry, ok := p.newInscriptionEntryStates[id]; ok {
			result[id] = inscriptionEntry
		} else {
			idsToFetch = append(idsToFetch, id)
		}
	}
	if len(idsToFetch) > 0 {
		inscriptions, err := p.brc20Dg.GetInscriptionEntriesByIds(ctx, idsToFetch)
		if err != nil {
			// message fixed: the lookup is by ids, not "by outpoint" (copy-paste error)
			return nil, errors.Wrap(err, "failed to get inscription entries by ids")
		}
		for id, inscription := range inscriptions {
			result[id] = inscription
		}
	}
	return result, nil
}
// getInscriptionNumbersByIds returns the inscription number for each given id,
// serving from the current block's unflushed entry state first and querying
// the database only for the remainder. Ids with no known entry are absent
// from the result map.
func (p *Processor) getInscriptionNumbersByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]int64, error) {
	// try to get from in-memory state if exists
	result := make(map[ordinals.InscriptionId]int64, len(ids))
	idsToFetch := make([]ordinals.InscriptionId, 0)
	for _, id := range ids {
		if entry, ok := p.newInscriptionEntryStates[id]; ok {
			result[id] = int64(entry.Number)
		} else {
			idsToFetch = append(idsToFetch, id)
		}
	}
	if len(idsToFetch) > 0 {
		inscriptions, err := p.brc20Dg.GetInscriptionNumbersByIds(ctx, idsToFetch)
		if err != nil {
			// message fixed: the lookup is by ids, not "by outpoint" (copy-paste error)
			return nil, errors.Wrap(err, "failed to get inscription numbers by ids")
		}
		for id, number := range inscriptions {
			result[id] = number
		}
	}
	return result, nil
}
// getInscriptionParentsByIds returns the parent inscription id for each given
// id that has one, serving from the current block's unflushed entry state
// first and querying the database only for the remainder. Ids without a
// parent (or with no known entry) are absent from the result map.
func (p *Processor) getInscriptionParentsByIds(ctx context.Context, ids []ordinals.InscriptionId) (map[ordinals.InscriptionId]ordinals.InscriptionId, error) {
	// try to get from in-memory state if exists
	result := make(map[ordinals.InscriptionId]ordinals.InscriptionId, len(ids))
	idsToFetch := make([]ordinals.InscriptionId, 0)
	for _, id := range ids {
		if entry, ok := p.newInscriptionEntryStates[id]; ok {
			// buffered entries without a parent intentionally yield no result entry
			if entry.Inscription.Parent != nil {
				result[id] = *entry.Inscription.Parent
			}
		} else {
			idsToFetch = append(idsToFetch, id)
		}
	}
	if len(idsToFetch) > 0 {
		inscriptions, err := p.brc20Dg.GetInscriptionParentsByIds(ctx, idsToFetch)
		if err != nil {
			// message fixed: the lookup is by ids, not "by outpoint" (copy-paste error)
			return nil, errors.Wrap(err, "failed to get inscription parents by ids")
		}
		for id, parent := range inscriptions {
			result[id] = parent
		}
	}
	return result, nil
}
// getBlockSubsidy returns the miner block subsidy (in satoshi) for the given
// height on the processor's network.
// NOTE(review): blockHeight is narrowed to int32 for blockchain.CalcBlockSubsidy;
// fine for realistic Bitcoin heights, but would wrap past MaxInt32 — confirm
// upstream heights are bounded.
func (p *Processor) getBlockSubsidy(blockHeight uint64) uint64 {
	return uint64(blockchain.CalcBlockSubsidy(int32(blockHeight), p.network.ChainParams()))
}

View File

@@ -1,271 +0,0 @@
package brc20
import (
"context"
"crypto/sha256"
"encoding/hex"
"slices"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/brc20/internal/entity"
"github.com/gaze-network/indexer-network/modules/brc20/internal/ordinals"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/samber/lo"
)
// Process implements indexer.Processor.
//
// For each block it: gathers the previous outpoints spent by the block,
// pre-fetches the inscription transfers and output values those transactions
// need, processes every transaction for inscription activity, orders the
// resulting transfers deterministically, applies BRC-20 state transitions, and
// finally flushes all accumulated state for the block in one DB transaction.
func (p *Processor) Process(ctx context.Context, blocks []*types.Block) error {
	for _, block := range blocks {
		ctx = logger.WithContext(ctx, slogx.Uint64("height", uint64(block.Header.Height)))
		p.blockReward = p.getBlockSubsidy(uint64(block.Header.Height))
		p.flotsamsSentAsFee = make([]*entity.Flotsam, 0)

		// put coinbase tx (first tx) at the end of block
		// (presumably so inscriptions spent as fees can land in the coinbase's
		// outputs after all fee-paying txs are processed — confirm)
		// NOTE(review): append may write into block.Transactions' backing array
		// past its length; safe as long as callers don't reuse that slice — confirm.
		transactions := append(block.Transactions[1:], block.Transactions[0])

		// Collect every previous outpoint spent in this block (skipping the
		// coinbase's null input) for a single batched transfer lookup.
		var inputOutPoints []wire.OutPoint
		for _, tx := range transactions {
			for _, txIn := range tx.TxIn {
				if txIn.PreviousOutTxHash == (chainhash.Hash{}) {
					// skip coinbase input
					continue
				}
				inputOutPoints = append(inputOutPoints, wire.OutPoint{
					Hash:  txIn.PreviousOutTxHash,
					Index: txIn.PreviousOutIndex,
				})
			}
		}

		// pre-fetch inscriptions in outpoints
		transfersInOutPoints, err := p.getInscriptionTransfersInOutPoints(ctx, inputOutPoints)
		if err != nil {
			return errors.Wrap(err, "failed to get inscriptions in outpoints")
		}

		// pre-fetch outpoint values for transactions with inscriptions/envelopes
		// (txs with no envelopes, no incoming transfers, and not the coinbase
		// are skipped entirely to avoid needless value lookups)
		outPointsToFetchValues := make([]wire.OutPoint, 0)
		for _, tx := range transactions {
			txInputOutPoints := lo.Map(tx.TxIn, func(txIn *types.TxIn, _ int) wire.OutPoint {
				return wire.OutPoint{
					Hash:  txIn.PreviousOutTxHash,
					Index: txIn.PreviousOutIndex,
				}
			})
			envelopes := ordinals.ParseEnvelopesFromTx(tx)
			outPointsWithTransfers := lo.Keys(transfersInOutPoints)
			txContainsTransfers := len(lo.Intersect(txInputOutPoints, outPointsWithTransfers)) > 0
			isCoinbase := tx.TxIn[0].PreviousOutTxHash.IsEqual(&chainhash.Hash{})
			if len(envelopes) == 0 && !txContainsTransfers && !isCoinbase {
				// no inscription activity, skip tx
				continue
			}
			outPointsToFetchValues = append(outPointsToFetchValues, lo.Map(tx.TxIn, func(txIn *types.TxIn, _ int) wire.OutPoint {
				return wire.OutPoint{
					Hash:  txIn.PreviousOutTxHash,
					Index: txIn.PreviousOutIndex,
				}
			})...)
		}
		outPointValues, err := p.getOutPointValues(ctx, outPointsToFetchValues)
		if err != nil {
			return errors.Wrap(err, "failed to get input values")
		}

		for _, tx := range transactions {
			if err := p.processInscriptionTx(ctx, tx, block.Header, transfersInOutPoints, outPointValues); err != nil {
				return errors.Wrap(err, "failed to process tx")
			}
		}

		// sort transfers by tx index, output index, output sat offset
		// NOTE: ord indexes inscription transfers spent as fee at the end of the block, but brc20 indexes them as soon as they are sent
		slices.SortFunc(p.newInscriptionTransfers, func(t1, t2 *entity.InscriptionTransfer) int {
			if t1.TxIndex != t2.TxIndex {
				return int(t1.TxIndex) - int(t2.TxIndex)
			}
			if t1.SentAsFee != t2.SentAsFee {
				// transfers sent as fee should be ordered after non-fees
				if t1.SentAsFee {
					return 1
				}
				return -1
			}
			if t1.NewSatPoint.OutPoint.Index != t2.NewSatPoint.OutPoint.Index {
				return int(t1.NewSatPoint.OutPoint.Index) - int(t2.NewSatPoint.OutPoint.Index)
			}
			return int(t1.NewSatPoint.Offset) - int(t2.NewSatPoint.Offset)
		})
		if err := p.processBRC20States(ctx, p.newInscriptionTransfers, block.Header); err != nil {
			return errors.Wrap(err, "failed to process brc20 states")
		}
		if err := p.flushBlock(ctx, block.Header); err != nil {
			return errors.Wrap(err, "failed to flush block")
		}
		logger.DebugContext(ctx, "Inserted new block")
	}
	return nil
}
// flushBlock persists all state accumulated while processing a block —
// indexed-block event hash, inscription entries/states/transfers, processor
// stats, tick entries/states, BRC-20 events, and balances — in a single data
// gateway transaction, then clears the corresponding in-memory buffers.
//
// A deferred Rollback discards everything if any insert fails before Commit.
// NOTE(review): the in-memory buffers are reset before Commit; if Commit itself
// fails, the buffered data is gone — confirm callers treat that as fatal.
func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeader) error {
	brc20DgTx, err := p.brc20Dg.BeginBRC20Tx(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to begin transaction")
	}
	// Rollback is a no-op after a successful Commit; warn (not fail) otherwise.
	defer func() {
		if err := brc20DgTx.Rollback(ctx); err != nil {
			logger.WarnContext(ctx, "failed to rollback transaction",
				slogx.Error(err),
				slogx.String("event", "rollback_brc20_insertion"),
			)
		}
	}()

	blockHeight := uint64(blockHeader.Height)

	// calculate event hash
	{
		// Trim a trailing separator left by event accumulation before hashing.
		eventHashString := p.eventHashString
		if len(eventHashString) > 0 && eventHashString[len(eventHashString)-1:] == eventHashSeparator {
			eventHashString = eventHashString[:len(eventHashString)-1]
		}
		eventHash := sha256.Sum256([]byte(eventHashString))
		prevIndexedBlock, err := brc20DgTx.GetIndexedBlockByHeight(ctx, blockHeader.Height-1)
		// Special case: the first indexed block has no stored predecessor; use
		// the module's starting block header with empty hashes instead.
		if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == startingBlockHeader[p.network].Height {
			prevIndexedBlock = &entity.IndexedBlock{
				Height:              uint64(startingBlockHeader[p.network].Height),
				Hash:                startingBlockHeader[p.network].Hash,
				EventHash:           []byte{},
				CumulativeEventHash: []byte{},
			}
			err = nil
		}
		if err != nil {
			return errors.Wrap(err, "failed to get previous indexed block")
		}
		// Chain this block's event hash onto the previous cumulative hash
		// (hex-concatenated, then re-hashed).
		var cumulativeEventHash [32]byte
		if len(prevIndexedBlock.CumulativeEventHash) == 0 {
			cumulativeEventHash = eventHash
		} else {
			cumulativeEventHash = sha256.Sum256([]byte(hex.EncodeToString(prevIndexedBlock.CumulativeEventHash[:]) + hex.EncodeToString(eventHash[:])))
		}
		if err := brc20DgTx.CreateIndexedBlock(ctx, &entity.IndexedBlock{
			Height:              blockHeight,
			Hash:                blockHeader.Hash,
			EventHash:           eventHash[:],
			CumulativeEventHash: cumulativeEventHash[:],
		}); err != nil {
			return errors.Wrap(err, "failed to create indexed block")
		}
		p.eventHashString = ""
	}
	// flush new inscription entries
	{
		newInscriptionEntries := lo.Values(p.newInscriptionEntries)
		if err := brc20DgTx.CreateInscriptionEntries(ctx, blockHeight, newInscriptionEntries); err != nil {
			return errors.Wrap(err, "failed to create inscription entries")
		}
		p.newInscriptionEntries = make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
	}
	// flush new inscription entry states
	{
		newInscriptionEntryStates := lo.Values(p.newInscriptionEntryStates)
		if err := brc20DgTx.CreateInscriptionEntryStates(ctx, blockHeight, newInscriptionEntryStates); err != nil {
			return errors.Wrap(err, "failed to create inscription entry states")
		}
		p.newInscriptionEntryStates = make(map[ordinals.InscriptionId]*ordinals.InscriptionEntry)
	}
	// flush new inscription transfers
	{
		if err := brc20DgTx.CreateInscriptionTransfers(ctx, p.newInscriptionTransfers); err != nil {
			return errors.Wrap(err, "failed to create inscription transfers")
		}
		p.newInscriptionTransfers = make([]*entity.InscriptionTransfer, 0)
	}
	// flush processor stats
	{
		stats := &entity.ProcessorStats{
			BlockHeight:             blockHeight,
			CursedInscriptionCount:  p.cursedInscriptionCount,
			BlessedInscriptionCount: p.blessedInscriptionCount,
			LostSats:                p.lostSats,
		}
		if err := brc20DgTx.CreateProcessorStats(ctx, stats); err != nil {
			return errors.Wrap(err, "failed to create processor stats")
		}
	}
	// Remaining BRC-20 buffers flushed below:
	// newTickEntries map[string]*entity.TickEntry
	// newTickEntryStates map[string]*entity.TickEntry
	// newEventDeploys []*entity.EventDeploy
	// newEventMints []*entity.EventMint
	// newEventInscribeTransfers []*entity.EventInscribeTransfer
	// newEventTransferTransfers []*entity.EventTransferTransfer
	// newBalances map[string]map[string]*entity.Balance
	// flush new tick entries
	{
		newTickEntries := lo.Values(p.newTickEntries)
		if err := brc20DgTx.CreateTickEntries(ctx, blockHeight, newTickEntries); err != nil {
			return errors.Wrap(err, "failed to create tick entries")
		}
		p.newTickEntries = make(map[string]*entity.TickEntry)
	}
	// flush new tick entry states
	{
		newTickEntryStates := lo.Values(p.newTickEntryStates)
		if err := brc20DgTx.CreateTickEntryStates(ctx, blockHeight, newTickEntryStates); err != nil {
			return errors.Wrap(err, "failed to create tick entry states")
		}
		p.newTickEntryStates = make(map[string]*entity.TickEntry)
	}
	// flush new events
	{
		if err := brc20DgTx.CreateEventDeploys(ctx, p.newEventDeploys); err != nil {
			return errors.Wrap(err, "failed to create event deploys")
		}
		if err := brc20DgTx.CreateEventMints(ctx, p.newEventMints); err != nil {
			return errors.Wrap(err, "failed to create event mints")
		}
		if err := brc20DgTx.CreateEventInscribeTransfers(ctx, p.newEventInscribeTransfers); err != nil {
			return errors.Wrap(err, "failed to create event inscribe transfers")
		}
		if err := brc20DgTx.CreateEventTransferTransfers(ctx, p.newEventTransferTransfers); err != nil {
			return errors.Wrap(err, "failed to create event transfer transfers")
		}
		p.newEventDeploys = make([]*entity.EventDeploy, 0)
		p.newEventMints = make([]*entity.EventMint, 0)
		p.newEventInscribeTransfers = make([]*entity.EventInscribeTransfer, 0)
		p.newEventTransferTransfers = make([]*entity.EventTransferTransfer, 0)
	}
	// flush new balances (nested map: tick -> address -> balance)
	{
		newBalances := make([]*entity.Balance, 0)
		for _, tickBalances := range p.newBalances {
			for _, balance := range tickBalances {
				newBalances = append(newBalances, balance)
			}
		}
		if err := brc20DgTx.CreateBalances(ctx, newBalances); err != nil {
			return errors.Wrap(err, "failed to create balances")
		}
		p.newBalances = make(map[string]map[string]*entity.Balance)
	}
	if err := brc20DgTx.Commit(ctx); err != nil {
		return errors.Wrap(err, "failed to commit transaction")
	}
	return nil
}

View File

@@ -31,6 +31,7 @@ type Processor struct {
bitcoinClient btcclient.Contract
network common.Network
reportingClient *reportingclient.ReportingClient
cleanupFuncs []func(context.Context) error
newRuneEntries map[runes.RuneId]*runes.RuneEntry
newRuneEntryStates map[runes.RuneId]*runes.RuneEntry
@@ -40,13 +41,14 @@ type Processor struct {
newRuneTxs []*entity.RuneTransaction
}
func NewProcessor(runesDg datagateway.RunesDataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, bitcoinClient btcclient.Contract, network common.Network, reportingClient *reportingclient.ReportingClient) *Processor {
func NewProcessor(runesDg datagateway.RunesDataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, bitcoinClient btcclient.Contract, network common.Network, reportingClient *reportingclient.ReportingClient, cleanupFuncs []func(context.Context) error) *Processor {
return &Processor{
runesDg: runesDg,
indexerInfoDg: indexerInfoDg,
bitcoinClient: bitcoinClient,
network: network,
reportingClient: reportingClient,
cleanupFuncs: cleanupFuncs,
newRuneEntries: make(map[runes.RuneId]*runes.RuneEntry),
newRuneEntryStates: make(map[runes.RuneId]*runes.RuneEntry),
newOutPointBalances: make(map[wire.OutPoint][]*entity.OutPointBalance),
@@ -228,3 +230,13 @@ func (p *Processor) RevertData(ctx context.Context, from int64) error {
}
return nil
}
// Shutdown runs every registered cleanup function and reports all failures.
//
// Cleanup continues past individual failures; the collected errors are joined
// into a single error (nil when every cleanup succeeds).
func (p *Processor) Shutdown(ctx context.Context) error {
	var failures []error
	for _, cleanup := range p.cleanupFuncs {
		if err := cleanup(ctx); err != nil {
			failures = append(failures, err)
		}
	}
	return errors.WithStack(errors.Join(failures...))
}

View File

@@ -33,6 +33,7 @@ func New(injector do.Injector) (indexer.IndexerWorker, error) {
runesDg runesdatagateway.RunesDataGateway
indexerInfoDg runesdatagateway.IndexerInfoDataGateway
)
var cleanupFuncs []func(context.Context) error
switch strings.ToLower(conf.Modules.Runes.Database) {
case "postgresql", "postgres", "pg":
pg, err := postgres.NewPool(ctx, conf.Modules.Runes.Postgres)
@@ -42,7 +43,10 @@ func New(injector do.Injector) (indexer.IndexerWorker, error) {
}
return nil, errors.Wrap(err, "can't create Postgres connection pool")
}
defer pg.Close()
cleanupFuncs = append(cleanupFuncs, func(ctx context.Context) error {
pg.Close()
return nil
})
runesRepo := runespostgres.NewRepository(pg)
runesDg = runesRepo
indexerInfoDg = runesRepo
@@ -62,7 +66,7 @@ func New(injector do.Injector) (indexer.IndexerWorker, error) {
return nil, errors.Wrapf(errs.Unsupported, "%q datasource is not supported", conf.Modules.Runes.Datasource)
}
processor := NewProcessor(runesDg, indexerInfoDg, bitcoinClient, conf.Network, reportingClient)
processor := NewProcessor(runesDg, indexerInfoDg, bitcoinClient, conf.Network, reportingClient, cleanupFuncs)
if err := processor.VerifyStates(ctx); err != nil {
return nil, errors.WithStack(err)
}

View File

@@ -1,6 +1,7 @@
package runes
import (
"fmt"
"log"
"slices"
"unicode/utf8"
@@ -334,6 +335,7 @@ func runestonePayloadFromTx(tx *types.Transaction) ([]byte, Flaws) {
continue
}
if err := tokenizer.Err(); err != nil {
fmt.Println(err.Error())
continue
}
if opCode := tokenizer.Opcode(); opCode != RUNESTONE_PAYLOAD_MAGIC_NUMBER {

View File

@@ -9,5 +9,4 @@ import (
type Contract interface {
GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error)
GetTransactionOutputs(ctx context.Context, txHash chainhash.Hash) ([]*types.TxOut, error)
}

203
pkg/btcutils/address.go Normal file
View File

@@ -0,0 +1,203 @@
package btcutils
import (
"encoding/json"
"reflect"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
)
// IsAddress reports whether the passed string is a valid bitcoin address of a
// supported type.
//
// NetParams is optional. If provided, only that network is checked;
// otherwise every supported network is tried.
func IsAddress(address string, defaultNet ...*chaincfg.Params) bool {
	if address == "" {
		return false
	}

	// A specific network was supplied: validate against it alone.
	if net, ok := utils.Optional(defaultNet); ok {
		_, _, err := parseAddress(address, net)
		return err == nil
	}

	// No network supplied: accept the address if any supported network parses it.
	for _, params := range supportedNetworks {
		if _, _, err := parseAddress(address, params); err == nil {
			return true
		}
	}
	return false
}
// TODO: create GetAddressNetwork
// check `Bech32HRPSegwit` prefix or netID for P2SH/P2PKH is equal to `PubKeyHashAddrID/ScriptHashAddrID`

// GetAddressType returns the address type of the passed address.
func GetAddressType(address string, net *chaincfg.Params) (AddressType, error) {
	_, addrType, err := parseAddress(address, net)
	if err != nil {
		return addrType, errors.WithStack(err)
	}
	return addrType, nil
}
// Address is a parsed bitcoin address together with the network params it was
// decoded against and its derived pay-to-address script.
type Address struct {
	decoded      btcutil.Address  // underlying decoded address
	net          *chaincfg.Params // network params used to decode the address
	encoded      string           // canonical string encoding of the address
	encodedType  AddressType      // detected type (P2WPKH, P2TR, P2SH, P2PKH, P2WSH)
	scriptPubKey []byte           // script pubkey paying to this address
}
// NewAddress creates a new address from the given address string.
// It panics if the address is invalid; use SafeNewAddress to get an error instead.
//
// defaultNet is required if your address is P2SH or P2PKH (legacy or nested segwit)
// If your address is P2WSH, P2WPKH or P2TR, defaultNet is not required.
func NewAddress(address string, defaultNet ...*chaincfg.Params) Address {
	addr, err := SafeNewAddress(address, defaultNet...)
	if err != nil {
		// Message previously read "can't create parse address" (garbled).
		logger.Panic("can't parse address", slogx.Error(err), slogx.String("package", "btcutils"))
	}
	return addr
}
// SafeNewAddress creates a new address from the given address string.
// It returns an error if the address is invalid.
//
// defaultNet is required if your address is P2SH or P2PKH (legacy or nested segwit)
// If your address is P2WSH, P2WPKH or P2TR, defaultNet is not required.
func SafeNewAddress(address string, defaultNet ...*chaincfg.Params) (Address, error) {
	// Fall back to mainnet params when no network is supplied.
	params := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)

	decodedAddr, addrType, err := parseAddress(address, params)
	if err != nil {
		return Address{}, errors.Wrap(err, "can't parse address")
	}

	pkScript, err := txscript.PayToAddrScript(decodedAddr)
	if err != nil {
		return Address{}, errors.Wrap(err, "can't get script pubkey")
	}

	addr := Address{
		decoded:      decodedAddr,
		net:          params,
		encoded:      decodedAddr.EncodeAddress(),
		encodedType:  addrType,
		scriptPubKey: pkScript,
	}
	return addr, nil
}
// String returns the address string.
func (a Address) String() string {
	return a.encoded
}

// Type returns the address type.
func (a Address) Type() AddressType {
	return a.encodedType
}

// Decoded returns the underlying btcutil.Address.
func (a Address) Decoded() btcutil.Address {
	return a.decoded
}

// IsForNet returns whether or not the address is associated with the passed bitcoin network.
func (a Address) IsForNet(net *chaincfg.Params) bool {
	return a.decoded.IsForNet(net)
}

// ScriptAddress returns the raw bytes of the address to be used when inserting the address into a txout's script.
func (a Address) ScriptAddress() []byte {
	return a.decoded.ScriptAddress()
}

// Net returns the address network params.
func (a Address) Net() *chaincfg.Params {
	return a.net
}

// NetworkName returns the name of the network params the address was decoded
// against (e.g. "mainnet", "testnet3").
func (a Address) NetworkName() string {
	return a.net.Name
}

// ScriptPubKey returns the pubkey script paying to this address.
func (a Address) ScriptPubKey() []byte {
	return a.scriptPubKey
}

// Equal returns true if both addresses have the same string encoding.
func (a Address) Equal(b Address) bool {
	return a.encoded == b.encoded
}

// MarshalText implements the encoding.TextMarshaler interface.
func (a Address) MarshalText() ([]byte, error) {
	return []byte(a.encoded), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
//
// The returned error carries errs.InvalidArgument (for errs.Is checks) and now
// also includes the underlying parse failure, which was previously discarded
// and made rejections impossible to diagnose.
// NOTE(review): parsing uses SafeNewAddress's default (mainnet) params, so
// legacy/nested-segwit addresses of other networks will be rejected — confirm
// this is intended.
func (a *Address) UnmarshalText(input []byte) error {
	address := string(input)
	addr, err := SafeNewAddress(address)
	if err != nil {
		return errors.Wrapf(errs.InvalidArgument, "invalid address `%s`: %v", address, err)
	}
	*a = addr
	return nil
}
// MarshalJSON implements the json.Marshaler interface.
// The address is encoded as a double-quoted JSON string.
func (a Address) MarshalJSON() ([]byte, error) {
	text, err := a.MarshalText()
	if err != nil {
		return nil, &json.MarshalerError{Type: reflect.TypeOf(a), Err: err}
	}
	out := make([]byte, 0, len(text)+2)
	out = append(out, '"')
	out = append(out, text...)
	out = append(out, '"')
	return out, nil
}
// UnmarshalJSON parses an address given as a double-quoted JSON string.
func (a *Address) UnmarshalJSON(input []byte) error {
	quoted := len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"'
	if !quoted {
		return &json.UnmarshalTypeError{Value: "non-string", Type: reflect.TypeOf(Address{})}
	}
	// Strip the surrounding quotes and delegate to the text unmarshaler.
	return a.UnmarshalText(input[1 : len(input)-1])
}
// parseAddress decodes the address string against the given network params and
// classifies it into one of the supported address types.
// Decoded types outside the five supported kinds are rejected with errs.Unsupported.
func parseAddress(address string, params *chaincfg.Params) (btcutil.Address, AddressType, error) {
	decoded, err := btcutil.DecodeAddress(address, params)
	if err != nil {
		return nil, 0, errors.Wrapf(err, "can't decode address `%s` for network `%s`", address, params.Name)
	}
	// Map the concrete btcutil address type to our AddressType constants.
	switch decoded.(type) {
	case *btcutil.AddressWitnessPubKeyHash:
		return decoded, AddressP2WPKH, nil
	case *btcutil.AddressTaproot:
		return decoded, AddressP2TR, nil
	case *btcutil.AddressScriptHash:
		return decoded, AddressP2SH, nil
	case *btcutil.AddressPubKeyHash:
		return decoded, AddressP2PKH, nil
	case *btcutil.AddressWitnessScriptHash:
		return decoded, AddressP2WSH, nil
	default:
		return nil, 0, errors.Wrap(errs.Unsupported, "unsupported address type")
	}
}

View File

@@ -0,0 +1,80 @@
package btcutils_test
import (
"testing"
"github.com/btcsuite/btcd/chaincfg"
"github.com/gaze-network/indexer-network/pkg/btcutils"
)
/*
NOTE:
# Compare this benchmark to go-ethereum/common.Address utils
- go-ethereum/common.HexToAddress speed: 45 ns/op, 48 B/op, 1 allocs/op
- go-ethereum/common.IsHexAddress speed: 25 ns/op, 0 B/op, 0 allocs/op
It's slower than go-ethereum/common.Address utils because ethereum wallet address is Hex string 20 bytes,
but Bitcoin has many types of address and each type has complex algorithm to solve (can't solve and validate address type directly from address string)
20/Jan/2024 @Planxnx Macbook Air M1 16GB
BenchmarkIsAddress/specific-network/mainnet/P2WPKH-8 1776146 625.6 ns/op 120 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/testnet3/P2WPKH-8 1917876 623.2 ns/op 120 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/mainnet/P2TR-8 1330348 915.4 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/testnet3/P2TR-8 1235806 931.1 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/mainnet/P2WSH-8 1261730 960.9 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/testnet3/P2WSH-8 1307851 916.1 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/specific-network/mainnet/P2SH-8 3081762 402.0 ns/op 192 B/op 8 allocs/op
BenchmarkIsAddress/specific-network/testnet3/P2SH-8 3245838 344.9 ns/op 176 B/op 7 allocs/op
BenchmarkIsAddress/specific-network/mainnet/P2PKH-8 2904252 410.4 ns/op 184 B/op 8 allocs/op
BenchmarkIsAddress/specific-network/testnet3/P2PKH-8 3522332 342.8 ns/op 176 B/op 7 allocs/op
BenchmarkIsAddress/automate-network/mainnet/P2WPKH-8 1882059 637.6 ns/op 120 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/testnet3/P2WPKH-8 1626151 664.8 ns/op 120 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/mainnet/P2TR-8 1250253 952.1 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/testnet3/P2TR-8 1257901 993.7 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/mainnet/P2WSH-8 1000000 1005 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/testnet3/P2WSH-8 1209108 971.2 ns/op 160 B/op 3 allocs/op
BenchmarkIsAddress/automate-network/mainnet/P2SH-8 1869075 625.0 ns/op 268 B/op 9 allocs/op
BenchmarkIsAddress/automate-network/testnet3/P2SH-8 779496 1609 ns/op 694 B/op 17 allocs/op
BenchmarkIsAddress/automate-network/mainnet/P2PKH-8 1924058 650.6 ns/op 259 B/op 9 allocs/op
BenchmarkIsAddress/automate-network/testnet3/P2PKH-8 721510 1690 ns/op 694 B/op 17 allocs/op
*/
// BenchmarkIsAddress measures IsAddress for every supported address type,
// both with an explicit network ("specific-network") and with automatic
// network detection ("automate-network"); see the comment block above for
// recorded results.
func BenchmarkIsAddress(b *testing.B) {
	cases := []btcutils.Address{
		/* P2WPKH */ btcutils.NewAddress("bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh", &chaincfg.MainNetParams),
		/* P2WPKH */ btcutils.NewAddress("tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey", &chaincfg.TestNet3Params),
		/* P2TR */ btcutils.NewAddress("bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38", &chaincfg.MainNetParams),
		/* P2TR */ btcutils.NewAddress("tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg", &chaincfg.TestNet3Params),
		/* P2WSH */ btcutils.NewAddress("bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak", &chaincfg.MainNetParams),
		/* P2WSH */ btcutils.NewAddress("tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7", &chaincfg.TestNet3Params),
		/* P2SH */ btcutils.NewAddress("3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw", &chaincfg.MainNetParams),
		/* P2SH */ btcutils.NewAddress("2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ", &chaincfg.TestNet3Params),
		/* P2PKH */ btcutils.NewAddress("1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH", &chaincfg.MainNetParams),
		/* P2PKH */ btcutils.NewAddress("migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3", &chaincfg.TestNet3Params),
	}
	// With the network passed explicitly: only one network is checked.
	b.Run("specific-network", func(b *testing.B) {
		for _, c := range cases {
			b.Run(c.NetworkName()+"/"+c.Type().String(), func(b *testing.B) {
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					_ = btcutils.IsAddress(c.String(), c.Net())
				}
			})
		}
	})
	// Without a network: IsAddress tries every supported network.
	b.Run("automate-network", func(b *testing.B) {
		for _, c := range cases {
			b.Run(c.NetworkName()+"/"+c.Type().String(), func(b *testing.B) {
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					ok := btcutils.IsAddress(c.String())
					if !ok {
						b.Error("IsAddress returned false")
					}
				}
			})
		}
	})
}

View File

@@ -0,0 +1,363 @@
package btcutils_test
import (
"encoding/json"
"fmt"
"testing"
"github.com/btcsuite/btcd/chaincfg"
"github.com/gaze-network/indexer-network/pkg/btcutils"
"github.com/stretchr/testify/assert"
)
// TestGetAddressType verifies that GetAddressType detects the correct type for
// each supported address format on both mainnet and testnet3.
func TestGetAddressType(t *testing.T) {
	type Spec struct {
		Address             string
		DefaultNet          *chaincfg.Params
		ExpectedError       error
		ExpectedAddressType btcutils.AddressType
	}
	specs := []Spec{
		{
			Address:             "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WPKH,
		},
		{
			Address:             "tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WPKH,
		},
		{
			Address:             "bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2TR,
		},
		{
			Address:             "tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2TR,
		},
		{
			Address:             "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2SH,
		},
		{
			Address:             "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2PKH,
		},
		{
			Address:             "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WSH,
		},
		{
			Address:             "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3",
			DefaultNet:          &chaincfg.TestNet3Params,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2PKH,
		},
		{
			Address:             "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WSH,
		},
		{
			Address:             "2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ",
			DefaultNet:          &chaincfg.TestNet3Params,
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2SH,
		},
	}
	for _, spec := range specs {
		t.Run(fmt.Sprintf("address:%s", spec.Address), func(t *testing.T) {
			actualAddressType, actualError := btcutils.GetAddressType(spec.Address, spec.DefaultNet)
			if spec.ExpectedError != nil {
				assert.ErrorIs(t, actualError, spec.ExpectedError)
			} else {
				// Assert no error explicitly so an unexpected parse failure is
				// reported as such instead of as a confusing type mismatch.
				assert.NoError(t, actualError)
				assert.Equal(t, spec.ExpectedAddressType, actualAddressType)
			}
		})
	}
}
// TestNewAddress verifies that NewAddress detects the expected type and
// round-trips back to the original string. DefaultNet is only passed for
// legacy (P2PKH) and script-hash (P2SH) addresses; segwit/taproot addresses
// carry their network in the bech32 HRP.
func TestNewAddress(t *testing.T) {
	type Spec struct {
		Address             string
		DefaultNet          *chaincfg.Params
		ExpectedAddressType btcutils.AddressType
	}
	specs := []Spec{
		{
			Address: "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2WPKH,
		},
		{
			Address: "tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2WPKH,
		},
		{
			Address: "bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2TR,
		},
		{
			Address: "tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2TR,
		},
		{
			Address: "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2WSH,
		},
		{
			Address: "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
			// DefaultNet: &chaincfg.MainNetParams, // Optional
			ExpectedAddressType: btcutils.AddressP2WSH,
		},
		{
			Address:             "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedAddressType: btcutils.AddressP2SH,
		},
		{
			Address:             "2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ",
			DefaultNet:          &chaincfg.TestNet3Params,
			ExpectedAddressType: btcutils.AddressP2SH,
		},
		{
			Address:             "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
			DefaultNet:          &chaincfg.MainNetParams,
			ExpectedAddressType: btcutils.AddressP2PKH,
		},
		{
			Address:             "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3",
			DefaultNet:          &chaincfg.TestNet3Params,
			ExpectedAddressType: btcutils.AddressP2PKH,
		},
	}
	for _, spec := range specs {
		t.Run(fmt.Sprintf("address:%s,type:%s", spec.Address, spec.ExpectedAddressType), func(t *testing.T) {
			// NOTE(review): when DefaultNet is omitted above, a nil *chaincfg.Params
			// is passed through the variadic — confirm utils.DefaultOptional treats
			// it as "not provided".
			addr := btcutils.NewAddress(spec.Address, spec.DefaultNet)
			assert.Equal(t, spec.ExpectedAddressType, addr.Type())
			assert.Equal(t, spec.Address, addr.String())
		})
	}
}
// TestIsAddress verifies address validation without an explicit network:
// every supported format on mainnet and testnet3 is accepted, while the empty
// string and checksum-corrupted addresses are rejected.
func TestIsAddress(t *testing.T) {
	type Spec struct {
		Address  string
		Expected bool
	}
	specs := []Spec{
		{
			Address:  "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh",
			Expected: true,
		},
		{
			Address:  "tb1qfpgdxtpl7kz5qdus2pmexyjaza99c28qd6ltey",
			Expected: true,
		},
		{
			Address:  "bc1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qvz5d38",
			Expected: true,
		},
		{
			Address:  "tb1p7h87kqsmpzatddzhdhuy9gmxdpvn5kvar6hhqlgau8d2ffa0pa3qm2zztg",
			Expected: true,
		},
		{
			Address:  "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
			Expected: true,
		},
		{
			Address:  "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
			Expected: true,
		},
		{
			Address:  "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
			Expected: true,
		},
		{
			Address:  "2NCxMvHPTduZcCuUeAiWUpuwHga7Y66y9XJ",
			Expected: true,
		},
		{
			Address:  "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
			Expected: true,
		},
		{
			Address:  "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz3",
			Expected: true,
		},
		// Empty input must be rejected.
		{
			Address:  "",
			Expected: false,
		},
		// Valid addresses with the last character altered (bad checksum).
		{
			Address:  "migbBPcDajPfffrhoLpYFTQNXQFbWbhpz2",
			Expected: false,
		},
		{
			Address:  "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczz",
			Expected: false,
		},
	}
	for _, spec := range specs {
		t.Run(fmt.Sprintf("address:%s", spec.Address), func(t *testing.T) {
			ok := btcutils.IsAddress(spec.Address)
			assert.Equal(t, spec.Expected, ok)
		})
	}
}
// TestAddressEncoding verifies that Address marshals to a plain JSON string,
// both standalone and nested inside an object.
func TestAddressEncoding(t *testing.T) {
	rawAddress := "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh"
	address := btcutils.NewAddress(rawAddress, &chaincfg.MainNetParams)
	type Spec struct {
		Data     interface{}
		Expected string
	}
	specs := []Spec{
		// Standalone Address value -> quoted string.
		{
			Data:     address,
			Expected: fmt.Sprintf(`"%s"`, rawAddress),
		},
		// Nested in a map for comparison with plain string marshaling.
		{
			Data: map[string]interface{}{
				"address": rawAddress,
			},
			Expected: fmt.Sprintf(`{"address":"%s"}`, rawAddress),
		},
	}
	for i, spec := range specs {
		t.Run(fmt.Sprint(i+1), func(t *testing.T) {
			actual, err := json.Marshal(spec.Data)
			assert.NoError(t, err)
			assert.Equal(t, spec.Expected, string(actual))
		})
	}
}
// TestAddressDecoding verifies JSON unmarshaling of Address from a bare
// string, a struct field, and an array element, plus rejection of invalid
// address strings and non-string JSON values.
func TestAddressDecoding(t *testing.T) {
	rawAddress := "bc1qfpgdxtpl7kz5qdus2pmexyjaza99c28q8uyczh"
	address := btcutils.NewAddress(rawAddress, &chaincfg.MainNetParams)
	// Case #1: address is a string
	t.Run("from_string", func(t *testing.T) {
		input := fmt.Sprintf(`"%s"`, rawAddress)
		expected := address
		actual := btcutils.Address{}
		err := json.Unmarshal([]byte(input), &actual)
		if !assert.NoError(t, err) {
			t.FailNow()
		}
		assert.Equal(t, expected, actual)
	})
	// Case #2: address is a field of a struct
	t.Run("from_field_string", func(t *testing.T) {
		type Data struct {
			Address btcutils.Address `json:"address"`
		}
		input := fmt.Sprintf(`{"address":"%s"}`, rawAddress)
		expected := Data{Address: address}
		actual := Data{}
		err := json.Unmarshal([]byte(input), &actual)
		if !assert.NoError(t, err) {
			t.FailNow()
		}
		assert.Equal(t, expected, actual)
	})
	// Case #3: address is an element of an array
	t.Run("from_array", func(t *testing.T) {
		input := fmt.Sprintf(`["%s"]`, rawAddress)
		expected := []btcutils.Address{address}
		actual := []btcutils.Address{}
		err := json.Unmarshal([]byte(input), &actual)
		if !assert.NoError(t, err) {
			t.FailNow()
		}
		assert.Equal(t, expected, actual)
	})
	// Case #4: not supported address type
	t.Run("from_string/not_address", func(t *testing.T) {
		input := fmt.Sprintf(`"%s"`, "THIS_IS_NOT_SUPPORTED_ADDRESS")
		actual := btcutils.Address{}
		err := json.Unmarshal([]byte(input), &actual)
		assert.Error(t, err)
	})
	// Case #5: invalid field type (JSON number instead of string)
	t.Run("from_number", func(t *testing.T) {
		type Data struct {
			Address btcutils.Address `json:"address"`
		}
		input := fmt.Sprintf(`{"address":%d}`, 123)
		actual := Data{}
		err := json.Unmarshal([]byte(input), &actual)
		assert.Error(t, err)
	})
}

44
pkg/btcutils/btc.go Normal file
View File

@@ -0,0 +1,44 @@
package btcutils
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
)
var (
	// NullAddress is an address whose script address is all zeros
	// (the mainnet "burn" address).
	NullAddress = NewAddress("1111111111111111111114oLvT2", &chaincfg.MainNetParams)

	// NullHash is a hash whose bytes are all zero.
	NullHash = utils.Must(chainhash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000"))
)

// TransactionType is the type of bitcoin transaction.
// It's an alias of txscript.ScriptClass.
type TransactionType = txscript.ScriptClass

// AddressType is the type of bitcoin address.
// It's an alias of txscript.ScriptClass.
type AddressType = txscript.ScriptClass

// Types of bitcoin transaction.
const (
	TransactionP2WPKH  = txscript.WitnessV0PubKeyHashTy
	TransactionP2TR    = txscript.WitnessV1TaprootTy
	TransactionTaproot = TransactionP2TR // Alias of P2TR
	TransactionP2SH    = txscript.ScriptHashTy
	TransactionP2PKH   = txscript.PubKeyHashTy
	TransactionP2WSH   = txscript.WitnessV0ScriptHashTy
)

// Types of bitcoin address (same underlying values as the transaction types).
const (
	AddressP2WPKH  = txscript.WitnessV0PubKeyHashTy
	AddressP2TR    = txscript.WitnessV1TaprootTy
	AddressTaproot = AddressP2TR // Alias of P2TR
	AddressP2SH    = txscript.ScriptHashTy
	AddressP2PKH   = txscript.PubKeyHashTy
	AddressP2WSH   = txscript.WitnessV0ScriptHashTy
)

View File

@@ -0,0 +1,23 @@
package btcutils
import (
"github.com/btcsuite/btcd/chaincfg"
)
// supportedNetworks maps a network name to its chain parameters.
// Note: "testnet" resolves to the testnet3 parameters.
var supportedNetworks = map[string]*chaincfg.Params{
	"mainnet": &chaincfg.MainNetParams,
	"testnet": &chaincfg.TestNet3Params,
}
// IsSupportedNetwork returns true if the given network is supported.
//
// TODO: create enum for network
func IsSupportedNetwork(network string) bool {
	if _, found := supportedNetworks[network]; found {
		return true
	}
	return false
}
// GetNetParams returns the *chaincfg.Params for the given network.
// It returns nil when the network is not supported; callers that must
// avoid nil should check IsSupportedNetwork first.
func GetNetParams(network string) *chaincfg.Params {
	params, ok := supportedNetworks[network]
	if !ok {
		return nil
	}
	return params
}

54
pkg/btcutils/pk_script.go Normal file
View File

@@ -0,0 +1,54 @@
package btcutils
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/cockroachdb/errors"
)
// NewPkScript creates a pubkey script (or witness program) from the given
// address string. defaultNet is optional and defaults to mainnet.
//
// see: https://en.bitcoin.it/wiki/Script
func NewPkScript(address string, defaultNet ...*chaincfg.Params) ([]byte, error) {
	params := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)

	addr, _, err := parseAddress(address, params)
	if err != nil {
		return nil, errors.Wrap(err, "can't parse address")
	}

	script, err := txscript.PayToAddrScript(addr)
	if err != nil {
		return nil, errors.Wrap(err, "can't get script pubkey")
	}
	return script, nil
}
// GetAddressTypeFromPkScript returns the address type from the given pubkey
// script/script pubkey. defaultNet is optional and defaults to mainnet.
// On parse failure it returns txscript.NonStandardTy and a wrapped error.
func GetAddressTypeFromPkScript(pkScript []byte, defaultNet ...*chaincfg.Params) (AddressType, error) {
	params := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)
	class, _, _, err := txscript.ExtractPkScriptAddrs(pkScript, params)
	if err != nil {
		return txscript.NonStandardTy, errors.Wrap(err, "can't parse pkScript")
	}
	return class, nil
}
// ExtractAddressFromPkScript extracts an address from the given pubkey
// script/script pubkey. When the script encodes several addresses, only the
// first is returned; multi-signature scripts are not supported.
// defaultNet is optional and defaults to mainnet.
func ExtractAddressFromPkScript(pkScript []byte, defaultNet ...*chaincfg.Params) (Address, error) {
	params := utils.DefaultOptional(defaultNet, &chaincfg.MainNetParams)

	addrType, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, params)
	if err != nil {
		return Address{}, errors.Wrap(err, "can't parse pkScript")
	}
	if len(addrs) == 0 {
		return Address{}, errors.New("can't extract address from pkScript")
	}

	primary := addrs[0]
	result := Address{
		decoded:      primary,
		net:          params,
		encoded:      primary.EncodeAddress(),
		encodedType:  addrType,
		scriptPubKey: pkScript,
	}
	return result, nil
}

View File

@@ -0,0 +1,205 @@
package btcutils_test
import (
"encoding/hex"
"fmt"
"testing"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/pkg/btcutils"
"github.com/stretchr/testify/assert"
)
// TestNewPkScript checks that NewPkScript produces the expected script
// pubkey for every supported address type, and that the result round-trips
// back to the original address.
func TestNewPkScript(t *testing.T) {
	anyError := errors.New("any error")
	type Spec struct {
		Address          string
		DefaultNet       *chaincfg.Params
		ExpectedError    error
		ExpectedPkScript string // hex encoded
	}
	specs := []Spec{
		{
			Address:          "some_invalid_address",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    anyError,
			ExpectedPkScript: "",
		},
		{
			// P2WPKH
			Address:          "bc1qdx72th7e3z8zc5wdrdxweswfcne974pjneyjln",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "001469bca5dfd9888e2c51cd1b4cecc1c9c4f25f5432",
		},
		{
			// P2WPKH
			Address:          "bc1q7cj6gz6t3d28qg7kxhrc7h5t3h0re34fqqalga",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "0014f625a40b4b8b547023d635c78f5e8b8dde3cc6a9",
		},
		{
			// P2TR
			Address:          "bc1pfd0zw2jwlpn4xckpr3dxpt7x0gw6wetuftxvrc4dt2qgn9azjuus65fug6",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "51204b5e272a4ef8675362c11c5a60afc67a1da7657c4accc1e2ad5a808997a29739",
		},
		{
			// P2TR
			Address:          "bc1pxpumml545tqum5afarzlmnnez2npd35nvf0j0vnrp88nemqsn54qle05sm",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "51203079bdfe95a2c1cdd3a9e8c5fdce7912a616c693625f27b26309cf3cec109d2a",
		},
		{
			// P2SH
			Address:          "3Ccte7SJz71tcssLPZy3TdWz5DTPeNRbPw",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "a91477e1a3d54f545d83869ae3a6b28b071422801d7b87",
		},
		{
			// P2PKH
			Address:          "1KrRZSShVkdc8J71CtY4wdw46Rx3BRLKyH",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "76a914cecb25b53809991c7beef2d27bc2be49e78c684388ac",
		},
		{
			// P2WSH
			Address:          "bc1qeklep85ntjz4605drds6aww9u0qr46qzrv5xswd35uhjuj8ahfcqgf6hak",
			DefaultNet:       &chaincfg.MainNetParams,
			ExpectedError:    nil,
			ExpectedPkScript: "0020cdbf909e935c855d3e8d1b61aeb9c5e3c03ae8021b286839b1a72f2e48fdba70",
		},
	}
	for _, spec := range specs {
		t.Run(fmt.Sprintf("address:%s", spec.Address), func(t *testing.T) {
			// Validate the fixture itself: the expected pkScript must decode
			// back to the spec address, and the address must encode to the
			// expected pkScript via btcd directly.
			if spec.ExpectedError == nil {
				{
					expectedPkScriptRaw, err := hex.DecodeString(spec.ExpectedPkScript)
					if err != nil {
						t.Fatalf("can't decode expected pkscript %s, Reason: %s", spec.ExpectedPkScript, err)
					}
					expectedPkScript, err := txscript.ParsePkScript(expectedPkScriptRaw)
					if err != nil {
						t.Fatalf("invalid expected pkscript %s, Reason: %s", spec.ExpectedPkScript, err)
					}
					expectedAddress, err := expectedPkScript.Address(spec.DefaultNet)
					if err != nil {
						t.Fatalf("can't get address from expected pkscript %s, Reason: %s", spec.ExpectedPkScript, err)
					}
					assert.Equal(t, spec.Address, expectedAddress.EncodeAddress())
				}
				{
					address, err := btcutil.DecodeAddress(spec.Address, spec.DefaultNet)
					if err != nil {
						t.Fatalf("can't decode address %s(%s),Reason: %s", spec.Address, spec.DefaultNet.Name, err)
					}
					pkScript, err := txscript.PayToAddrScript(address)
					if err != nil {
						t.Fatalf("can't get pkscript from address %s(%s),Reason: %s", spec.Address, spec.DefaultNet.Name, err)
					}
					pkScriptStr := hex.EncodeToString(pkScript)
					assert.Equal(t, spec.ExpectedPkScript, pkScriptStr)
				}
			}
			pkScript, err := btcutils.NewPkScript(spec.Address, spec.DefaultNet)
			switch {
			case spec.ExpectedError == anyError:
				assert.Error(t, err)
			case spec.ExpectedError != nil:
				assert.ErrorIs(t, err, spec.ExpectedError)
			default:
				// BUG FIX: the success path previously never asserted that
				// NewPkScript returned no error before using pkScript.
				if !assert.NoError(t, err) {
					t.FailNow()
				}
				address, err := btcutils.SafeNewAddress(spec.Address, spec.DefaultNet)
				if err != nil {
					t.Fatalf("can't create address %s(%s),Reason: %s", spec.Address, spec.DefaultNet.Name, err)
				}
				// ScriptPubKey from address and from NewPkScript should be the same
				assert.Equal(t, address.ScriptPubKey(), pkScript)
				// Expected PkScript and New PkScript should be the same
				pkScriptStr := hex.EncodeToString(pkScript)
				assert.Equal(t, spec.ExpectedPkScript, pkScriptStr)
				// Can convert PkScript back to same address.
				actualPkScript, err := txscript.ParsePkScript(address.ScriptPubKey())
				// BUG FIX: t.Fail() does not stop execution, so a parse failure
				// would nil-dereference actualPkScript below; use FailNow.
				// (Also fixes the "acualPkScript" typo.)
				if !assert.NoError(t, err) {
					t.FailNow()
				}
				assert.Equal(t, address.Decoded().String(), utils.Must(actualPkScript.Address(spec.DefaultNet)).String())
			}
		})
	}
}
// TestGetAddressTypeFromPkScript checks script-class detection for each
// supported pubkey script form.
func TestGetAddressTypeFromPkScript(t *testing.T) {
	type Spec struct {
		PubkeyScript        string
		ExpectedError       error
		ExpectedAddressType btcutils.AddressType
	}
	specs := []Spec{
		{
			PubkeyScript:        "0014602181cc89f7c9f54cb6d7607a3445e3e022895d",
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WPKH,
		},
		{
			PubkeyScript:        "5120ef8d59038dd51093fbfff794f658a07a3697b94d9e6d24e45b28abd88f10e33d",
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2TR,
		},
		{
			PubkeyScript:        "a91416eef7e84fb9821db1341b6ccef1c4a4e5ec21e487",
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2SH,
		},
		{
			PubkeyScript:        "76a914cecb25b53809991c7beef2d27bc2be49e78c684388ac",
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2PKH,
		},
		{
			PubkeyScript:        "0020cdbf909e935c855d3e8d1b61aeb9c5e3c03ae8021b286839b1a72f2e48fdba70",
			ExpectedError:       nil,
			ExpectedAddressType: btcutils.AddressP2WSH,
		},
	}
	for _, spec := range specs {
		t.Run(fmt.Sprintf("PkScript:%s", spec.PubkeyScript), func(t *testing.T) {
			pkScript, err := hex.DecodeString(spec.PubkeyScript)
			if err != nil {
				// BUG FIX: t.Fail() does not stop the test, so a fixture decode
				// failure previously fell through to the assertions below with
				// a useless pkScript; abort with Fatalf instead.
				t.Fatalf("can't decode fixture pkScript %s, Reason: %s", spec.PubkeyScript, err)
			}
			actualAddressType, actualError := btcutils.GetAddressTypeFromPkScript(pkScript)
			if spec.ExpectedError != nil {
				assert.ErrorIs(t, actualError, spec.ExpectedError)
			} else {
				// BUG FIX: the success path previously never asserted that no
				// error was returned.
				assert.NoError(t, actualError)
				assert.Equal(t, spec.ExpectedAddressType, actualAddressType)
			}
		})
	}
}

View File

@@ -0,0 +1,92 @@
package psbtutils
import (
"bytes"
"encoding/base64"
"encoding/hex"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
)
const (
	// DefaultEncoding is the encoding assumed when callers omit the
	// optional encoding argument; the default psbt encoding is hex.
	DefaultEncoding = EncodingHex
)

// Encoding identifies the textual encoding of a serialized PSBT.
type Encoding string

// Supported PSBT encodings. (The decode/encode switches also accept the
// shorthand "b64" for base64.)
const (
	EncodingBase64 Encoding = "base64"
	EncodingHex    Encoding = "hex"
)
// DecodeString decodes a hex/base64 psbt string into a psbt.Packet.
//
// encoding is optional, default is EncodingHex.
func DecodeString(psbtStr string, encoding ...Encoding) (*psbt.Packet, error) {
	packet, err := Decode([]byte(psbtStr), encoding...)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return packet, nil
}
// Decode decodes a psbt hex/base64 byte slice into a psbt.Packet.
//
// encoding is optional, default is EncodingHex.
func Decode(psbtB []byte, encoding ...Encoding) (*psbt.Packet, error) {
	enc, ok := utils.Optional(encoding)
	if !ok {
		enc = DefaultEncoding
	}
	var (
		psbtBytes []byte
		err       error
	)
	switch enc {
	case EncodingBase64, "b64":
		// BUG FIX: base64.StdEncoding.DecodedLen returns the MAXIMUM decoded
		// size; padded input decodes to fewer bytes, so the buffer must be
		// trimmed to the written length or trailing zero bytes corrupt the
		// PSBT payload. The written count was previously discarded.
		buf := make([]byte, base64.StdEncoding.DecodedLen(len(psbtB)))
		var n int
		n, err = base64.StdEncoding.Decode(buf, psbtB)
		psbtBytes = buf[:n]
	case EncodingHex:
		// hex.DecodedLen is exact for valid input; trimming is harmless and
		// keeps both branches symmetric.
		buf := make([]byte, hex.DecodedLen(len(psbtB)))
		var n int
		n, err = hex.Decode(buf, psbtB)
		psbtBytes = buf[:n]
	default:
		return nil, errors.Wrap(errs.Unsupported, "invalid encoding")
	}
	if err != nil {
		return nil, errors.Wrap(err, "can't decode psbt string")
	}
	pC, err := psbt.NewFromRawBytes(bytes.NewReader(psbtBytes), false)
	if err != nil {
		return nil, errors.Wrap(err, "can't create psbt from given psbt")
	}
	return pC, nil
}
// EncodeToString encodes a psbt.Packet into a psbt hex/base64 string.
//
// encoding is optional, default is EncodingHex.
func EncodeToString(pC *psbt.Packet, encoding ...Encoding) (string, error) {
	enc, ok := utils.Optional(encoding)
	if !ok {
		enc = DefaultEncoding
	}

	var serialized bytes.Buffer
	if err := pC.Serialize(&serialized); err != nil {
		return "", errors.Wrap(err, "can't serialize psbt")
	}
	raw := serialized.Bytes()

	switch enc {
	case EncodingBase64, "b64":
		return base64.StdEncoding.EncodeToString(raw), nil
	case EncodingHex:
		return hex.EncodeToString(raw), nil
	}
	return "", errors.Wrap(errs.Unsupported, "invalid encoding")
}

View File

@@ -0,0 +1,110 @@
package psbtutils
import (
"math"
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/pkg/btcutils"
)
// TxFee returns the fee in satoshis for the given PSBT at the given fee
// rate (sat/vB), based on the PSBT's estimated virtual size.
func TxFee(feeRate int64, p *psbt.Packet) (int64, error) {
	vsize, err := PSBTSize(p)
	if err != nil {
		return 0, errors.Wrap(err, "psbt size")
	}
	fee := math.Ceil(vsize * float64(feeRate))
	return int64(fee), nil
}
// PredictTxFee estimates the fee in satoshis for a transaction with the
// given number of inputs and outputs at the given fee rate (sat/vB),
// using fixed per-input (148 vB) and per-output (43 vB) sizes.
/**
TODO: handle edge cases like:
1. when we predict that we need to use unnecessary UTXOs
2. when we predict that we need to use more value than user have, but user do have enough for the actual transaction
Idea for solving this:
- When trying to find the best UTXOs to use, we:
	- Will not reject when user's balance is not enough, instead we will return all UTXOs even if it's not enough.
	- Will be okay returning excessive UTXOs (say we predict we need 10K satoshis, but actually we only need 5K satoshis, then we will return UTXOs enough for 10K satoshis)
- And then we:
	- Construct the actual PSBT, then select UTXOs to use accordingly,
	- If the user's balance is not enough, then we will return an error,
	- Or if when we predict we expect to use more UTXOs than the actual transaction, then we will just use what's needed.
*/
func PredictTxFee(feeRate int64, inputs, outputs int) int64 {
	vbytes := defaultOverhead
	vbytes += 148 * float64(inputs)
	vbytes += 43 * float64(outputs)
	return int64(math.Ceil(vbytes * float64(feeRate)))
}
// txSize holds the virtual-size contribution (in vBytes) of a single input
// or output of a given script type.
type txSize struct {
	Overhead float64
	Inputs   float64
	Outputs  float64
}

// defaultOverhead is the fixed per-transaction overhead in vBytes added on
// top of the per-input/per-output sizes (see the reference below).
const defaultOverhead = 10.5

// Transaction Virtual Sizes Bytes
//
// Reference: https://bitcoinops.org/en/tools/calc-size/
var txSizes = map[btcutils.TransactionType]txSize{
	btcutils.TransactionP2WPKH: {
		Inputs:  68,
		Outputs: 31,
	},
	btcutils.TransactionP2TR: {
		Inputs:  57.5,
		Outputs: 43,
	},
	btcutils.TransactionP2SH: {
		Inputs:  91,
		Outputs: 32,
	},
	btcutils.TransactionP2PKH: {
		Inputs:  148,
		Outputs: 34,
	},
	btcutils.TransactionP2WSH: {
		Inputs:  104.5,
		Outputs: 43,
	},
}
// PSBTSize returns the estimated virtual size (vBytes) of the transaction
// described by the given PSBT, summing per-type input/output sizes from
// txSizes plus the fixed defaultOverhead.
//
// Every input must carry a WitnessUtxo; inputs populated only with a
// NonWitnessUtxo are rejected with errs.InvalidArgument.
func PSBTSize(psbt *psbt.Packet) (float64, error) {
	if err := psbt.SanityCheck(); err != nil {
		return 0, errors.Wrap(errors.Join(err, errs.InvalidArgument), "psbt sanity check")
	}
	inputs := map[btcutils.TransactionType]int{}
	outputs := map[btcutils.TransactionType]int{}
	for i, input := range psbt.Inputs {
		// BUG FIX: WitnessUtxo is nil for legacy inputs (NonWitnessUtxo
		// only); dereferencing it below panicked. Fail with a typed error
		// instead.
		if input.WitnessUtxo == nil {
			return 0, errors.Wrapf(errs.InvalidArgument, "input %d has no witness utxo", i)
		}
		addrType, err := btcutils.GetAddressTypeFromPkScript(input.WitnessUtxo.PkScript)
		if err != nil {
			return 0, errors.Wrap(err, "get address type from pk script")
		}
		inputs[addrType]++
	}
	for _, output := range psbt.UnsignedTx.TxOut {
		addrType, err := btcutils.GetAddressTypeFromPkScript(output.PkScript)
		if err != nil {
			return 0, errors.Wrap(err, "get address type from pk script")
		}
		outputs[addrType]++
	}
	totalSize := defaultOverhead
	for txType, txSizeData := range txSizes {
		if inputCount, ok := inputs[txType]; ok {
			totalSize += txSizeData.Inputs * float64(inputCount)
		}
		if outputCount, ok := outputs[txType]; ok {
			totalSize += txSizeData.Outputs * float64(outputCount)
		}
	}
	return totalSize, nil
}

View File

@@ -0,0 +1,131 @@
package psbtutils_test
import (
"fmt"
"math"
"testing"
"github.com/gaze-network/indexer-network/pkg/btcutils/psbtutils"
"github.com/stretchr/testify/assert"
)
// TestPSBTSize checks the virtual-size estimate against known PSBT fixtures.
func TestPSBTSize(t *testing.T) {
	type Spec struct {
		Name          string
		PSBTString    string
		ExpectedError error
		ExpectedSize  float64
	}
	specs := []Spec{
		{
			Name:          "3-inputs-3-outputs-taproot",
			PSBTString:    "70736274ff0100fd06010100000003866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910000000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910100000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910200000000ffffffff03b0040000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f22020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f4d370f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f000000000001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012bcb3c0f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f00000000",
			ExpectedError: nil,
			ExpectedSize:  312,
		},
		{
			Name:          "mixed-segwit-taproot",
			PSBTString:    "70736274ff0100fd230202000000061f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90300000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90400000000ffffffff21c8ec368f2aff1a7baf4964e4070f52e7247ae39edfbda3976f8df4da1b72a00000000000ffffffff969e65b705e3d5071f1743a63381b3aa1ec31e1dbbbd63ab594a19ca399a58af0000000000ffffffffcca5cfd28bd6c54a851d97d029560b3047f7c6482fda7b2f2603d56ade8c95890000000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90500000000ffffffff0908070000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e0b03600000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa532a680000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d58020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e0b21f1e00000000001600144850d32c3ff585403790507793125d174a5c28e0000000000001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f220200000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa53010304830000000001012b22020000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d010304830000000001011f06432000000000001600144850d32c3ff585403790507793125d174a5c28e000000000000000000000",
			ExpectedError: nil,
			ExpectedSize:  699,
		},
		{
			Name:          "segwit-transfer-to-legacy",
			PSBTString:    "70736274ff010074020000000124ba4becfc732f3b4729784a3dd0cc2494ae890d826377fd98aeb0607feb1ace0100000000ffffffff0210270000000000001976a91422bae94117be666b593916527d55bdaf030d756e88ac25f62e000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac000000000001011fc51d2f000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac01086b024730440220759df9d109298a1ef69b9faa1786f4118f0d4d63a68cd2061e217b6090573f62022053ffa117fc21e5bf20e7d16bb786de52dc0214c9a21af87b4e92a639ef66e997012103e0cb213a46a68b1f463a4858635ee44694ce4b512788833d629840341b1219c9000000",
			ExpectedError: nil,
			ExpectedSize:  143.5,
		},
	}
	for _, spec := range specs {
		t.Run(spec.Name, func(t *testing.T) {
			p, err := psbtutils.DecodeString(spec.PSBTString)
			// BUG FIX: assert.NoError does not stop the test; a decode failure
			// previously continued with a nil packet and panicked in PSBTSize.
			if !assert.NoError(t, err) {
				t.FailNow()
			}
			size, err := psbtutils.PSBTSize(p)
			if spec.ExpectedError != nil {
				assert.ErrorIs(t, err, spec.ExpectedError)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, spec.ExpectedSize, size)
			}
		})
	}
}
// TestPredictTxFee checks the fixed-size fee estimator against hand-computed
// expectations (10.5 vB overhead + 148 vB per input + 43 vB per output).
func TestPredictTxFee(t *testing.T) {
	type testCase struct {
		FeeRate      int64
		InputsCount  int
		OutputsCount int
		ExpectedFee  int64
	}
	cases := []testCase{
		{
			FeeRate:      100,
			InputsCount:  1,
			OutputsCount: 1,
			ExpectedFee:  int64(math.Ceil((10.5 + 148 + 43) * 100)),
		},
		{
			FeeRate:      1,
			InputsCount:  99,
			OutputsCount: 99,
			ExpectedFee:  int64(math.Ceil((10.5 + (99 * 148) + (99 * 43)) * 1)),
		},
	}
	for _, tc := range cases {
		name := fmt.Sprintf("feeRate=%d:inputs=%d:outputs=%d", tc.FeeRate, tc.InputsCount, tc.OutputsCount)
		t.Run(name, func(t *testing.T) {
			actual := psbtutils.PredictTxFee(tc.FeeRate, tc.InputsCount, tc.OutputsCount)
			assert.Equal(t, tc.ExpectedFee, actual)
		})
	}
}
// TestTxFee checks fee = ceil(vsize * feeRate) against known PSBT fixtures.
func TestTxFee(t *testing.T) {
	type Spec struct {
		Name          string
		FeeRate       int64
		PSBTString    string
		ExpectedError error
		ExpectedFee   int64
	}
	specs := []Spec{
		{
			Name:          "3-inputs-3-outputs-taproot",
			FeeRate:       10,
			PSBTString:    "70736274ff0100fd06010100000003866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910000000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910100000000ffffffff866c72cfeef533940eaee49b68778e6223914ea671411ec387bdb61f620889910200000000ffffffff03b0040000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f22020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f4d370f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f000000000001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012b58020000000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f0001012bcb3c0f00000000002251205b954b2f91ded08c553551037bc71265a69a7586855ba4fdcf785a2494f0c37f00000000",
			ExpectedError: nil,
			ExpectedFee:   312 * 10,
		},
		{
			Name:          "mixed-segwit-taproot",
			FeeRate:       20,
			PSBTString:    "70736274ff0100fd230202000000061f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90300000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90400000000ffffffff21c8ec368f2aff1a7baf4964e4070f52e7247ae39edfbda3976f8df4da1b72a00000000000ffffffff969e65b705e3d5071f1743a63381b3aa1ec31e1dbbbd63ab594a19ca399a58af0000000000ffffffffcca5cfd28bd6c54a851d97d029560b3047f7c6482fda7b2f2603d56ade8c95890000000000ffffffff1f34960fef4e73c3c4c023f303c16e06f0eebb268bc0d3bac99fa78c031a45b90500000000ffffffff0908070000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e022020000000000001600144850d32c3ff585403790507793125d174a5c28e0b03600000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa532a680000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d58020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e058020000000000001600144850d32c3ff585403790507793125d174a5c28e0b21f1e00000000001600144850d32c3ff585403790507793125d174a5c28e0000000000001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f58020000000000001600144850d32c3ff585403790507793125d174a5c28e00001011f220200000000000016001459805fc1fdb9f05e190db569987c95c4f9deaa53010304830000000001012b22020000000000002251203a9ddeb6a2a327fed0f50d18778b28168e3ddb7fdfd4b05f4e438c9174d76a8d010304830000000001011f06432000000000001600144850d32c3ff585403790507793125d174a5c28e000000000000000000000",
			ExpectedError: nil,
			ExpectedFee:   699 * 20,
		},
		{
			Name:          "segwit-transfer-to-legacy",
			FeeRate:       99,
			PSBTString:    "70736274ff010074020000000124ba4becfc732f3b4729784a3dd0cc2494ae890d826377fd98aeb0607feb1ace0100000000ffffffff0210270000000000001976a91422bae94117be666b593916527d55bdaf030d756e88ac25f62e000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac000000000001011fc51d2f000000000016001476d1e072c9b8a18fa1e4be697c175e0c642026ac01086b024730440220759df9d109298a1ef69b9faa1786f4118f0d4d63a68cd2061e217b6090573f62022053ffa117fc21e5bf20e7d16bb786de52dc0214c9a21af87b4e92a639ef66e997012103e0cb213a46a68b1f463a4858635ee44694ce4b512788833d629840341b1219c9000000",
			ExpectedError: nil,
			ExpectedFee:   int64(math.Ceil((143.5) * 99)),
		},
	}
	for _, spec := range specs {
		t.Run(spec.Name, func(t *testing.T) {
			p, err := psbtutils.DecodeString(spec.PSBTString)
			// BUG FIX: assert.NoError does not stop the test; a decode failure
			// previously continued with a nil packet and panicked in TxFee.
			if !assert.NoError(t, err) {
				t.FailNow()
			}
			fee, err := psbtutils.TxFee(spec.FeeRate, p)
			if spec.ExpectedError != nil {
				assert.ErrorIs(t, err, spec.ExpectedError)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, spec.ExpectedFee, fee)
			}
		})
	}
}

View File

@@ -0,0 +1,35 @@
package psbtutils
import (
"github.com/btcsuite/btcd/btcutil/psbt"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/samber/lo"
)
// IsReadyPSBT reports whether the PSBT's inputs fully fund its outputs plus
// the network fee at the given fee rate (sat/vB). It returns true when the
// input value matches output value + fee exactly, or overpays by less than
// 1000 satoshi (tolerated overpayment threshold — TODO confirm intent).
func IsReadyPSBT(pC *psbt.Packet, feeRate int64) (bool, error) {
	// if input = output + fee then it's ready

	// Calculate tx fee
	fee, err := TxFee(feeRate, pC)
	if err != nil {
		return false, errors.Wrap(err, "calculate fee")
	}

	// BUG FIX: inputs populated only with a NonWitnessUtxo have a nil
	// WitnessUtxo; dereferencing it inside the SumBy closure below panicked.
	for i, input := range pC.Inputs {
		if input.WitnessUtxo == nil {
			return false, errors.Newf("input %d has no witness utxo", i)
		}
	}

	// sum total input and output
	totalInputValue := lo.SumBy(pC.Inputs, func(input psbt.PInput) int64 { return input.WitnessUtxo.Value })
	totalOutputValue := lo.SumBy(pC.UnsignedTx.TxOut, func(txout *wire.TxOut) int64 { return txout.Value }) + fee

	// it's perfect match
	if totalInputValue == totalOutputValue {
		return true, nil
	}

	// if input is more than output + fee but not more than 1000 satoshi,
	// then it's ready
	if totalInputValue > totalOutputValue && totalInputValue-totalOutputValue < 1000 {
		return true, nil
	}
	return false, nil
}

19
pkg/btcutils/signature.go Normal file
View File

@@ -0,0 +1,19 @@
package btcutils
import (
verifier "github.com/bitonicnl/verify-signed-message/pkg"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
)
// VerifySignature verifies a Bitcoin signed message: it checks that
// sigBase64 is a valid signature over message by the key behind address on
// the given network. A nil return means the signature is valid.
func VerifySignature(address string, message string, sigBase64 string, network common.Network) error {
	signed := verifier.SignedMessage{
		Address:   address,
		Message:   message,
		Signature: sigBase64,
	}
	if _, err := verifier.VerifyWithChain(signed, network.ChainParams()); err != nil {
		return errors.WithStack(err)
	}
	return nil
}

View File

@@ -0,0 +1,59 @@
package btcutils
import (
"testing"
"github.com/gaze-network/indexer-network/common"
"github.com/stretchr/testify/assert"
)
// TestVerifySignature exercises VerifySignature with valid signatures on
// mainnet and testnet, plus mismatched address/signature/message cases.
func TestVerifySignature(t *testing.T) {
	cases := []struct {
		name      string
		address   string
		message   string
		signature string
		network   common.Network
		wantErr   bool
	}{
		{
			name:      "valid mainnet",
			address:   "18J72YSM9pKLvyXX1XAjFXA98zeEvxBYmw",
			message:   "Test123",
			signature: "Gzhfsw0ItSrrTCChykFhPujeTyAcvVxiXwywxpHmkwFiKuUR2ETbaoFcocmcSshrtdIjfm8oXlJoTOLosZp3Yc8=",
			network:   common.NetworkMainnet,
			wantErr:   false,
		},
		{
			name:      "valid testnet",
			address:   "tb1qr97cuq4kvq7plfetmxnl6kls46xaka78n2288z",
			message:   "The outage comes at a time when bitcoin has been fast approaching new highs not seen since June 26, 2019.",
			signature: "H/bSByRH7BW1YydfZlEx9x/nt4EAx/4A691CFlK1URbPEU5tJnTIu4emuzkgZFwC0ptvKuCnyBThnyLDCqPqT10=",
			network:   common.NetworkTestnet,
			wantErr:   false,
		},
		{
			name:      "mismatched address",
			address:   "tb1qp7y2ywgrv8a4t9h47yphtgj8w759rk6vgd9ran",
			message:   "The outage comes at a time when bitcoin has been fast approaching new highs not seen since June 26, 2019.",
			signature: "H/bSByRH7BW1YydfZlEx9x/nt4EAx/4A691CFlK1URbPEU5tJnTIu4emuzkgZFwC0ptvKuCnyBThnyLDCqPqT10=",
			network:   common.NetworkTestnet,
			wantErr:   true,
		},
		{
			name:      "mismatched signature",
			address:   "tb1qr97cuq4kvq7plfetmxnl6kls46xaka78n2288z",
			message:   "The outage comes at a time when bitcoin has been fast approaching new highs not seen since June 26, 2019.",
			signature: "Gzhfsw0ItSrrTCChykFhPujeTyAcvVxiXwywxpHmkwFiKuUR2ETbaoFcocmcSshrtdIjfm8oXlJoTOLosZp3Yc8=",
			network:   common.NetworkTestnet,
			wantErr:   true,
		},
		{
			name:      "mismatched message",
			address:   "tb1qr97cuq4kvq7plfetmxnl6kls46xaka78n2288z",
			message:   "Hello World",
			signature: "H/bSByRH7BW1YydfZlEx9x/nt4EAx/4A691CFlK1URbPEU5tJnTIu4emuzkgZFwC0ptvKuCnyBThnyLDCqPqT10=",
			network:   common.NetworkTestnet,
			wantErr:   true,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := VerifySignature(tc.address, tc.message, tc.signature, tc.network)
			if tc.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}

View File

@@ -0,0 +1,10 @@
package btcutils
const (
	// TxVersion is the current latest supported transaction version.
	TxVersion = 2

	// MaxTxInSequenceNum is the maximum sequence number the sequence field
	// of a transaction input can be (0xffffffff).
	MaxTxInSequenceNum uint32 = 0xffffffff
)

View File

@@ -1,267 +0,0 @@
// lru a lru-cache package modified version of github.com/hashicorp/golang-lru
// TODO: create PR to hashicorp/golang-lru
package lru
import (
"sync"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
const (
	// DefaultEvictedBufferSize defines the default capacity of the staging
	// buffers that hold evicted key/value pairs until the user's eviction
	// callback can be invoked outside the lock.
	DefaultEvictedBufferSize = 16
)
// Cache is a thread-safe fixed size LRU cache.
//
// Eviction callbacks are never invoked while the internal lock is held:
// evicted pairs are first staged into evictedKeys/evictedVals under the
// lock (via the onEvicted method), then replayed to onEvictedCB after
// unlocking.
type Cache[K comparable, V any] struct {
	lru *simplelru.LRU[K, V]
	// evictedKeys/evictedVals stage entries evicted by the underlying LRU
	// while the lock is held.
	evictedKeys []K
	evictedVals []V
	// onEvictedCB is the externally registered eviction callback; nil when
	// the cache was created without one.
	onEvictedCB func(k K, v V)
	lock        sync.RWMutex
}
// New creates an LRU of the given size with no eviction callback.
func New[K comparable, V any](size int) (*Cache[K, V], error) {
	var noCallback func(K, V)
	return NewWithEvict(size, noCallback)
}
// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
//
// When a callback is supplied, the underlying simplelru is wired to the
// internal c.onEvicted buffering method rather than the user callback, so
// that the user callback can later be invoked outside the lock.
func NewWithEvict[K comparable, V any](size int, onEvicted func(key K, value V)) (c *Cache[K, V], err error) {
	// create a cache with default settings
	c = &Cache[K, V]{
		onEvictedCB: onEvicted,
	}
	if onEvicted != nil {
		c.initEvictBuffers()
		// Intercept evictions: simplelru calls the buffering method, not
		// the user callback directly.
		onEvicted = c.onEvicted
	}
	c.lru, err = simplelru.NewLRU(size, onEvicted)
	return
}
// initEvictBuffers (re)allocates the staging buffers used to hold evicted
// key/value pairs until callbacks can run outside the lock.
func (c *Cache[K, V]) initEvictBuffers() {
	c.evictedKeys = make([]K, 0, DefaultEvictedBufferSize)
	c.evictedVals = make([]V, 0, DefaultEvictedBufferSize)
}
// onEvicted saves an evicted key/val pair so the externally registered
// callback can be invoked outside of the critical section.
// It is registered with the underlying simplelru and runs while the cache
// lock is held, so it must not call user code.
func (c *Cache[K, V]) onEvicted(k K, v V) {
	c.evictedKeys = append(c.evictedKeys, k)
	c.evictedVals = append(c.evictedVals, v)
}
// Purge is used to completely clear the cache.
// Purging triggers c.onEvicted for every entry, filling the staging
// buffers; those are snapshotted and reset under the lock, then replayed
// to the user callback after unlocking.
func (c *Cache[K, V]) Purge() {
	var ks []K
	var vs []V
	c.lock.Lock()
	c.lru.Purge()
	if c.onEvictedCB != nil && len(c.evictedKeys) > 0 {
		ks, vs = c.evictedKeys, c.evictedVals
		c.initEvictBuffers()
	}
	c.lock.Unlock()
	// invoke callback outside of critical section (avoids deadlock if the
	// callback re-enters the cache)
	if c.onEvictedCB != nil {
		for i := 0; i < len(ks); i++ {
			c.onEvictedCB(ks[i], vs[i])
		}
	}
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache[K, V]) Add(key K, value V) (evicted bool) {
	var k K
	var v V
	c.lock.Lock()
	evicted = c.lru.Add(key, value)
	// Add evicts at most one entry, so only index 0 of the staging buffers
	// is read before they are reset.
	if c.onEvictedCB != nil && evicted {
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	// user callback runs outside the critical section
	if c.onEvictedCB != nil && evicted {
		c.onEvictedCB(k, v)
	}
	return
}
// Get looks up a key's value from the cache, updating its recent-ness.
func (c *Cache[K, V]) Get(key K) (value V, ok bool) {
	c.lock.Lock()
	defer c.lock.Unlock()
	return c.lru.Get(key)
}
// MGet looks up multiple keys' values from the cache.
// Returns a slice of values in the same order as the keys; a value is the
// zero value when its key is not found.
func (c *Cache[K, V]) MGet(keys []K) (values []V) {
	c.lock.Lock()
	defer c.lock.Unlock()
	out := make([]V, len(keys))
	for i, k := range keys {
		out[i], _ = c.lru.Get(k)
	}
	return out
}
// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache[K, V]) Contains(key K) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.lru.Contains(key)
}
// Peek returns the key's value (or the zero value if not found) without
// updating the "recently used"-ness of the key.
func (c *Cache[K, V]) Peek(key K) (value V, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.lru.Peek(key)
}
// MPeek returns multiple keys' values (zero value when not found) without
// updating the "recently used"-ness of the keys. Result order matches keys.
func (c *Cache[K, V]) MPeek(keys []K) (values []V) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	out := make([]V, len(keys))
	for i, k := range keys {
		out[i], _ = c.lru.Peek(k)
	}
	return out
}
// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache[K, V]) ContainsOrAdd(key K, value V) (ok, evicted bool) {
	var k K
	var v V
	c.lock.Lock()
	if c.lru.Contains(key) {
		c.lock.Unlock()
		return true, false
	}
	// Add evicts at most one entry, so only index 0 of the staging buffers
	// is read before they are reset.
	evicted = c.lru.Add(key, value)
	if c.onEvictedCB != nil && evicted {
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	// user callback runs outside the critical section
	if c.onEvictedCB != nil && evicted {
		c.onEvictedCB(k, v)
	}
	return false, evicted
}
// PeekOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns the previous value (if found), whether found, and whether an
// eviction occurred.
func (c *Cache[K, V]) PeekOrAdd(key K, value V) (previous V, ok, evicted bool) {
	var k K
	var v V
	c.lock.Lock()
	previous, ok = c.lru.Peek(key)
	if ok {
		c.lock.Unlock()
		return previous, true, false
	}
	// Add evicts at most one entry, so only index 0 of the staging buffers
	// is read before they are reset.
	evicted = c.lru.Add(key, value)
	if c.onEvictedCB != nil && evicted {
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	// user callback runs outside the critical section
	if c.onEvictedCB != nil && evicted {
		c.onEvictedCB(k, v)
	}
	return
}
// Remove removes the provided key from the cache.
// A removed entry is delivered to the eviction callback (the underlying
// LRU stages it via c.onEvicted), invoked after the lock is released.
func (c *Cache[K, V]) Remove(key K) (present bool) {
	var k K
	var v V
	c.lock.Lock()
	present = c.lru.Remove(key)
	if c.onEvictedCB != nil && present {
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	// user callback runs outside the critical section
	if c.onEvictedCB != nil && present {
		c.onEvictedCB(k, v)
	}
	return
}
// Resize changes the cache size, returning the number of entries evicted
// by shrinking. All evicted entries are snapshotted under the lock and
// replayed to the user callback after unlocking.
func (c *Cache[K, V]) Resize(size int) (evicted int) {
	var ks []K
	var vs []V
	c.lock.Lock()
	evicted = c.lru.Resize(size)
	if c.onEvictedCB != nil && evicted > 0 {
		ks, vs = c.evictedKeys, c.evictedVals
		c.initEvictBuffers()
	}
	c.lock.Unlock()
	// user callback runs outside the critical section
	if c.onEvictedCB != nil && evicted > 0 {
		for i := 0; i < len(ks); i++ {
			c.onEvictedCB(ks[i], vs[i])
		}
	}
	return evicted
}
// RemoveOldest removes the oldest item from the cache, returning the
// removed key/value and whether anything was removed. The removed entry is
// delivered to the eviction callback outside the lock.
func (c *Cache[K, V]) RemoveOldest() (key K, value V, ok bool) {
	var k K
	var v V
	c.lock.Lock()
	key, value, ok = c.lru.RemoveOldest()
	if c.onEvictedCB != nil && ok {
		k, v = c.evictedKeys[0], c.evictedVals[0]
		c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0]
	}
	c.lock.Unlock()
	// user callback runs outside the critical section
	if c.onEvictedCB != nil && ok {
		c.onEvictedCB(k, v)
	}
	return
}
// GetOldest returns the oldest entry without removing it or updating
// recent-ness.
func (c *Cache[K, V]) GetOldest() (key K, value V, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.lru.GetOldest()
}
// Keys returns a slice of the keys in the cache, ordered oldest first.
func (c *Cache[K, V]) Keys() []K {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.lru.Keys()
}
// Len reports the number of items currently held in the cache.
func (c *Cache[K, V]) Len() int {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.lru.Len()
}

View File

@@ -1,368 +0,0 @@
package lru
import (
"crypto/rand"
"math"
"math/big"
"testing"
)
func getRand(tb testing.TB) int64 {
out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
tb.Fatal(err)
}
return out.Int64()
}
// BenchmarkLRU_Rand drives an 8192-entry cache with uniformly random
// keys in [0, 32768), alternating one Add with one Get, and logs the
// resulting hit/miss ratio.
func BenchmarkLRU_Rand(b *testing.B) {
	l, err := New[int64, int64](8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}
	// Pre-generate the key trace so RNG cost is excluded from the timed loop.
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		trace[i] = getRand(b) % 32768
	}
	b.ResetTimer()
	var hit, miss int
	for i := 0; i < 2*b.N; i++ {
		if i%2 == 0 {
			l.Add(trace[i], trace[i])
		} else {
			if _, ok := l.Get(trace[i]); ok {
				hit++
			} else {
				miss++
			}
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
// BenchmarkLRU_Freq first fills the cache with keys from a hot range
// (16384), then reads with keys drawn from a wider range (32768),
// logging the hit/miss ratio.
func BenchmarkLRU_Freq(b *testing.B) {
	l, err := New[int64, int64](8192)
	if err != nil {
		b.Fatalf("err: %v", err)
	}
	// Even slots use the hot key range; odd slots the colder, wider one.
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		if i%2 == 0 {
			trace[i] = getRand(b) % 16384
		} else {
			trace[i] = getRand(b) % 32768
		}
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		l.Add(trace[i], trace[i])
	}
	var hit, miss int
	for i := 0; i < b.N; i++ {
		if _, ok := l.Get(trace[i]); ok {
			hit++
		} else {
			miss++
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
// TestLRU covers the core cache behaviour: capacity enforcement,
// eviction callbacks, LRU key ordering, Remove, MGet, and Purge.
func TestLRU(t *testing.T) {
	evictCounter := 0
	onEvicted := func(k int, v int) {
		// Entries are always added with key == value in this test.
		if k != v {
			t.Fatalf("Evict values not equal (%v!=%v)", k, v)
		}
		evictCounter++
	}
	l, err := NewWithEvict(128, onEvicted)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// Adding 256 entries into a 128-slot cache evicts the first 128.
	for i := 0; i < 256; i++ {
		l.Add(i, i)
	}
	if l.Len() != 128 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if evictCounter != 128 {
		t.Fatalf("bad evict count: %v", evictCounter)
	}
	// Keys() is ordered oldest-first, so position i should hold key i+128.
	for i, k := range l.Keys() {
		if v, ok := l.Get(k); !ok || v != k || v != i+128 {
			t.Fatalf("bad key: %v", k)
		}
	}
	for i := 0; i < 128; i++ {
		if _, ok := l.Get(i); ok {
			t.Fatalf("should be evicted")
		}
	}
	for i := 128; i < 256; i++ {
		if _, ok := l.Get(i); !ok {
			t.Fatalf("should not be evicted")
		}
	}
	// Remove the older half of the survivors.
	for i := 128; i < 192; i++ {
		l.Remove(i)
		if _, ok := l.Get(i); ok {
			t.Fatalf("should be deleted")
		}
	}
	l.Get(192) // expect 192 to be last key in l.Keys()
	for i, k := range l.Keys() {
		if (i < 63 && k != i+193) || (i == 63 && k != 192) {
			t.Fatalf("out of order key: %v", k)
		}
	}
	{
		// test mget over all present keys
		keys := l.Keys()
		values := l.MGet(keys)
		for i, v := range values {
			if keys[i] != v {
				t.Fatalf("[%d]bad value: %v:%v", i, keys[i], v)
			}
		}
	}
	{
		// test mget with missing keys: a miss yields the zero value.
		keys := append([]int{-1}, l.Keys()...)
		values := l.MGet(keys)
		if len(values) != len(keys) {
			t.Fatalf("bad len: %v, expected: %v", len(values), len(keys))
		}
		if values[0] != 0 {
			t.Fatalf("bad value: %v, expected: %v", values[0], 0)
		}
		for i := 1; i < len(values); i++ {
			if keys[i] != values[i] {
				t.Fatalf("[%d]bad value: %v:%v", i, keys[i], values[i])
			}
		}
	}
	// Purge empties the cache entirely.
	l.Purge()
	if l.Len() != 0 {
		t.Fatalf("bad len: %v", l.Len())
	}
	if _, ok := l.Get(200); ok {
		t.Fatalf("should contain nothing")
	}
}
// TestLRUAdd verifies that Add reports whether inserting caused an
// eviction, and that the eviction callback fires exactly then.
func TestLRUAdd(t *testing.T) {
	evictions := 0
	l, err := NewWithEvict(1, func(k int, v int) {
		evictions++
	})
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// First insert fits; no eviction expected.
	if l.Add(1, 1) || evictions != 0 {
		t.Errorf("should not have an eviction")
	}
	// Second insert into a 1-slot cache must evict.
	if !l.Add(2, 2) || evictions != 1 {
		t.Errorf("should have an eviction")
	}
}
// TestLRUContains verifies that Contains does not refresh an entry's
// recent-ness: key 1 is still the eviction victim after being checked.
func TestLRUContains(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	for k := 1; k <= 2; k++ {
		l.Add(k, k)
	}
	if !l.Contains(1) {
		t.Errorf("1 should be contained")
	}
	// Inserting a third entry must evict 1, proving Contains did not
	// promote it.
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("Contains should not have updated recent-ness of 1")
	}
}
// TestLRUContainsOrAdd verifies that ContainsOrAdd neither refreshes
// recent-ness on a hit nor skips insertion on a miss.
func TestLRUContainsOrAdd(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	// Hit path: present, nothing inserted, nothing evicted.
	contains, evicted := l.ContainsOrAdd(1, 1)
	if !contains {
		t.Errorf("1 should be contained")
	}
	if evicted {
		t.Errorf("nothing should be evicted here")
	}
	// Push 1 out (proving the hit above did not promote it), then
	// re-add it through the miss path.
	l.Add(3, 3)
	contains, evicted = l.ContainsOrAdd(1, 1)
	if contains {
		t.Errorf("1 should not have been contained")
	}
	if !evicted {
		t.Errorf("an eviction should have occurred")
	}
	if !l.Contains(1) {
		t.Errorf("now 1 should be contained")
	}
}
// test that PeekOrAdd doesn't update recent-ness
func TestLRUPeekOrAdd(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	// Hit path: existing value returned, nothing inserted or evicted.
	previous, contains, evict := l.PeekOrAdd(1, 1)
	if !contains {
		t.Errorf("1 should be contained")
	}
	if evict {
		t.Errorf("nothing should be evicted here")
	}
	if previous != 1 {
		t.Errorf("previous is not equal to 1")
	}
	// Adding 3 must evict 1, proving the peek above did not promote it.
	l.Add(3, 3)
	// NOTE(review): the miss path below re-adds via ContainsOrAdd rather
	// than PeekOrAdd — presumably copied from TestLRUContainsOrAdd;
	// confirm whether PeekOrAdd's own miss path should be asserted here.
	contains, evict = l.ContainsOrAdd(1, 1)
	if contains {
		t.Errorf("1 should not have been contained")
	}
	if !evict {
		t.Errorf("an eviction should have occurred")
	}
	if !l.Contains(1) {
		t.Errorf("now 1 should be contained")
	}
}
// TestLRUPeek verifies that Peek and MPeek return values without
// updating recent-ness.
func TestLRUPeek(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	l.Add(1, 1)
	l.Add(2, 2)
	v, ok := l.Peek(1)
	if !ok || v != 1 {
		t.Errorf("1 should be set to 1: %v, %v", v, ok)
	}
	// MPeek returns a zero value for the missing key -1 and preserves
	// positions for the rest.
	vals := l.MPeek([]int{-1, 1, 2})
	if len(vals) != 3 {
		t.Errorf("bad len: %v", len(vals))
	}
	if vals[0] != 0 {
		t.Errorf("bad value: %v, expected: %v", vals[0], 0)
	}
	if vals[1] != 1 || vals[2] != 2 {
		t.Errorf("bad vals: %v", vals)
	}
	// Adding 3 must evict 1, proving the peeks did not promote it.
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("should not have updated recent-ness of 1")
	}
}
// TestLRUResize exercises both shrinking and growing the cache.
func TestLRUResize(t *testing.T) {
	callbackCount := 0
	l, err := NewWithEvict(2, func(k int, v int) {
		callbackCount++
	})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Shrinking from 2 to 1 must evict exactly one entry and invoke the
	// callback once.
	l.Add(1, 1)
	l.Add(2, 2)
	if evicted := l.Resize(1); evicted != 1 {
		t.Errorf("1 element should have been evicted: %v", evicted)
	}
	if callbackCount != 1 {
		t.Errorf("onEvicted should have been called 1 time: %v", callbackCount)
	}
	l.Add(3, 3)
	if l.Contains(1) {
		t.Errorf("Element 1 should have been evicted")
	}

	// Growing back to 2 evicts nothing and makes room for another entry.
	if evicted := l.Resize(2); evicted != 0 {
		t.Errorf("0 elements should have been evicted: %v", evicted)
	}
	l.Add(4, 4)
	if !l.Contains(3) || !l.Contains(4) {
		t.Errorf("Cache should have contained 2 elements")
	}
}
// TestKeysAndMPeek checks Keys ordering and MPeek behaviour, including
// an empty cache and a missing key.
func TestKeysAndMPeek(t *testing.T) {
	l, err := New[int, int](2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	// Empty cache: MPeek over no keys returns an empty slice.
	if vals := l.MPeek(l.Keys()); len(vals) != 0 {
		t.Errorf("bad len: %v", len(vals))
	}
	l.Add(1, 1)
	l.Add(2, 2)
	keys := l.Keys()
	if len(keys) != 2 {
		t.Errorf("bad len: %v", len(keys))
	}
	// Keys are ordered oldest to newest.
	if keys[0] != 1 || keys[1] != 2 {
		t.Errorf("bad keys: %v", keys)
	}
	// A missing key (-1) peeks as the zero value.
	vals := l.MPeek([]int{-1, 1, 2})
	if len(vals) != 3 {
		t.Errorf("bad len: %v", len(vals))
	}
	if vals[0] != 0 {
		t.Errorf("bad value: %v, expected: %v", vals[0], 0)
	}
	if vals[1] != 1 || vals[2] != 2 {
		t.Errorf("bad vals: %v", vals)
	}
}

View File

@@ -0,0 +1,7 @@
# Proxy IP Range Resources
- Cloudflare - https://www.cloudflare.com/ips/
- GCP Load Balancer - https://cloud.google.com/load-balancing/docs/health-check-concepts#ip-ranges
- GCP Compute Engine, Customer-usable external IP address ranges - https://www.gstatic.com/ipranges/cloud.json
- Other GCP Services - https://cloud.google.com/compute/docs/faq#networking
- Other Resources - https://github.com/lord-alfred/ipranges

View File

@@ -0,0 +1,21 @@
package requestcontext
// requestcontextError carries an HTTP status code and a client-facing
// message for failures raised while building the request context.
// It implements the error interface.
var _ error = requestcontextError{}

type requestcontextError struct {
	err     error
	status  int
	message string
}

// Error returns the wrapped error's text when one is present, otherwise
// the client-facing message.
func (r requestcontextError) Error() string {
	if r.err == nil {
		return r.message
	}
	return r.err.Error()
}

// Unwrap exposes the wrapped error for errors.Is / errors.As.
func (r requestcontextError) Unwrap() error {
	return r.err
}

View File

@@ -0,0 +1,44 @@
package requestcontext
import (
"context"
"log/slog"
"net/http"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gofiber/fiber/v2"
)
// Response is the JSON envelope returned when request-context setup
// fails: Result carries a payload, Error a client-facing message.
type Response struct {
	Result any    `json:"result"`
	Error  string `json:"error,omitempty"`
}
// Option inspects or augments the context for a single request,
// returning the (possibly derived) context or an error to abort.
type Option func(ctx context.Context, c *fiber.Ctx) (context.Context, error)
// New builds a fiber middleware that threads the request through every
// Option in order, storing the resulting context back on the request
// before calling the next handler. A requestcontextError aborts with
// its own status/message; any other error yields a logged 500.
func New(opts ...Option) fiber.Handler {
	return func(c *fiber.Ctx) error {
		ctx := c.UserContext()
		var err error
		for i, opt := range opts {
			if ctx, err = opt(ctx, c); err == nil {
				continue
			}
			// A typed requestcontextError carries its own response shape.
			var rcErr requestcontextError
			if errors.As(err, &rcErr) {
				return c.Status(rcErr.status).JSON(Response{Error: rcErr.message})
			}
			logger.ErrorContext(ctx, "failed to extract request context",
				err,
				slog.String("event", "requestcontext/error"),
				slog.String("module", "requestcontext"),
				slog.Int("optionIndex", i),
			)
			return c.Status(http.StatusInternalServerError).JSON(Response{Error: "internal server error"})
		}
		c.SetUserContext(ctx)
		return c.Next()
	}
}

View File

@@ -0,0 +1,150 @@
package requestcontext
import (
"context"
"log/slog"
"net"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gofiber/fiber/v2"
)
// clientIPKey is the context key under which the resolved client IP is
// stored.
type clientIPKey struct{}

// WithClientIPConfig configures how the client IP is extracted from a
// request that may have passed through one or more proxies.
type WithClientIPConfig struct {
	// [Optional] TrustedProxiesIP is a list of all proxy IP ranges (CIDR) sitting between the server and the client.
	//
	// If it's provided, the extractor walks backwards from the last IP in the `X-Forwarded-For` header
	// and uses the first IP that's not a trusted proxy (i.e. not in the given IP ranges).
	//
	// **If you want to use this option, you should provide all of the probable proxy IP ranges.**
	//
	// This is the lowest priority.
	TrustedProxiesIP []string `env:"TRUSTED_PROXIES_IP" mapstructure:"trusted_proxies_ip"`

	// [Optional] TrustedHeader is a header name for getting the client IP. (e.g. X-Real-IP, CF-Connecting-IP, etc.)
	//
	// This is the highest priority; it will ignore the rest of the options if it's provided and valid.
	TrustedHeader string `env:"TRUSTED_HEADER" mapstructure:"trusted_proxies_header"`

	// EnableRejectMalformedRequest returns 403 Forbidden if the request came via proxies but the client IP can't be extracted.
	EnableRejectMalformedRequest bool `env:"ENABLE_REJECT_MALFORMED_REQUEST" envDefault:"false" mapstructure:"enable_reject_malformed_request"`
}
// WithClientIP setup client IP context with XFF Spoofing prevention support.
//
// If request is from proxies, it will use first IP from `X-Forwarded-For` header by default.
func WithClientIP(config WithClientIPConfig) Option {
	var trustedProxies trustedProxy
	if len(config.TrustedProxiesIP) > 0 {
		proxy, err := newTrustedProxy(config.TrustedProxiesIP)
		if err != nil {
			// Misconfigured CIDR ranges are a startup error, not a per-request one.
			logger.Panic("Failed to parse trusted proxies", err)
		}
		trustedProxies = proxy
	}
	return func(ctx context.Context, c *fiber.Ctx) (context.Context, error) {
		// Highest priority: extract client IP from the configured trusted header.
		if config.TrustedHeader != "" {
			headerIP := c.Get(config.TrustedHeader)
			// validate ip from header
			if ip := net.ParseIP(headerIP); ip != nil {
				return context.WithValue(ctx, clientIPKey{}, headerIP), nil
			}
		}
		// Extract client IP from XFF header.
		// NOTE(review): parseIPs keeps nil entries for malformed XFF values,
		// so a garbage-only XFF still has len(ips) > 0 — confirm intended.
		rawIPs := c.IPs()
		ips := parseIPs(rawIPs)
		// If the request is directly from client, we can use direct remote IP address
		if len(ips) == 0 {
			return context.WithValue(ctx, clientIPKey{}, c.IP()), nil
		}
		// Walk back and find first IP that's not trusted proxy.
		// NOTE(review): a malformed (nil) entry is never trusted, so
		// ips[i].String() may yield "<nil>" here — verify against callers.
		if len(trustedProxies) > 0 {
			for i := len(ips) - 1; i >= 0; i-- {
				if !trustedProxies.IsTrusted(ips[i]) {
					return context.WithValue(ctx, clientIPKey{}, ips[i].String()), nil
				}
			}
			// If all IPs are trusted proxies, return first IP in XFF header
			return context.WithValue(ctx, clientIPKey{}, rawIPs[0]), nil
		}
		// Finally, if we can't extract client IP, return forbidden
		if config.EnableRejectMalformedRequest {
			logger.WarnContext(ctx, "IP Spoofing detected, returning 403 Forbidden",
				slog.String("event", "requestcontext/ip_spoofing_detected"),
				slog.String("module", "requestcontext/with_clientip"),
				slog.String("ip", c.IP()),
				slog.Any("ips", rawIPs),
			)
			return nil, requestcontextError{
				status:  fiber.StatusForbidden,
				message: "not allowed to access",
			}
		}
		// Fallback to first IP in XFF header
		return context.WithValue(ctx, clientIPKey{}, rawIPs[0]), nil
	}
}
// GetClientIP returns the client IP stored in the context, or an empty
// string when none was stored.
//
// Warning: the request context middleware must run before this is used.
func GetClientIP(ctx context.Context) string {
	ip, _ := ctx.Value(clientIPKey{}).(string)
	return ip
}
// trustedProxy is the set of CIDR ranges considered trusted proxies.
type trustedProxy []*net.IPNet

// newTrustedProxy create a new trusted proxies instance for preventing IP spoofing (XFF Attacks)
func newTrustedProxy(ranges []string) (trustedProxy, error) {
	nets, err := parseCIDRs(ranges)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return nets, nil
}
// IsTrusted reports whether ip falls inside any trusted proxy range.
// A nil IP is never trusted.
func (t trustedProxy) IsTrusted(ip net.IP) bool {
	if ip == nil {
		return false
	}
	for _, cidr := range t {
		if cidr.Contains(ip) {
			return true
		}
	}
	return false
}
// parseCIDRs converts CIDR strings into *net.IPNet values, failing on
// the first malformed range.
func parseCIDRs(ranges []string) ([]*net.IPNet, error) {
	out := make([]*net.IPNet, 0, len(ranges))
	for _, cidr := range ranges {
		_, network, err := net.ParseCIDR(cidr)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to parse CIDR for %q", cidr)
		}
		out = append(out, network)
	}
	return out, nil
}
func parseIPs(ranges []string) []net.IP {
ip := make([]net.IP, 0, len(ranges))
for _, r := range ranges {
ip = append(ip, net.ParseIP(r))
}
return ip
}

View File

@@ -0,0 +1,47 @@
package requestcontext
import (
"context"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/requestid"
fiberutils "github.com/gofiber/fiber/v2/utils"
)
type requestIdKey struct{}
// GetRequestId get requestId from context. If not found, return empty string
//
// Warning: Request context should be setup before using this function
func GetRequestId(ctx context.Context) string {
if id, ok := ctx.Value(requestIdKey{}).(string); ok {
return id
}
return ""
}
// WithRequestId returns an Option that guarantees every request carries
// a request ID: it reuses fiber's stored ID when present, otherwise the
// incoming header value, otherwise a freshly generated UUID. The ID is
// mirrored to the response header, fiber locals, the context, and the
// context-aware logger.
func WithRequestId() Option {
	return func(ctx context.Context, c *fiber.Ctx) (context.Context, error) {
		id, ok := c.Locals(requestid.ConfigDefault.ContextKey).(string)
		if !ok || id == "" {
			// Fall back to the request header, generating a UUID when absent.
			id = c.Get(requestid.ConfigDefault.Header, fiberutils.UUID())
			// Echo the ID on the response and stash it in fasthttp locals.
			c.Set(requestid.ConfigDefault.Header, id)
			c.Locals(requestid.ConfigDefault.ContextKey, id)
		}
		// Expose the request ID via the context and the context logger.
		ctx = context.WithValue(ctx, requestIdKey{}, id)
		return logger.WithContext(ctx, "requestId", id), nil
	}
}

View File

@@ -0,0 +1,115 @@
package requestlogger
import (
"log/slog"
"net/http"
"strings"
"time"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
"github.com/gofiber/fiber/v2"
)
// Config controls what the request logger records and whether the
// INFO-level access records are emitted at all.
type Config struct {
	WithRequestHeader    bool     `env:"REQUEST_HEADER" envDefault:"false" mapstructure:"request_header"` // log request headers (minus HiddenRequestHeaders)
	WithRequestQuery     bool     `env:"REQUEST_QUERY" envDefault:"false" mapstructure:"request_query"`   // log the raw query string
	Disable              bool     `env:"DISABLE" envDefault:"false" mapstructure:"disable"`               // Disable logger level `INFO`
	HiddenRequestHeaders []string `env:"HIDDEN_REQUEST_HEADERS" mapstructure:"hidden_request_headers"`    // header names (case-insensitive) to omit from logs
}
// New setup request context and information
//
// It runs the rest of the middleware stack first, then logs one
// structured record per request with "request" and "response" attribute
// groups. Handler errors and 5xx statuses are logged at ERROR level;
// Config.Disable suppresses only the INFO-level records.
func New(config Config) fiber.Handler {
	// Normalize the hidden-header list once for case-insensitive lookup.
	hiddenRequestHeaders := make(map[string]struct{}, len(config.HiddenRequestHeaders))
	for _, header := range config.HiddenRequestHeaders {
		hiddenRequestHeaders[strings.TrimSpace(strings.ToLower(header))] = struct{}{}
	}
	return func(c *fiber.Ctx) error {
		start := time.Now()

		// Continue stack
		err := c.Next()

		end := time.Now()
		latency := end.Sub(start)
		status := c.Response().StatusCode()

		baseAttrs := []slog.Attr{
			slog.String("event", "api_request"),
			slog.Int64("latency", latency.Milliseconds()),
			slog.String("latencyHuman", latency.String()),
		}

		// prep request attributes
		requestAttributes := []slog.Attr{
			slog.Time("time", start),
			slog.String("method", c.Method()),
			slog.String("host", c.Hostname()),
			slog.String("path", c.Path()),
			slog.String("route", c.Route().Path),
			slog.String("ip", requestcontext.GetClientIP(c.UserContext())),
			slog.String("remoteIP", c.Context().RemoteIP().String()),
			slog.Any("x-forwarded-for", c.IPs()),
			slog.String("user-agent", string(c.Context().UserAgent())),
			slog.Any("params", c.AllParams()),
			slog.Int("length", len((c.Body()))),
		}

		// prep response attributes
		responseAttributes := []slog.Attr{
			slog.Time("time", end),
			slog.Int("status", status),
			slog.Int("length", len(c.Response().Body())),
		}

		// request query (opt-in)
		if config.WithRequestQuery {
			requestAttributes = append(requestAttributes, slog.String("query", string(c.Request().URI().QueryString())))
		}

		// request headers (opt-in), skipping any configured as hidden
		if config.WithRequestHeader {
			kv := []any{}
			for k, v := range c.GetReqHeaders() {
				if _, found := hiddenRequestHeaders[strings.ToLower(k)]; found {
					continue
				}
				kv = append(kv, slog.Any(k, v))
			}
			requestAttributes = append(requestAttributes, slog.Group("header", kv...))
		}

		level := slog.LevelInfo
		if err != nil || status >= http.StatusInternalServerError {
			level = slog.LevelError

			// error attributes: synthesize a fiber error when the handler
			// returned nil but the status is still 5xx.
			logErr := err
			if logErr == nil {
				logErr = fiber.NewError(status)
			}
			baseAttrs = append(baseAttrs, slog.Any("error", logErr))
		}

		// Suppress INFO records when logging is disabled; errors still log.
		if config.Disable && level == slog.LevelInfo {
			return errors.WithStack(err)
		}

		logger.LogAttrs(c.UserContext(), level, "Request Completed", append([]slog.Attr{
			{
				Key:   "request",
				Value: slog.GroupValue(requestAttributes...),
			},
			{
				Key:   "response",
				Value: slog.GroupValue(responseAttributes...),
			},
		}, baseAttrs...)...,
		)
		return errors.WithStack(err)
	}
}

View File

@@ -17,13 +17,3 @@ sql:
sql_package: "pgx/v5"
rename:
id: "Id"
- schema: "./modules/brc20/database/postgresql/migrations"
queries: "./modules/brc20/database/postgresql/queries"
engine: "postgresql"
gen:
go:
package: "gen"
out: "./modules/brc20/internal/repository/postgres/gen"
sql_package: "pgx/v5"
rename:
id: "Id"