Compare commits

...

51 Commits

Author SHA1 Message Date
Planxnx
aace33b382 fix(httpclient): support base url query params 2024-07-04 15:39:04 +07:00
Planxnx
8760baf42b chore: remove unused comment 2024-07-04 00:03:36 +07:00
Planxnx
5aca9f7f19 perf(httpclient): reduce base url parsing operation 2024-07-03 23:58:20 +07:00
Planxnx
07aa84019f fix(httpclient): can't support baseURL path 2024-07-03 23:57:40 +07:00
Thanee Charattrakool
a5fc803371 Merge pull request #29 from gaze-network/develop
feat: release v0.2.4
2024-07-02 15:57:44 +07:00
Planxnx
72ca151fd3 feat(httpclient): support content-encoding 2024-07-02 15:53:18 +07:00
Gaze
53a4d1a4c3 Merge branch 'main' into develop 2024-06-30 21:04:08 +07:00
Gaze
3322f4a034 ci: update action file name 2024-06-30 21:03:57 +07:00
Planxnx
dcb220bddb Merge branch 'main' into develop 2024-06-30 20:17:13 +07:00
gazenw
b6ff7e41bd docs: update README.md 2024-06-30 20:12:44 +07:00
gazenw
7cb717af11 feat(runes): get txs by block range (#28)
* feat(runes): get txs by block range

* feat(runes): validate block range

* perf(runes): limit 10k txs

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>
2024-06-30 18:45:23 +07:00
Gaze
0d1ae0ef5e Merge branch 'main' into develop 2024-06-27 00:12:13 +07:00
Thanee Charattrakool
81ba7792ea fix: create error handler middleware (#27) 2024-06-27 00:11:22 +07:00
Gaze
b5851a39ab Merge branch 'main' into develop 2024-06-22 21:15:06 +07:00
Gaze
b44fb870a3 feat: add query params to req logger 2024-06-22 21:00:02 +07:00
Gaze
373ea50319 feat(logger): support env config 2024-06-20 18:52:56 +07:00
Gaze
a1d7524615 feat(btcutils): make btcutils.Address comparable support 2024-06-14 19:38:01 +07:00
Gaze
415a476478 Merge branch 'main' into develop 2024-06-14 16:55:39 +07:00
Gaze
f63505e173 feat(btcutils): use chain params instead common.network 2024-06-14 16:55:28 +07:00
Gaze
65a69ddb68 Merge remote-tracking branch 'origin/main' into develop 2024-06-14 16:48:48 +07:00
Thanee Charattrakool
4f5d1f077b feat(btcutils): add bitcoin utility functions (#26)
* feat(btcutils): add bitcoin utility functions

* feat(btcutils): add bitcoin signature verification
2024-06-14 16:48:22 +07:00
Gaze
c133006c82 Merge branch 'main' into develop 2024-06-12 23:39:24 +07:00
Thanee Charattrakool
51fd1f6636 feat: move requestip config to http config (#25) 2024-06-12 22:08:03 +07:00
Thanee Charattrakool
a7bc6257c4 feat(api): add request context and logger middleware (#24)
* feat(api): add request context and logger middleware

* feat(api): add cors and favicon middleware

* fix: solve wrapcheck linter warning

* feat: configurable hidden request headers
2024-06-12 21:47:29 +07:00
gazenw
3bb7500c87 feat: update docker version 2024-06-07 13:55:55 +07:00
Gaze
8c92893d4a feat: release v0.2.1 2024-05-31 01:16:34 +07:00
Nut Pinyo
d84e30ed11 fix: implement Shutdown() for processors (#22) 2024-05-31 01:13:12 +07:00
Thanee Charattrakool
d9fa217977 feat: use current indexed block for first prev block (#23)
* feat: use current indexed block for first prev block

* fix: forgot to set next prev header
2024-05-31 01:11:37 +07:00
Nut Pinyo
d4b694aa57 fix: implement Shutdown() for processors (#22) 2024-05-30 23:57:41 +07:00
Gaze
9febf40e81 Merge remote-tracking branch 'origin/main' into develop 2024-05-27 14:33:00 +07:00
Thanee Charattrakool
709b00ec0e build: add Docker cache mount for Go modules (#21)
* build: add cache mount for go modules

* doc(docker): update TZ description

* build: use entrypoint instead cmd exec

* build: add dockerignore

* build: add modules dir to image for migration command

* build: update dockerignore

* doc: fix typo

Co-authored-by: gazenw <163862510+gazenw@users.noreply.github.com>

---------

Co-authored-by: gazenw <163862510+gazenw@users.noreply.github.com>
2024-05-23 17:10:03 +07:00
gazenw
50ae103502 doc: update docker compose example 2024-05-21 14:44:59 +07:00
gazenw
c0242bd555 Update README.md 2024-05-20 18:37:32 +07:00
gazenw
6d4f1d0e87 Release v0.2.0
Release v0.2.0
2024-05-16 14:50:03 +07:00
Gaze
b9fac74026 Merge remote-tracking branch 'origin/main' into develop
# Conflicts:
#	README.md
#	cmd/cmd_run.go
2024-05-16 14:37:37 +07:00
Nut Pinyo
62ecd7ea49 fix: runes tag parsing (#19) 2024-05-16 13:54:13 +07:00
gazenw
66ea2766a0 refactor: Separated modules initiator (#17)
* refactor: separated modules initiator

* fix: able to run with empty modules

* doc: update cmd desc

* refactor: sorting code flow

* fix: invalid apionly flow

* refactor: remove unnecessary flags

* feat: add default value for runes config

* refactor: use config instead bind flag to opts struct

* chore: remove unused comment

* refactor(runes): invoke only when in case

* feat: add http server default port

* feat: add logger context

* doc: update readme

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>
2024-05-15 16:14:29 +07:00
gazenw
575c144428 fix: invalid pgx version (#18)
Co-authored-by: Planxnx <planxnx@users.noreply.github.com>
2024-05-15 03:19:10 +07:00
gazenw
f8fbd67bd8 fix: invalid pgx version (#18)
Co-authored-by: Planxnx <planxnx@users.noreply.github.com>
2024-05-15 03:17:54 +07:00
gazenw
c75b62bdf9 Remove bitcoin indexer (#16)
* doc: update README.md

* fix: remove bitcoin module

* fix: remove more config
2024-05-14 19:29:43 +07:00
gazenw
cc2649dd64 Update README.md: fix datasource 2024-05-13 14:19:32 +07:00
gazenw
d96370454b Remove bitcoin module (#15)
* fix: remove bitcoin module

* fix: remove more config
2024-05-13 14:18:45 +07:00
gazenw
c9a5c6d217 refactor: Generic Indexer (#14)
* refactor(indexer): init generic indexer

* refactor(btc): update datasource

* refactor: remove old indexers pkg

* doc: update comment

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>
Co-authored-by: Planxnx <thanee@cleverse.com>
2024-05-08 16:15:33 +07:00
Gaze
86716c1915 feat: auto maxproc only for run command 2024-05-08 14:28:06 +07:00
gazenw
371d1fe008 doc: update README.md 2024-05-08 14:13:21 +07:00
Gaze
c6057d9511 perf(btc): remove unnecessary db index 2024-05-07 21:29:12 +07:00
Gaze
d37be5997b fix: remove parts of README.md 2024-04-30 10:44:53 +07:00
gazenw
fcdecd4046 feat: v0.1.0 release (#13)
* fix: don't remove first block

* fix: make etching_terms nullable

* fix: fix panic if empty pkscript

* chore: change testnet starting block

* feat: more logs

* fix: extract tapscript bug

* feat: more logs

* fix: switch pk to block height

* chore: remove redundant log

* fix: repo

* fix: not found error

* fix: golangci-lint

* feat: add etching tx hash to rune entries

* feat: stop main if indexer failed

* fix: check balance after populating current balance

* fix: sql ambiguous column

* feat: add tx hash and out index in tx output

* fix: actually use transactions to write db

* fix: create rune entry states only during flushes

* fix: mint cap reached off by one

* fix: debug log unsafe

* feat: prevent processing of txs before activation height

* feat: add rune number to rune entry

* feat: include new rune entries in event hash and flushing

* refactor(config): separate init and get config func

Co-authored-by: Gaze <dev@gaze.network>

* feat: remove annoying log

Co-authored-by: Gaze <dev@gaze.network>

* feat: mod tidy

Co-authored-by: Gaze <dev@gaze.network>

* refactor: move main to root

Co-authored-by: Gaze <dev@gaze.network>

* feat(cli): create cli commands

Co-authored-by: Gaze <dev@gaze.network>

* refactor: move main logic to command

Co-authored-by: Gaze <dev@gaze.network>

* doc: remove unused desc

Co-authored-by: Gaze <dev@gaze.network>

* refactor: test structure in runestone_test.go

* fix: edict flaws were ignored

* feat: more tests

* refactor(cli): add local flag

Co-authored-by: Gaze <dev@gaze.network>

* feat: set symbol limit to utf8.MaxRune

* refactor(cli): flags for each module

Co-authored-by: Gaze <dev@gaze.network>

* feat(cli): support db selection

Co-authored-by: Gaze <dev@gaze.network>

* fix: remove temp code

Co-authored-by: Gaze <dev@gaze.network>

* fix: get data from cache in processor first, then dg

* feat(cli): add version command

Co-authored-by: Gaze <dev@gaze.network>

* doc(cli): add refactor plan

Co-authored-by: Gaze <dev@gaze.network>

* refactor(cli): rename files

Co-authored-by: Gaze <dev@gaze.network>

* feat: add main.go

Co-authored-by: Gaze <dev@gaze.network>

* feat: more tests

* feat: add overflow err

* feat: finish runestone tests

* refactor(cli): separate protocol config and cli flag

Co-authored-by: Gaze <dev@gaze.network>

* chore(btc): update example config

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add get block header to datasource interface

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): reorg handling

Co-authored-by: Gaze <dev@gaze.network>

* fix: interface

Co-authored-by: Gaze <dev@gaze.network>

* fix: rename postgres config key

* fix: migrated runes indexer integration to new cli

* fix: commit every block

* feat(btc): add revert data query

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add revert data to processor

Co-authored-by: Gaze <dev@gaze.network>

* feat: implement public errors

* fix: use errs in api

* refactor: move api and usecase outside of internal

* feat: add custom opcode check for datapush

* fix: break if input utxo is not P2TR

* fix: zero len destination case

* fix: get the rest of transaction data in GetTransaction

* refactor: create subscription utils tools

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add btc_database from datasource

Co-authored-by: Gaze <dev@gaze.network>

* doc(btc): add note

Co-authored-by: Gaze <dev@gaze.network>

* wip(btc): implement prepare range func

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add pg queries for datasource

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): update queries

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): implement repo for get blocks

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): update dg

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): return nil if errors

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): update fetch async for db datasource

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add get block header from db for reorg handling

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add todo notes

Co-authored-by: Gaze <dev@gaze.network>

* feat: implement get tx by hash

* fix: rename func

* fix: rename func

* fix: rename func

* fix: fix get transaction by hash

* feat: integrate bitcoin client db to main

* fix: reduce chunk size

* fix: stop main if bitcoin indexer failed

* fix: stop main if runes indexer failed

* fix: move stop() inside goroutine

* chore: add log

* fix: duplicate rune entry number

* feat(btc): add witness utils

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): witness datamodel parsing

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): invalid table name

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): remove unique index for hash

Co-authored-by: Gaze <dev@gaze.network>

* doc: add todo

Co-authored-by: Gaze <dev@gaze.network>

* feat(logger): remove error verbose

Co-authored-by: Gaze <dev@gaze.network>

* feat: support postgresql db

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add err notfound

Co-authored-by: Gaze <dev@gaze.network>

* fix: invalid pgx version

Co-authored-by: Gaze <dev@gaze.network>

* fix: invalid indexer flow

Co-authored-by: Gaze <dev@gaze.network>

* feat: refactor runes api

* feat: implement http server

* fix: mount runes api

* fix: error handler

* fix: first empty state error

Co-authored-by: Gaze <dev@gaze.network>

* fix: off by one confirmation

* ci: ignore RollBack error

* fix: change WithPublicMessage to be prefix

* feat: bump cstream version

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): nullable pkscript

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): change rollback style

Co-authored-by: Gaze <dev@gaze.network>

* refactor: move runes out of internal

* feat: rename id field to runeId in rune transaction

* feat(btc): update index

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add default current block

Co-authored-by: Gaze <dev@gaze.network>

* doc: add note

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): use int64 to store sequence

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): upgrade data type for numbers

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc):  upgrade data type for idx

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): get indexed block impl

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): add common.ZeroHash

Co-authored-by: Gaze <dev@gaze.network>

* feat: add chainparam

* feat: implement get transactions

* fix: wrong condition for non-OP_RETURN output

* feat(btc): add verify indexer states

Co-authored-by: Gaze <dev@gaze.network>

* refactor: sorting code

Co-authored-by: Gaze <dev@gaze.network>

* feat: fix interface

* feat(btc): update chunk size

Co-authored-by: Gaze <dev@gaze.network>

* feat: add rune_etched column in rune transaction

* fix: missing field in create

* feat: add runeEtched in get transactions

* feat: implement get token info

* feat: add holders count in token info

* feat: implement get holders

* fix: return a new repository when beginning a new tx

* fix: rename type

* feat: add pkscript to outpoint balance

* feat: implement get utxos by address api

* fix: spend outpoint bug

* feat: implement get balances by address batch

* feat: sort balances result by amount

* ci: create Dockerfile

Co-authored-by: Gaze <dev@gaze.network>

* ci: add arg run

Co-authored-by: Gaze <dev@gaze.network>

* perf: add automaxprocs

Co-authored-by: Gaze <dev@gaze.network>

* chore: add performance logging

Co-authored-by: Gaze <dev@gaze.network>

* chore: add performance logger for debugging

Co-authored-by: Gaze <dev@gaze.network>

* fix: empty etched at

* fix: revert data sequentially

* fix: remove unused funcs

* fix: main.go

* feat: add flag --api-only to run cmd

* fix: create index

* fix: don't add zero mint to unallocated

* fix: ignore zero burn amount

* feat(reorg): add reorg detail

Co-authored-by: Gaze <dev@gaze.network>

* fix: wrong index type

* feat: implement reporting client to report runes blocks

* feat: implement report node

* feat(runes): add latest block api

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): use logger warn

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): txouts aren't reverted when spent outputs have to be reverted

Co-authored-by: Gaze <dev@gaze.network>

* fix: annoying error when unsubscribe fetcher

Co-authored-by: Gaze <dev@gaze.network>

* refactor(btc): readable code

Co-authored-by: Gaze <dev@gaze.network>

* fix(indexer): fix subscription closed before process when success fetch

Co-authored-by: Gaze <dev@gaze.network>

* fix: remove module enum

* fix: increase max reorg limit

* feat: add starting height for runes mainnet

* fix(btc): fix `with` modified same row twice

Co-authored-by: Gaze <dev@gaze.network>

* fix(runes): handling latest block not found

Co-authored-by: Gaze <dev@gaze.network>

* feat: add decimals in get transactions

* fix: wrong condition

* feat: add more index

* feat: implement get transactions by pkscript

* feat: allow query by rune id too

* feat: more comments

* perf(btc): bitcoin indexer performance optimization (#4)

* feat(btc): not null to witness

Co-authored-by: Gaze <dev@gaze.network>

* perf(btc): add batch insert txin

Co-authored-by: Gaze <dev@gaze.network>

* perf(btc): batch insert txout

Co-authored-by: Gaze <dev@gaze.network>

* perf(btc): batch insert transaction

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): remove old queries

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): typo

Co-authored-by: Gaze <dev@gaze.network>

* perf(btc): batch insert blocks (#5)

Co-authored-by: Gaze <gazenw@users.noreply.github.com>

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>

* feat(btc): Duplicate coinbase transaction handling (#7)

* feat(btc): tx_hash can be duplicated in block v1

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): duplicate tx will use same txin/txout from previous tx

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): prevent revert block v1 data

If you really want to revert data from before block version 2, you should reset the database and reindex the data instead.

Co-authored-by: Gaze <dev@gaze.network>

* doc(btc): update list duplicate tx hash

Co-authored-by: Gaze <dev@gaze.network>

* doc(btc): update docs

Co-authored-by: Gaze <dev@gaze.network>

* fix(btc): use last v1 block instead

Co-authored-by: Gaze <dev@gaze.network>

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>

* feat: add ping handler

* fix: type

Co-authored-by: Gaze <dev@gaze.network>

* doc: add refactor note

Co-authored-by: Gaze <dev@gaze.network>

* ci: add golang linter and test runner gh action

* ci: use go-test-action@v0

* ci: annotate test result

* ci: update running flag

* fix: try to fix malformed import path

* feat: add mock test

* ci: remove annotation ci

* ci: add annotate test result

* chore: remove unused

* feat: try testify

* feat: remove test

* ci: add go test on macos, windows and go latest version

* ci: test building

* feat: remove mock code

* ci: add sqlc diff checker action (#10)

* feat: Graceful shutdown (#8)

* feat: add shutdown function for indexer

Co-authored-by: Gaze <dev@gaze.network>

* feat: add force shutdown

Co-authored-by: Gaze <dev@gaze.network>

* revert

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): remove unused

Co-authored-by: Gaze <dev@gaze.network>

* style: go fmt

Co-authored-by: Gaze <dev@gaze.network>

* feat: separate context for worker and application

* feat: increase force shutdown timeout

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): update logging

Co-authored-by: Gaze <dev@gaze.network>

* feat(btc): update shutdown function

Co-authored-by: Gaze <dev@gaze.network>

* feat: remove wg for shutdown

Co-authored-by: Gaze <dev@gaze.network>

* feat: refactor shutdown flow

Co-authored-by: Gaze <dev@gaze.network>

* feat: update shutdown flow

Co-authored-by: Gaze <dev@gaze.network>

* feat: update naming

Co-authored-by: Gaze <dev@gaze.network>

* feat: update force shutdown logic

Co-authored-by: Gaze <dev@gaze.network>

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>

* feat: check reporting config name

* fix: use db config in bitcoin module for runes datasource

* Add migrate commands (#2)

* feat: add migrate up

* feat: add down migration

* fix: example

* feat: change description

* fix: hardcode migration source directory

* Update README.md for public release. (#11)

* feat: initial draft for README.md

* fix: remove some sections

* feat: add block reporting to first description

* fix: reduce redundancy

* feat: update README.md

* Update README.md

* feat: update README.md

* fix: update config.yaml in README

* fix: remove redundant words

* fix: change default datasource

* fix: config.yaml comments

* feat: update README.md

* refactor(logger): format logging (#12)

* feat(logger): format main logger

* feat(logger): use duration ms for gcp output

* refactor(logger): bitcoin node logger

* refactor(logger): indexer logger

* refactor(logger): fix cmd logger

* refactor(logger): logger in config package

* refactor(logger): set pgx error log level debug

* refactor(logger): btcclient datasource

* refactor: processor name

* refactor(logger): runes logger

* refactor(logger): update logger

* fix(runes): wrong btc db datasource

* refactor(logger): remove unnecessary debug log

* refactor: update logger in indexer

* fix(logger): deadlock in load()

* fix: remove unused

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>

* feat(btc): remove unused func

* fix: fix golangci-lint error

* fix(pg): update logger level

* doc: update config example

* feat: go mod tidy

* doc: update readme

* fix: panic caused by unhandled error

* doc: update example config

* doc: update example config in readme

* feat(logger): only log error stacktrace when debug mode is on

* feat(reporting): handling invalid config error

* feat(pg): handling invalid config error

* fix: panic in get_token_info

---------

Co-authored-by: Gaze <gazenw@users.noreply.github.com>
Co-authored-by: Planxnx <thanee@cleverse.com>
Co-authored-by: Thanee Charattrakool <37617738+Planxnx@users.noreply.github.com>
2024-04-29 15:16:10 +07:00
Thanee Charattrakool
5f9cdd5af1 feat: introduce opensource license (GPL v3) 2024-04-27 03:14:24 +07:00
Thanee Charattrakool
ac9132b163 ci: Add Golang Lint and Test CI (#9)
* ci: add golang linter and test runner gh action

* ci: use go-test-action@v0

* ci: annotate test result

* ci: update running flag

* fix: try to fix malformed import path

* feat: add mock test

* ci: remove annotation ci

* ci: add annotate test result

* chore: remove unused

* feat: try testify

* feat: remove test

* ci: add go test on macos, windows and go latest version

* ci: test building

* feat: remove mock code
2024-04-25 15:27:47 +07:00
Gaze
142f6bda69 doc: add contribution, PR template and guideline doc
Co-authored-by: Gaze <dev@gaze.network>
2024-04-25 01:42:18 +07:00
153 changed files with 16810 additions and 5 deletions

18
.dockerignore Normal file

@@ -0,0 +1,18 @@
.git
.gitignore
.github
.vscode
**/*.md
**/*.log
.DS_Store
# Docker
Dockerfile
.dockerignore
docker-compose.yml
# Go
.golangci.yaml
cmd.local
config.*.y*ml
config.y*ml

1
.github/CODE_OF_CONDUCT.md vendored Normal file

@@ -0,0 +1 @@
# Contributor Covenant Code of Conduct

34
.github/CONTRIBUTING.md vendored Normal file

@@ -0,0 +1,34 @@
# Contributing
Please note: we have a [code of conduct](https://github.com/gaze-network/gaze-indexer/blob/main/.github/CODE_OF_CONDUCT.md), please follow it in all your interactions with the Gaze Network project.
## Pull Requests or Commits
#### Message structured
```plaintext
<type>(optional scope):<description>
```
The `<type>` must be one of the following:
> feat:, refactor:, fix:, doc:, style:, perf:, test:, chore:, ci:, build:
- feat(runes): add Runes module to the project
- refactor: change project structure
- fix(btc): fix chain reorganization issue
- doc: update \`run\` command documentation
- style: fix linting issues
- perf: improve performance of the bitcoin node datasource
- test(runes): add unit tests for etching logic
- chore: bump dependencies versions
- ci: update CI configuration
- build: update Dockerfile to use alpine
# 👍 Contribute
If you want to say **thank you** and/or support the active development of `Fiber`:
1. Add a [GitHub Star](https://github.com/gaze-network/gaze-indexer/stargazers) to the project.
2. Follow and mention our [Twitter (𝕏)](https://twitter.com/Gaze_Network).
3. Write a review or tutorial on [Medium](https://medium.com/), [Dev.to](https://dev.to/) or personal blog.

22
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file

@@ -0,0 +1,22 @@
## Description
Please provide a clear and concise description of the changes you've made and the problem they address. Include the purpose of the change, any relevant issues it solves, and the benefits it brings to the project. If this change introduces new features or adjustments, highlight them here.
Fixes # (issue)
## Type of change
What types of changes does your code introduce to Appium?
_Put an `x` in the boxes that apply_
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] Enhancement (improvement to existing features and functionality)
- [ ] Documentation update (changes to documentation)
- [ ] Performance improvement (non-breaking change which improves efficiency)
- [ ] Code consistency (non-breaking change which improves code reliability and robustness)
## Commit formatting
Please follow the commit message conventions for an easy way to identify the purpose or intention of a commit. Check out our commit message conventions in the [CONTRIBUTING.md](https://github.com/gaze-network/gaze-indexer/blob/main/.github/CONTRIBUTING.md#pull-requests-or-commits)

77
.github/workflows/code-analysis.yml vendored Normal file

@@ -0,0 +1,77 @@
name: Code Analysis & Test

on:
  workflow_dispatch:
  pull_request:
    branches:
      - develop
      - main
    paths:
      - "go.mod"
      - "go.sum"
      - "**.go"
      - ".golangci.yaml"
      - ".github/workflows/code-analysis.yml"

jobs:
  lint:
    strategy:
      matrix:
        os: ["ubuntu-latest"]
    name: Lint (${{ matrix.os }})
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: "0"
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version-file: "go.mod"
          cache-dependency-path: "**/*.sum"
          cache: true # caching and restoring go modules and build outputs.
      - name: Lint
        uses: reviewdog/action-golangci-lint@v2
        with: # https://github.com/reviewdog/action-golangci-lint#inputs
          go_version_file: "go.mod"
          workdir: ./
          golangci_lint_flags: "--config=./.golangci.yaml --verbose --new-from-rev=${{ github.event.pull_request.base.sha }}"
          fail_on_error: true
  test:
    strategy:
      matrix:
        os: ["ubuntu-latest", "macos-latest", "windows-latest"]
        go-version: ["1.22.x", "1.x"] # minimum version and latest version
    name: Test (${{ matrix.os }}/${{ matrix.go-version }})
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: "0"
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go-version }}
          cache: true # caching and restoring go modules and build outputs.
      - run: echo "GOVERSION=$(go version)" >> $GITHUB_ENV
      - name: Build
        run: go build -v ./...
      - name: Test
        run: go test -json ./... > test_output.json
      - name: Summary Test Results
        if: always()
        uses: robherley/go-test-action@v0
        with:
          fromJSONFile: test_output.json
      - name: Annotate Test Results
        if: always()
        uses: guyarb/golang-test-annotations@v0.5.1
        with:
          test-results: test_output.json
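
The test job above runs `go build -v ./...` and `go test -json ./...` across the OS/Go-version matrix and feeds the JSON output to the summary and annotation actions. For illustration only, a minimal table-driven test of the kind such a run would pick up might look like the sketch below (the package, function, and cases are hypothetical, not taken from this repository):

```go
package example

import "testing"

// Sum is a stand-in function; in the real repository the existing packages are tested instead.
func Sum(a, b int) int { return a + b }

// TestSum is a minimal table-driven test that `go test -json ./...` would execute
// and the summary/annotation steps above would report on.
func TestSum(t *testing.T) {
	tests := []struct {
		name string
		a, b int
		want int
	}{
		{name: "zero", a: 0, b: 0, want: 0},
		{name: "positive", a: 2, b: 3, want: 5},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := Sum(tt.a, tt.b); got != tt.want {
				t.Errorf("Sum(%d, %d) = %d, want %d", tt.a, tt.b, got, tt.want)
			}
		})
	}
}
```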

28
.github/workflows/sqlc-verify.yml vendored Normal file

@@ -0,0 +1,28 @@
name: Sqlc ORM Framework Verify

on:
  workflow_dispatch:
  pull_request:
    branches:
      - develop
      - main
    paths:
      - "sqlc.yaml"
      - "**.sql"
      - ".github/workflows/sqlc-verify.yml"

jobs:
  sqlc-diff:
    name: Sqlc Diff Checker
    runs-on: "ubuntu-latest" # "self-hosted", "ubuntu-latest", "macos-latest", "windows-latest"
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: "0"
      - name: Setup Sqlc
        uses: sqlc-dev/setup-sqlc@v4
        with:
          sqlc-version: "1.26.0"
      - name: Check Diff
        run: sqlc diff

.golangci.yaml

@@ -51,6 +51,8 @@ linters:
    - prealloc # performance - Find slice declarations that could potentially be pre-allocated, https://github.com/alexkohler/prealloc
    - gosec # bugs - Inspects source code for security problems
    - wrapcheck # style, error - Checks that errors returned from external packages are wrapped, we should wrap the error from external library
    - depguard # import - Go linter that checks if package imports are in a list of acceptable packages.
    - sloglint # style, format Ensure consistent code style when using log/slog.
    ### Annoying Linters
    # - dupl # style - code clone detection
@@ -66,20 +68,36 @@ linters-settings:
  misspell:
    locale: US
    ignore-words: []
  errcheck:
    exclude-functions:
      - (github.com/jackc/pgx/v5.Tx).Rollback
  wrapcheck:
    ignoreSigs:
      - .Errorf(
      - errors.New(
      - errors.Unwrap(
      - errors.Join(
      - .Wrap(
      - .Wrapf(
      - .WithMessage(
      - .WithMessagef(
      - .WithStack(
      - errs.NewPublicError(
      - errs.WithPublicMessage(
      - withstack.WithStackDepth(
    ignoreSigRegexps:
      - \.New.*Error\(
    ignorePackageGlobs:
      - "github.com/gofiber/fiber/*"
  goconst:
    ignore-tests: true
    min-occurrences: 5
  depguard:
    rules:
      main:
        # Packages that are not allowed.
        deny:
          - pkg: "github.com/pkg/errors"
            desc: Should be replaced by "cockroachdb/errors" or "cleverse/go-utilities" package
  sloglint:
    attr-only: true
    key-naming-case: snake
    args-on-sep-lines: true
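
Taken together, these settings push code toward a particular shape: errors returned from external packages must be wrapped (wrapcheck, with the listed signatures exempt), `github.com/pkg/errors` is banned in favor of `cockroachdb/errors` or `cleverse/go-utilities` (depguard), and `log/slog` calls must use attribute helpers with snake_case keys on separate lines (sloglint). A minimal sketch of compliant code is shown below; the package, function, and attribute names are illustrative assumptions, not code from this repository:

```go
package example

import (
	"context"
	"log/slog"

	"github.com/cockroachdb/errors" // allowed by depguard, unlike github.com/pkg/errors
)

// indexBlock wraps the error from a lower-level call (satisfying wrapcheck; ".Wrap(" is
// in ignoreSigs above) and logs with attr-only, snake_case attributes on separate lines
// as sloglint requires.
func indexBlock(ctx context.Context, height int64) error {
	if err := fetchBlock(ctx, height); err != nil {
		return errors.Wrap(err, "failed to fetch block")
	}
	slog.InfoContext(ctx, "block indexed",
		slog.Int64("block_height", height),
	)
	return nil
}

// fetchBlock is a placeholder for an external datasource call.
func fetchBlock(ctx context.Context, height int64) error { return nil }
```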

3
.vscode/extensions.json vendored Normal file

@@ -0,0 +1,3 @@
{
"recommendations": ["dotenv.dotenv-vscode", "golang.go"]
}

82
.vscode/settings.json vendored Normal file

@@ -0,0 +1,82 @@
{
"editor.formatOnSave": true,
"files.exclude": {
"**/.git": true,
"**/.svn": true,
"**/.hg": true,
"**/CVS": true,
"**/.DS_Store": true
},
"search.exclude": {
"**/node_modules": true,
"**/build": true,
"**/dist": true
},
"[json]": {
"editor.formatOnSave": true,
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
// Golang
"[go]": {
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": "explicit"
},
"editor.codeLens": true
},
"go.useLanguageServer": true,
"go.lintTool": "golangci-lint",
"go.lintFlags": ["--fix"],
"go.lintOnSave": "package",
"go.toolsManagement.autoUpdate": true,
"gopls": {
"formatting.gofumpt": true, // https://github.com/mvdan/gofumpt
"ui.codelenses": {
"gc_details": true
},
"build.directoryFilters": ["-**/node_modules"],
"ui.semanticTokens": true,
"ui.completion.usePlaceholders": false,
"ui.diagnostic.analyses": {
// https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md
// "fieldalignment": false,
"nilness": true,
"shadow": false,
"unusedparams": true,
"unusedvariable": true,
"unusedwrite": true, // ineffective assignment
"useany": true
},
"ui.diagnostic.staticcheck": false, // use golangci-lint instead
"ui.diagnostic.annotations": {
// CMD+P and run command `Go: Toggle gc details`
"bounds": true,
"escape": true,
"inline": true,
"nil": true
},
"ui.documentation.hoverKind": "FullDocumentation"
},
"go.editorContextMenuCommands": {
// Right click on code to use this command
"toggleTestFile": false,
"addTags": false,
"removeTags": false,
"fillStruct": true,
"testAtCursor": false,
"testFile": false,
"testPackage": false,
"generateTestForFunction": true,
"generateTestForFile": false,
"generateTestForPackage": false,
"addImport": false,
"testCoverage": false,
"playground": false,
"debugTestAtCursor": false,
"benchmarkAtCursor": false
},
"dotenv.enableAutocloaking": false,
"protoc": {
"options": ["--proto_path=pb"]
}
}

28
Dockerfile Normal file

@@ -0,0 +1,28 @@
FROM golang:1.22 as builder
WORKDIR /app
COPY go.mod go.sum ./
RUN --mount=type=cache,target=/go/pkg/mod/ go mod download
COPY ./ ./
ENV GOOS=linux
ENV CGO_ENABLED=0
RUN --mount=type=cache,target=/go/pkg/mod/ \
go build -o main ./main.go
FROM alpine:latest
WORKDIR /app
RUN apk --no-cache add ca-certificates tzdata
COPY --from=builder /app/main .
COPY --from=builder /app/modules ./modules
# You can set TZ identifier to change the timezone, See https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# ENV TZ=US/Central
ENTRYPOINT ["/app/main"]

674
LICENSE Normal file

@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

156
README.md
View File

@@ -1 +1,155 @@
# Gaze Indexer Network
<!-- omit from toc -->
# Gaze Indexer
Gaze Indexer is an open-source and modular indexing client for Bitcoin meta-protocols with **Unified Consistent APIs** across fungible token protocols.
Gaze Indexer is built with **modularity** in mind, allowing users to run all modules in one monolithic instance with a single command, or as a distributed cluster of micro-services.
Gaze Indexer serves as a foundation for building ANY meta-protocol indexer, with efficient data fetching, reorg detection, and a database migration tool.
This allows developers to focus on what **truly** matters: Meta-protocol indexing logic. New meta-protocols can be easily added by implementing new modules.
- [Modules](#modules)
- [1. Runes](#1-runes)
- [Installation](#installation)
- [Prerequisites](#prerequisites)
- [1. Hardware Requirements](#1-hardware-requirements)
- [2. Prepare Bitcoin Core RPC server.](#2-prepare-bitcoin-core-rpc-server)
- [3. Prepare database.](#3-prepare-database)
- [4. Prepare `config.yaml` file.](#4-prepare-configyaml-file)
- [Install with Docker (recommended)](#install-with-docker-recommended)
- [Install from source](#install-from-source)
## Modules
### 1. Runes
The Runes Indexer is our first meta-protocol indexer. It indexes Runes states, transactions, runestones, and balances using Bitcoin transactions.
It comes with a set of APIs for querying historical Runes data. See our [API Reference](https://api-docs.gaze.network) for full details.
## Installation
### Prerequisites
#### 1. Hardware Requirements
Each module has different hardware requirements.
| Module | CPU | RAM |
| ------ | --------- | ---- |
| Runes | 0.5 cores | 1 GB |
#### 2. Prepare Bitcoin Core RPC server.
Gaze Indexer needs to fetch transaction data from a Bitcoin Core RPC server, either self-hosted or through a managed provider like QuickNode.
To self-host a Bitcoin Core node, see https://bitcoin.org/en/full-node.
#### 3. Prepare database.
Gaze Indexer has first-class support for PostgreSQL. If you wish to use other databases, you can implement your own database repository that satisfies each module's Data Gateway interface.
Here is our minimum database disk space requirement for each module.
| Module | Database Storage (current) | Database Storage (in 1 year) |
| ------ | -------------------------- | ---------------------------- |
| Runes | 10 GB | 150 GB |
#### 4. Prepare `config.yaml` file.
```yaml
# config.yaml
logger:
  output: TEXT # Output format for logs. current supported formats: "TEXT" | "JSON" | "GCP"
  debug: false
# Network to run the indexer on. Current supported networks: "mainnet" | "testnet"
network: mainnet
# Bitcoin Core RPC configuration options.
bitcoin_node:
  host: "" # [Required] Host of Bitcoin Core RPC (without https://)
  user: "" # Username to authenticate with Bitcoin Core RPC
  pass: "" # Password to authenticate with Bitcoin Core RPC
  disable_tls: false # Set to true to disable tls
# Block reporting configuration options. See Block Reporting section for more details.
reporting:
  disabled: false # Set to true to disable block reporting to Gaze Network. Default is false.
  base_url: "https://indexer.api.gaze.network" # Defaults to "https://indexer.api.gaze.network" if left empty
  name: "" # [Required if not disabled] Name of this indexer to show on the Gaze Network dashboard
  website_url: "" # Public website URL to show on the dashboard. Can be left empty.
  indexer_api_url: "" # Public url to access this indexer's API. Can be left empty if you want to keep your indexer private.
# HTTP server configuration options.
http_server:
  port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers.
# Meta-protocol modules configuration options.
modules:
  # Configuration options for Runes module. Can be removed if not used.
  runes:
    database: "postgres" # Database to store Runes data. current supported databases: "postgres"
    datasource: "bitcoin-node" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node".
    api_handlers: # API handlers to enable. current supported handlers: "http"
      - http
    postgres:
      host: "localhost"
      port: 5432
      user: "postgres"
      password: "password"
      db_name: "postgres"
      # url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.
```
### Install with Docker (recommended)
We will be using `docker-compose` for our installation guide. Make sure the `docker-compose.yaml` file is in the same directory as the `config.yaml` file.
```yaml
# docker-compose.yaml
services:
  gaze-indexer:
    image: ghcr.io/gaze-network/gaze-indexer:v0.2.1
    container_name: gaze-indexer
    restart: unless-stopped
    ports:
      - 8080:8080 # Expose HTTP server port to host
    volumes:
      - "./config.yaml:/app/config.yaml" # mount config.yaml file to the container as "/app/config.yaml"
    command: ["/app/main", "run", "--modules", "runes"] # Put module flags after the "run" command to select which modules to run.
```
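With both files in place, you can typically bring the indexer up with Docker Compose (assuming a recent Docker Engine with the Compose plugin; use `docker-compose up -d` if you are on the standalone binary):

```bash
docker compose up -d    # start the indexer in the background
docker compose logs -f  # follow the indexer logs
```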
### Install from source
1. Install `go` version 1.22 or higher. See Go installation guide [here](https://go.dev/doc/install).
2. Clone this repository.
```bash
git clone https://github.com/gaze-network/gaze-indexer.git
cd gaze-indexer
```
3. Build the main binary.
```bash
# Get dependencies
go mod download
# Build the main binary
go build -o gaze main.go
```
4. Run database migrations with the `migrate` command and module flags.
```bash
./gaze migrate up --runes --database postgres://postgres:password@localhost:5432/postgres
```
5. Start the indexer with the `run` command and module flags.
```bash
./gaze run --modules runes
```
If `config.yaml` is not located at `./app/config.yaml`, use the `--config` flag to specify the path to the `config.yaml` file.
```bash
./gaze run --modules runes --config /path/to/config.yaml
```
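As a quick sanity check after installation, the `version` sub-command (see `cmd/cmd_version.go` further down in this diff) prints the indexer version, and its `--module` flag prints a specific module's version:

```bash
./gaze version                 # core indexer version
./gaze version --module runes  # Runes module version
```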

59
cmd/cmd.go Normal file
View File

@@ -0,0 +1,59 @@
package cmd
import (
"context"
"log/slog"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/spf13/cobra"
)
var (
// root command
cmd = &cobra.Command{
Use: "gaze",
Long: `Description of gaze indexer`,
}
// sub-commands
cmds = []*cobra.Command{
NewVersionCommand(),
NewRunCommand(),
NewMigrateCommand(),
}
)
// Execute runs the root command
func Execute(ctx context.Context) {
var configFile string
// Add global flags
flags := cmd.PersistentFlags()
flags.StringVar(&configFile, "config", "", "config file, E.g. `./config.yaml`")
flags.String("network", "mainnet", "network to connect to, E.g. `mainnet` or `testnet`")
// Bind flags to configuration
config.BindPFlag("network", flags.Lookup("network"))
// Initialize configuration and logger on start command
cobra.OnInitialize(func() {
// Initialize configuration
config := config.Parse(configFile)
// Initialize logger
if err := logger.Init(config.Logger); err != nil {
logger.PanicContext(ctx, "Something went wrong, can't init logger", slogx.Error(err), slog.Any("config", config.Logger))
}
})
// Register sub-commands
cmd.AddCommand(cmds...)
// Execute command
if err := cmd.ExecuteContext(ctx); err != nil {
// Cobra will print the error message by default
logger.DebugContext(ctx, "Error executing command", slogx.Error(err))
}
}
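For context, `Execute` takes the root context from the caller. A minimal entrypoint driving it could look like the sketch below; the repository's actual `main.go` is not part of this diff, so the signal wiring here is an assumption rather than the project's real code.

```go
package main

import (
	"context"
	"os"
	"os/signal"
	"syscall"

	"github.com/gaze-network/indexer-network/cmd"
)

func main() {
	// Cancel the root context on SIGINT/SIGTERM so sub-commands can shut down gracefully.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	cmd.Execute(ctx)
}
```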

20
cmd/cmd_migrate.go Normal file
View File

@@ -0,0 +1,20 @@
package cmd
import (
"github.com/gaze-network/indexer-network/cmd/migrate"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/spf13/cobra"
)
func NewMigrateCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "migrate",
Short: "Migrate database schema",
}
cmd.AddCommand(
migrate.NewMigrateUpCommand(),
migrate.NewMigrateDownCommand(),
)
return cmd
}

263
cmd/cmd_run.go Normal file
View File

@@ -0,0 +1,263 @@
package cmd
import (
"context"
"fmt"
"log/slog"
"net/http"
"os"
"os/signal"
"runtime"
"strings"
"syscall"
"time"
"github.com/btcsuite/btcd/rpcclient"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/modules/runes"
"github.com/gaze-network/indexer-network/pkg/automaxprocs"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/middleware/errorhandler"
"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
"github.com/gaze-network/indexer-network/pkg/middleware/requestlogger"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/compress"
"github.com/gofiber/fiber/v2/middleware/cors"
"github.com/gofiber/fiber/v2/middleware/favicon"
fiberrecover "github.com/gofiber/fiber/v2/middleware/recover"
"github.com/gofiber/fiber/v2/middleware/requestid"
"github.com/samber/do/v2"
"github.com/samber/lo"
"github.com/spf13/cobra"
)
// Register Modules
var Modules = do.Package(
do.LazyNamed("runes", runes.New),
)
func NewRunCommand() *cobra.Command {
// Create command
runCmd := &cobra.Command{
Use: "run",
Short: "Start indexer-network service",
RunE: func(cmd *cobra.Command, args []string) error {
if err := automaxprocs.Init(); err != nil {
logger.Error("Failed to set GOMAXPROCS", slogx.Error(err))
}
return runHandler(cmd, args)
},
}
// Add local flags
flags := runCmd.Flags()
flags.Bool("api-only", false, "Run only API server")
flags.String("modules", "", "Enable specific modules to run. E.g. `runes,brc20`")
// Bind flags to configuration
config.BindPFlag("api_only", flags.Lookup("api-only"))
config.BindPFlag("enable_modules", flags.Lookup("modules"))
return runCmd
}
const (
shutdownTimeout = 60 * time.Second
)
func runHandler(cmd *cobra.Command, _ []string) error {
conf := config.Load()
// Validate inputs and configurations
{
if !conf.Network.IsSupported() {
return errors.Wrapf(errs.Unsupported, "%q network is not supported", conf.Network.String())
}
}
// Initialize application process context
ctx, stop := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
defer stop()
injector := do.New(Modules)
do.ProvideValue(injector, conf)
do.ProvideValue(injector, ctx)
// Initialize Bitcoin RPC client
do.Provide(injector, func(i do.Injector) (*rpcclient.Client, error) {
conf := do.MustInvoke[config.Config](i)
client, err := rpcclient.New(&rpcclient.ConnConfig{
Host: conf.BitcoinNode.Host,
User: conf.BitcoinNode.User,
Pass: conf.BitcoinNode.Pass,
DisableTLS: conf.BitcoinNode.DisableTLS,
HTTPPostMode: true,
}, nil)
if err != nil {
return nil, errors.Wrap(err, "invalid Bitcoin node configuration")
}
// Check Bitcoin RPC connection
{
start := time.Now()
logger.InfoContext(ctx, "Connecting to Bitcoin Core RPC Server...", slogx.String("host", conf.BitcoinNode.Host))
if err := client.Ping(); err != nil {
return nil, errors.Wrapf(err, "can't connect to Bitcoin Core RPC Server %q", conf.BitcoinNode.Host)
}
logger.InfoContext(ctx, "Connected to Bitcoin Core RPC Server", slog.Duration("latency", time.Since(start)))
}
return client, nil
})
// Initialize reporting client
do.Provide(injector, func(i do.Injector) (*reportingclient.ReportingClient, error) {
conf := do.MustInvoke[config.Config](i)
if conf.Reporting.Disabled {
return nil, nil
}
reportingClient, err := reportingclient.New(conf.Reporting)
if err != nil {
if errors.Is(err, errs.InvalidArgument) {
return nil, errors.Wrap(err, "invalid reporting configuration")
}
return nil, errors.Wrap(err, "can't create reporting client")
}
return reportingClient, nil
})
// Initialize HTTP server
do.Provide(injector, func(i do.Injector) (*fiber.App, error) {
app := fiber.New(fiber.Config{
AppName: "Gaze Indexer",
ErrorHandler: func(c *fiber.Ctx, err error) error {
logger.ErrorContext(c.UserContext(), "Something went wrong, unhandled api error",
slogx.String("event", "api_unhandled_error"),
slogx.Error(err),
)
return errors.WithStack(c.Status(http.StatusInternalServerError).JSON(fiber.Map{
"error": "Internal Server Error",
}))
},
})
app.
Use(favicon.New()).
Use(cors.New()).
Use(requestid.New()).
Use(requestcontext.New(
requestcontext.WithRequestId(),
requestcontext.WithClientIP(conf.HTTPServer.RequestIP),
)).
Use(requestlogger.New(conf.HTTPServer.Logger)).
Use(fiberrecover.New(fiberrecover.Config{
EnableStackTrace: true,
StackTraceHandler: func(c *fiber.Ctx, e interface{}) {
buf := make([]byte, 1024) // bufLen = 1024
buf = buf[:runtime.Stack(buf, false)]
logger.ErrorContext(c.UserContext(), "Something went wrong, panic in http handler", slogx.Any("panic", e), slog.String("stacktrace", string(buf)))
},
})).
Use(errorhandler.New()).
Use(compress.New(compress.Config{
Level: compress.LevelDefault,
}))
// Health check
app.Get("/", func(c *fiber.Ctx) error {
return errors.WithStack(c.SendStatus(http.StatusOK))
})
return app, nil
})
// Initialize worker context to separate worker's lifecycle from main process
ctxWorker, stopWorker := context.WithCancel(context.Background())
defer stopWorker()
// Add logger context
ctxWorker = logger.WithContext(ctxWorker, slogx.Stringer("network", conf.Network))
// Run modules
{
modules := lo.Uniq(conf.EnableModules)
modules = lo.Map(modules, func(item string, _ int) string { return strings.TrimSpace(item) })
modules = lo.Filter(modules, func(item string, _ int) bool { return item != "" })
for _, module := range modules {
ctx := logger.WithContext(ctxWorker, slogx.String("module", module))
indexer, err := do.InvokeNamed[indexer.IndexerWorker](injector, module)
if err != nil {
if errors.Is(err, do.ErrServiceNotFound) {
return errors.Errorf("Module %q is not supported", module)
}
return errors.Wrapf(err, "can't init module %q", module)
}
// Run Indexer
if !conf.APIOnly {
go func() {
// stop main process if indexer stopped
defer stop()
logger.InfoContext(ctx, "Starting Gaze Indexer")
if err := indexer.Run(ctx); err != nil {
logger.PanicContext(ctx, "Something went wrong, error during running indexer", slogx.Error(err))
}
}()
}
}
}
// Run API server
httpServer := do.MustInvoke[*fiber.App](injector)
go func() {
// stop main process if API stopped
defer stop()
logger.InfoContext(ctx, "Started HTTP server", slog.Int("port", conf.HTTPServer.Port))
if err := httpServer.Listen(fmt.Sprintf(":%d", conf.HTTPServer.Port)); err != nil {
logger.PanicContext(ctx, "Something went wrong, error during running HTTP server", slogx.Error(err))
}
}()
// Stop application if worker context is done
go func() {
<-ctxWorker.Done()
defer stop()
logger.InfoContext(ctx, "Gaze Indexer Worker is stopped. Stopping application...")
}()
logger.InfoContext(ctxWorker, "Gaze Indexer started")
// Wait for interrupt signal to gracefully stop the server
<-ctx.Done()
// Force shutdown if timeout exceeded or got signal again
go func() {
defer os.Exit(1)
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
defer stop()
select {
case <-ctx.Done():
logger.FatalContext(ctx, "Received exit signal again. Force shutdown...")
case <-time.After(shutdownTimeout + 15*time.Second):
logger.FatalContext(ctx, "Shutdown timeout exceeded. Force shutdown...")
}
}()
if err := injector.Shutdown(); err != nil {
logger.PanicContext(ctx, "Failed while gracefully shutting down", slogx.Error(err))
}
return nil
}
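One detail worth calling out from the flags above: `--api-only` maps to `conf.APIOnly`, which skips starting the indexer workers and serves only the HTTP API on top of already-indexed data. A possible invocation:

```bash
./gaze run --modules runes --api-only
```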

47
cmd/cmd_version.go Normal file
View File

@@ -0,0 +1,47 @@
package cmd
import (
"fmt"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/constants"
"github.com/gaze-network/indexer-network/modules/runes"
"github.com/spf13/cobra"
)
var versions = map[string]string{
"": constants.Version,
"runes": runes.Version,
}
type versionCmdOptions struct {
Modules string
}
func NewVersionCommand() *cobra.Command {
opts := &versionCmdOptions{}
cmd := &cobra.Command{
Use: "version",
Short: "Show indexer-network version",
RunE: func(cmd *cobra.Command, args []string) error {
return versionHandler(opts, cmd, args)
},
}
flags := cmd.Flags()
flags.StringVar(&opts.Modules, "module", "", `Show version of a specific module. E.g. "runes"`)
return cmd
}
func versionHandler(opts *versionCmdOptions, _ *cobra.Command, _ []string) error {
version, ok := versions[opts.Modules]
if !ok {
// fmt.Fprintln(cmd.ErrOrStderr(), "Unknown module")
return errors.Wrap(errs.Unsupported, "Invalid module name")
}
fmt.Println(version)
return nil
}

125
cmd/migrate/cmd_down.go Normal file
View File

@@ -0,0 +1,125 @@
package migrate
import (
"fmt"
"net/url"
"strconv"
"strings"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/samber/lo"
"github.com/spf13/cobra"
)
type migrateDownCmdOptions struct {
DatabaseURL string
Runes bool
All bool
}
type migrateDownCmdArgs struct {
N int
}
func (a *migrateDownCmdArgs) ParseArgs(args []string) error {
if len(args) > 0 {
// assume args already validated by cobra to be len(args) <= 1
n, err := strconv.Atoi(args[0])
if err != nil {
return errors.Wrap(err, "failed to parse N")
}
if n < 0 {
return errors.New("N must be a positive integer")
}
a.N = n
}
return nil
}
func NewMigrateDownCommand() *cobra.Command {
opts := &migrateDownCmdOptions{}
cmd := &cobra.Command{
Use: "down [N]",
Short: "Apply all or N down migrations",
Args: cobra.MaximumNArgs(1),
Example: `gaze migrate down --database "postgres://postgres:postgres@localhost:5432/gaze-indexer?sslmode=disable"`,
RunE: func(cmd *cobra.Command, args []string) error {
// args already validated by cobra
var downArgs migrateDownCmdArgs
if err := downArgs.ParseArgs(args); err != nil {
return errors.Wrap(err, "failed to parse args")
}
return migrateDownHandler(opts, cmd, downArgs)
},
}
flags := cmd.Flags()
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes down migrations")
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")
flags.BoolVar(&opts.All, "all", false, "Confirm apply ALL down migrations without prompt")
return cmd
}
func migrateDownHandler(opts *migrateDownCmdOptions, _ *cobra.Command, args migrateDownCmdArgs) error {
if opts.DatabaseURL == "" {
return errors.New("--database is required")
}
databaseURL, err := url.Parse(opts.DatabaseURL)
if err != nil {
return errors.Wrap(err, "failed to parse database URL")
}
if _, ok := supportedDrivers[databaseURL.Scheme]; !ok {
return errors.Errorf("unsupported database driver: %s", databaseURL.Scheme)
}
// prevent accidental down all migrations
if args.N == 0 && !opts.All {
input := ""
fmt.Print("Are you sure you want to apply all down migrations? (y/N):")
fmt.Scanln(&input)
if !lo.Contains([]string{"y", "yes"}, strings.ToLower(input)) {
return nil
}
}
applyDownMigrations := func(module string, sourcePath string, migrationTable string) error {
newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}})
sourceURL := "file://" + sourcePath
m, err := migrate.New(sourceURL, newDatabaseURL.String())
if err != nil {
if strings.Contains(err.Error(), "no such file or directory") {
return errors.Wrap(errs.InternalError, "migrations directory not found")
}
return errors.Wrap(err, "failed to open database")
}
m.Log = &consoleLogger{
prefix: fmt.Sprintf("[%s] ", module),
}
if args.N == 0 {
m.Log.Printf("Applying down migrations...\n")
err = m.Down()
} else {
m.Log.Printf("Applying %d down migrations...\n", args.N)
err = m.Steps(-args.N)
}
if err != nil {
if !errors.Is(err, migrate.ErrNoChange) {
return errors.Wrapf(err, "failed to apply %s down migrations", module)
}
m.Log.Printf("No more down migrations to apply\n")
}
return nil
}
if opts.Runes {
if err := applyDownMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
return nil
}

110
cmd/migrate/cmd_up.go Normal file
View File

@@ -0,0 +1,110 @@
package migrate
import (
"fmt"
"net/url"
"strconv"
"strings"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/spf13/cobra"
)
type migrateUpCmdOptions struct {
DatabaseURL string
Runes bool
}
type migrateUpCmdArgs struct {
N int
}
func (a *migrateUpCmdArgs) ParseArgs(args []string) error {
if len(args) > 0 {
// assume args already validated by cobra to be len(args) <= 1
n, err := strconv.Atoi(args[0])
if err != nil {
return errors.Wrap(err, "failed to parse N")
}
a.N = n
}
return nil
}
func NewMigrateUpCommand() *cobra.Command {
opts := &migrateUpCmdOptions{}
cmd := &cobra.Command{
Use: "up [N]",
Short: "Apply all or N up migrations",
Args: cobra.MaximumNArgs(1),
Example: `gaze migrate up --database "postgres://postgres:postgres@localhost:5432/gaze-indexer?sslmode=disable"`,
RunE: func(cmd *cobra.Command, args []string) error {
// args already validated by cobra
var upArgs migrateUpCmdArgs
if err := upArgs.ParseArgs(args); err != nil {
return errors.Wrap(err, "failed to parse args")
}
return migrateUpHandler(opts, cmd, upArgs)
},
}
flags := cmd.Flags()
flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes up migrations")
flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on")
return cmd
}
func migrateUpHandler(opts *migrateUpCmdOptions, _ *cobra.Command, args migrateUpCmdArgs) error {
if opts.DatabaseURL == "" {
return errors.New("--database is required")
}
databaseURL, err := url.Parse(opts.DatabaseURL)
if err != nil {
return errors.Wrap(err, "failed to parse database URL")
}
if _, ok := supportedDrivers[databaseURL.Scheme]; !ok {
return errors.Errorf("unsupported database driver: %s", databaseURL.Scheme)
}
applyUpMigrations := func(module string, sourcePath string, migrationTable string) error {
newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}})
sourceURL := "file://" + sourcePath
m, err := migrate.New(sourceURL, newDatabaseURL.String())
if err != nil {
if strings.Contains(err.Error(), "no such file or directory") {
return errors.Wrap(errs.InternalError, "migrations directory not found")
}
return errors.Wrap(err, "failed to open database")
}
m.Log = &consoleLogger{
prefix: fmt.Sprintf("[%s] ", module),
}
if args.N == 0 {
m.Log.Printf("Applying up migrations...\n")
err = m.Up()
} else {
m.Log.Printf("Applying %d up migrations...\n", args.N)
err = m.Steps(args.N)
}
if err != nil {
if !errors.Is(err, migrate.ErrNoChange) {
return errors.Wrapf(err, "failed to apply %s up migrations", module)
}
m.Log.Printf("Migrations already up-to-date\n")
}
return nil
}
if opts.Runes {
if err := applyUpMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil {
return errors.WithStack(err)
}
}
return nil
}

22
cmd/migrate/logger.go Normal file
View File

@@ -0,0 +1,22 @@
package migrate
import (
"fmt"
"github.com/golang-migrate/migrate/v4"
)
var _ migrate.Logger = (*consoleLogger)(nil)
type consoleLogger struct {
prefix string
verbose bool
}
func (l *consoleLogger) Printf(format string, v ...interface{}) {
fmt.Printf(l.prefix+format, v...)
}
func (l *consoleLogger) Verbose() bool {
return l.verbose
}

24
cmd/migrate/migrate.go Normal file
View File

@@ -0,0 +1,24 @@
package migrate
import "net/url"
const (
runesMigrationSource = "modules/runes/database/postgresql/migrations"
)
func cloneURLWithQuery(u *url.URL, newQuery url.Values) *url.URL {
clone := *u
query := clone.Query()
for key, values := range newQuery {
for _, value := range values {
query.Add(key, value)
}
}
clone.RawQuery = query.Encode()
return &clone
}
var supportedDrivers = map[string]struct{}{
"postgres": {},
"postgresql": {},
}
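To illustrate `cloneURLWithQuery`: it copies the URL and merges the extra query parameters, leaving the caller's URL untouched, which is how the per-module `x-migrations-table` parameter is attached in the up/down commands above. A sketch (it has to live inside the same `migrate` package, since the helper is unexported; the function below is illustrative, not part of the repository):

```go
package migrate

import (
	"fmt"
	"net/url"
)

// demoCloneURLWithQuery shows that the clone carries the extra query parameter
// while the original URL is left unchanged.
func demoCloneURLWithQuery() {
	base, _ := url.Parse("postgres://postgres:password@localhost:5432/postgres?sslmode=disable")
	clone := cloneURLWithQuery(base, url.Values{"x-migrations-table": {"runes_schema_migrations"}})

	fmt.Println(clone.String()) // ...?sslmode=disable&x-migrations-table=runes_schema_migrations
	fmt.Println(base.String())  // ...?sslmode=disable (unchanged)
}
```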


4
common/bitcoin.go Normal file
View File

@@ -0,0 +1,4 @@
package common
// HalvingInterval is the number of blocks between each halving event.
const HalvingInterval = 210_000
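As a small worked example of what the constant is for (the snippet below is illustrative, not code from the package): integer division by `HalvingInterval` gives the halving epoch of a block height, so height 840,000 falls exactly at the start of epoch 4.

```go
package main

import (
	"fmt"

	"github.com/gaze-network/indexer-network/common"
)

func main() {
	height := int64(840_000)
	epoch := height / common.HalvingInterval // 840_000 / 210_000 = 4
	fmt.Println(epoch)                       // 4
}
```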

99
common/errs/errs.go Normal file
View File

@@ -0,0 +1,99 @@
package errs
import (
"github.com/cockroachdb/errors"
)
// set depth to 10 to skip runtime stacks and current file.
const depth = 10
// Common Application Errors
var (
// NotFound is returned when a resource is not found
NotFound = errors.NewWithDepth(depth, "not found")
// InternalError is returned when internal logic got error
InternalError = errors.NewWithDepth(depth, "internal error")
// SomethingWentWrong is returned when a bug or unexpected case occurs
//
// inherited error from InternalError,
// so errors.Is(err, InternalError) == true
SomethingWentWrong = errors.WrapWithDepth(depth, InternalError, "something went wrong")
// Skippable is returned for errors that can be skipped or ignored so processing can continue
Skippable = errors.NewWithDepth(depth, "skippable")
// Unsupported is returned when a feature or result is not supported
Unsupported = errors.NewWithDepth(depth, "unsupported")
// NotSupported is returned when a feature or result is not supported
// alias of Unsupported
NotSupported = Unsupported
// Unauthorized is returned when a request is unauthorized
Unauthorized = errors.NewWithDepth(depth, "unauthorized")
// Timeout is returned when a connection to a resource timed out
Timeout = errors.NewWithDepth(depth, "timeout")
// BadRequest is returned when a request is invalid
BadRequest = errors.NewWithDepth(depth, "bad request")
// InvalidArgument is returned when an argument is invalid
//
// inherited error from BadRequest,
// so errors.Is(err, BadRequest) == true
InvalidArgument = errors.WrapWithDepth(depth, BadRequest, "invalid argument")
// ArgumentRequired is returned when an argument is required
//
// inherited error from BadRequest,
// so errors.Is(err, BadRequest) == true
ArgumentRequired = errors.WrapWithDepth(depth, BadRequest, "argument required")
// Duplicate is returned when a resource already exists
Duplicate = errors.NewWithDepth(depth, "duplicate")
// Unimplemented is returned when a feature or method is not implemented
//
// inherited error from Unsupported,
// so errors.Is(err, Unsupported) == true
Unimplemented = errors.WrapWithDepth(depth, Unsupported, "unimplemented")
)
// Business Logic errors
var (
// Overflow is returned when an overflow error occurs
//
// inherited error from InternalError,
// so errors.Is(err, InternalError) == true
Overflow = errors.WrapWithDepth(depth, InternalError, "overflow")
// OverflowUint32 is returned when an uint32 overflow error occurs
//
// inherited error from Overflow,
// so errors.Is(err, Overflow) == true
OverflowUint32 = errors.WrapWithDepth(depth, Overflow, "overflow uint32")
// OverflowUint64 is returned when an uint64 overflow error occurs
//
// inherited error from Overflow,
// so errors.Is(err, Overflow) == true
OverflowUint64 = errors.WrapWithDepth(depth, Overflow, "overflow uint64")
// OverflowUint128 is returned when an uint128 overflow error occurs
//
// inherited error from Overflow,
// so errors.Is(err, Overflow) == true
OverflowUint128 = errors.WrapWithDepth(depth, Overflow, "overflow uint128")
// InvalidState is returned when a state is invalid
InvalidState = errors.NewWithDepth(depth, "invalid state")
// ConflictSetting is returned when an indexer setting is conflicted
ConflictSetting = errors.NewWithDepth(depth, "conflict setting")
// Closed is returned when a resource is closed
Closed = errors.NewWithDepth(depth, "closed")
)
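The wrapped sentinels above are designed so callers can match on either the specific error or its parent. A minimal sketch of that behaviour with `errors.Is`, using the same `cockroachdb/errors` package this file imports:

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
	"github.com/gaze-network/indexer-network/common/errs"
)

func main() {
	err := errors.Wrap(errs.InvalidArgument, "block height must not be negative")

	fmt.Println(errors.Is(err, errs.InvalidArgument)) // true
	fmt.Println(errors.Is(err, errs.BadRequest))      // true: InvalidArgument inherits from BadRequest
	fmt.Println(errors.Is(err, errs.NotFound))        // false
}
```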

View File

@@ -0,0 +1,43 @@
package errs
import (
"fmt"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/errors/withstack"
)
// PublicError is an error that, when caught by error handler, should return a user-friendly error response to the user. Responses vary between each protocol (http, grpc, etc.).
type PublicError struct {
err error
message string
}
func (p PublicError) Error() string {
return p.err.Error()
}
func (p PublicError) Message() string {
return p.message
}
func (p PublicError) Unwrap() error {
return p.err
}
func NewPublicError(message string) error {
return withstack.WithStackDepth(&PublicError{err: errors.New(message), message: message}, 1)
}
func WithPublicMessage(err error, prefix string) error {
if err == nil {
return nil
}
var message string
if prefix != "" {
message = fmt.Sprintf("%s: %s", prefix, err.Error())
} else {
message = err.Error()
}
return withstack.WithStackDepth(&PublicError{err: err, message: message}, 1)
}
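A rough sketch of how a handler might use this type: create a user-facing error deep in the stack, then recover the safe message at the edge with `errors.As`. The `findRune` function is hypothetical; only `NewPublicError`, `Message`, and the unwrap behaviour come from the file above.

```go
package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
	"github.com/gaze-network/indexer-network/common/errs"
)

// findRune is a hypothetical lookup that fails with a message safe to show to API clients.
func findRune(id string) error {
	return errs.NewPublicError("rune not found")
}

func main() {
	err := findRune("UNCOMMON.GOODS")

	var pub *errs.PublicError
	if errors.As(err, &pub) {
		fmt.Println("response:", pub.Message()) // "rune not found"
	} else {
		fmt.Println("response: Internal Server Error")
	}
}
```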

12
common/hash.go Normal file
View File

@@ -0,0 +1,12 @@
package common
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
)
// Zero value of chainhash.Hash
var (
ZeroHash = *utils.Must(chainhash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000"))
NullHash = ZeroHash
)

33
common/network.go Normal file
View File

@@ -0,0 +1,33 @@
package common
import "github.com/btcsuite/btcd/chaincfg"
type Network string
const (
NetworkMainnet Network = "mainnet"
NetworkTestnet Network = "testnet"
)
var supportedNetworks = map[Network]struct{}{
NetworkMainnet: {},
NetworkTestnet: {},
}
var chainParams = map[Network]*chaincfg.Params{
NetworkMainnet: &chaincfg.MainNetParams,
NetworkTestnet: &chaincfg.TestNet3Params,
}
func (n Network) IsSupported() bool {
_, ok := supportedNetworks[n]
return ok
}
func (n Network) ChainParams() *chaincfg.Params {
return chainParams[n]
}
func (n Network) String() string {
return string(n)
}

49
config.example.yaml Normal file
View File

@@ -0,0 +1,49 @@
logger:
  output: TEXT # Output format for logs. current supported formats: "TEXT" | "JSON" | "GCP"
  debug: false
# Network to run the indexer on. Current supported networks: "mainnet" | "testnet"
network: mainnet
# Bitcoin Core RPC configuration options.
bitcoin_node:
  host: "" # [Required] Host of Bitcoin Core RPC (without https://)
  user: "" # Username to authenticate with Bitcoin Core RPC
  pass: "" # Password to authenticate with Bitcoin Core RPC
  disable_tls: false # Set to true to disable tls
# Block reporting configuration options. See Block Reporting section for more details.
reporting:
  disabled: false # Set to true to disable block reporting to Gaze Network. Default is false.
  base_url: "https://indexer.api.gaze.network" # Defaults to "https://indexer.api.gaze.network" if left empty
  name: "" # [Required if not disabled] Name of this indexer to show on the Gaze Network dashboard
  website_url: "" # Public website URL to show on the dashboard. Can be left empty.
  indexer_api_url: "" # Public url to access this indexer's API. Can be left empty if you want to keep your indexer private.
# HTTP server configuration options.
http_server:
  port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers.
  logger:
    disable: false # disable logger if logger level is `INFO`
    request_header: false
    request_query: false
  requestip: # Client IP extraction configuration options. This is unnecessary if you don't care about the real client IP or if you're not using a reverse proxy.
    trusted_proxies_ip: # Cloudflare, GCP Public LB. See: server/internal/middleware/requestcontext/PROXY-IP.md
    trusted_proxies_header: # X-Real-IP, CF-Connecting-IP
    enable_reject_malformed_request: false # return 403 if request is malformed (invalid IP)
# Meta-protocol modules configuration options.
modules:
  # Configuration options for Runes module. Can be removed if not used.
  runes:
    database: "postgres" # Database to store Runes data. current supported databases: "postgres"
    datasource: "database" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node".
    api_handlers: # API handlers to enable. current supported handlers: "http"
      - http
    postgres:
      host: "localhost"
      port: 5432
      user: "postgres"
      password: "password"
      db_name: "postgres"
      # url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above.

View File

@@ -0,0 +1,5 @@
package constants
const (
Version = "v0.2.1"
)

View File

@@ -0,0 +1,294 @@
package datasources
import (
"bytes"
"context"
"encoding/hex"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/rpcclient"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/internal/subscription"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
cstream "github.com/planxnx/concurrent-stream"
"github.com/samber/lo"
)
const (
blockStreamChunkSize = 5
)
// Make sure BitcoinNodeDatasource implements the Datasource interface
var _ Datasource[*types.Block] = (*BitcoinNodeDatasource)(nil)
// BitcoinNodeDatasource fetches data from a Bitcoin node for the Bitcoin Indexer
type BitcoinNodeDatasource struct {
btcclient *rpcclient.Client
}
// NewBitcoinNode create new BitcoinNodeDatasource with Bitcoin Core RPC Client
func NewBitcoinNode(btcclient *rpcclient.Client) *BitcoinNodeDatasource {
return &BitcoinNodeDatasource{
btcclient: btcclient,
}
}
func (p BitcoinNodeDatasource) Name() string {
return "bitcoin_node"
}
// Fetch fetches blocks from the Bitcoin node, blocking until the requested range has been received
//
// - from: block height to start fetching, if -1, it will start from genesis block
// - to: block height to stop fetching, if -1, it will fetch until the latest block
func (d *BitcoinNodeDatasource) Fetch(ctx context.Context, from, to int64) ([]*types.Block, error) {
ch := make(chan []*types.Block)
subscription, err := d.FetchAsync(ctx, from, to, ch)
if err != nil {
return nil, errors.WithStack(err)
}
defer subscription.Unsubscribe()
blocks := make([]*types.Block, 0)
for {
select {
case b, ok := <-ch:
if !ok {
return blocks, nil
}
blocks = append(blocks, b...)
case <-subscription.Done():
if err := ctx.Err(); err != nil {
return nil, errors.Wrap(err, "context done")
}
return blocks, nil
case err := <-subscription.Err():
if err != nil {
return nil, errors.Wrap(err, "got error while fetch async")
}
return blocks, nil
case <-ctx.Done():
return nil, errors.Wrap(ctx.Err(), "context done")
}
}
}
// FetchAsync fetches blocks from the Bitcoin node asynchronously (non-blocking)
//
// - from: block height to start fetching, if -1, it will start from genesis block
// - to: block height to stop fetching, if -1, it will fetch until the latest block
func (d *BitcoinNodeDatasource) FetchAsync(ctx context.Context, from, to int64, ch chan<- []*types.Block) (*subscription.ClientSubscription[[]*types.Block], error) {
ctx = logger.WithContext(ctx,
slogx.String("package", "datasources"),
slogx.String("datasource", d.Name()),
)
from, to, skip, err := d.prepareRange(from, to)
if err != nil {
return nil, errors.Wrap(err, "failed to prepare fetch range")
}
subscription := subscription.NewSubscription(ch)
if skip {
if err := subscription.UnsubscribeWithContext(ctx); err != nil {
return nil, errors.Wrap(err, "failed to unsubscribe")
}
return subscription.Client(), nil
}
// Create parallel stream
out := make(chan []*types.Block)
stream := cstream.NewStream(ctx, 8, out)
// create slice of block height to fetch
blockHeights := make([]int64, 0, to-from+1)
for i := from; i <= to; i++ {
blockHeights = append(blockHeights, i)
}
// Wait for stream to finish and close out channel
go func() {
defer close(out)
_ = stream.Wait()
}()
// Fan-out blocks to subscription channel
go func() {
defer func() {
// add a small delay to prevent shutdown before the client receives all blocks
time.Sleep(100 * time.Millisecond)
subscription.Unsubscribe()
}()
for {
select {
case data, ok := <-out:
// stream closed
if !ok {
return
}
// empty blocks
if len(data) == 0 {
continue
}
// send blocks to subscription channel
if err := subscription.Send(ctx, data); err != nil {
if errors.Is(err, errs.Closed) {
return
}
logger.WarnContext(ctx, "Failed to send bitcoin blocks to subscription client",
slogx.Int64("start", data[0].Header.Height),
slogx.Int64("end", data[len(data)-1].Header.Height),
slogx.Error(err),
)
}
case <-ctx.Done():
return
}
}
}()
// Parallel fetch blocks from Bitcoin node until complete all block heights
// or subscription is done.
go func() {
defer stream.Close()
done := subscription.Done()
chunks := lo.Chunk(blockHeights, blockStreamChunkSize)
for _, chunk := range chunks {
// TODO: Implement throttling logic to control the rate of fetching blocks (block/sec)
chunk := chunk
select {
case <-done:
return
case <-ctx.Done():
return
default:
stream.Go(func() []*types.Block {
startAt := time.Now()
defer func() {
logger.DebugContext(ctx, "Fetched chunk of blocks from Bitcoin node",
slogx.Int("total_blocks", len(chunk)),
slogx.Int64("from", chunk[0]),
slogx.Int64("to", chunk[len(chunk)-1]),
slogx.Duration("duration", time.Since(startAt)),
)
}()
// TODO: should concurrent fetch block or not ?
blocks := make([]*types.Block, 0, len(chunk))
for _, height := range chunk {
hash, err := d.btcclient.GetBlockHash(height)
if err != nil {
logger.ErrorContext(ctx, "Can't get block hash from Bitcoin node rpc", slogx.Error(err), slogx.Int64("height", height))
if err := subscription.SendError(ctx, errors.Wrapf(err, "failed to get block hash: height: %d", height)); err != nil {
logger.WarnContext(ctx, "Failed to send datasource error to subscription client", slogx.Error(err))
}
return nil
}
block, err := d.btcclient.GetBlock(hash)
if err != nil {
logger.ErrorContext(ctx, "Can't get block data from Bitcoin node rpc", slogx.Error(err), slogx.Int64("height", height))
if err := subscription.SendError(ctx, errors.Wrapf(err, "failed to get block: height: %d, hash: %s", height, hash)); err != nil {
logger.WarnContext(ctx, "Failed to send datasource error to subscription client", slogx.Error(err))
}
return nil
}
blocks = append(blocks, types.ParseMsgBlock(block, height))
}
return blocks
})
}
}
}()
return subscription.Client(), nil
}
func (d *BitcoinNodeDatasource) prepareRange(fromHeight, toHeight int64) (start, end int64, skip bool, err error) {
start = fromHeight
end = toHeight
// get current bitcoin block height
latestBlockHeight, err := d.btcclient.GetBlockCount()
if err != nil {
return -1, -1, false, errors.Wrap(err, "failed to get block count")
}
// set start to genesis block height
if start < 0 {
start = 0
}
// set end to current bitcoin block height if
// - end is -1
// - end is greater than current bitcoin block height
if end < 0 || end > latestBlockHeight {
end = latestBlockHeight
}
// if start is greater than end, skip this round
if start > end {
return -1, -1, true, nil
}
return start, end, false, nil
}
// GetTransactionByHash fetches a transaction from the Bitcoin node
func (d *BitcoinNodeDatasource) GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) {
rawTxVerbose, err := d.btcclient.GetRawTransactionVerbose(&txHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get raw transaction")
}
blockHash, err := chainhash.NewHashFromStr(rawTxVerbose.BlockHash)
if err != nil {
return nil, errors.Wrap(err, "failed to parse block hash")
}
block, err := d.btcclient.GetBlockVerboseTx(blockHash)
if err != nil {
return nil, errors.Wrap(err, "failed to get block header")
}
// parse tx
txBytes, err := hex.DecodeString(rawTxVerbose.Hex)
if err != nil {
return nil, errors.Wrap(err, "failed to decode transaction hex")
}
var msgTx wire.MsgTx
if err := msgTx.Deserialize(bytes.NewReader(txBytes)); err != nil {
return nil, errors.Wrap(err, "failed to deserialize transaction")
}
var txIndex uint32
for i, tx := range block.Tx {
if tx.Hex == rawTxVerbose.Hex {
txIndex = uint32(i)
break
}
}
return types.ParseMsgTx(&msgTx, block.Height, *blockHash, txIndex), nil
}
// GetBlockHeader fetch block header from Bitcoin node
func (d *BitcoinNodeDatasource) GetBlockHeader(ctx context.Context, height int64) (types.BlockHeader, error) {
hash, err := d.btcclient.GetBlockHash(height)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to get block hash")
}
block, err := d.btcclient.GetBlockHeader(hash)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to get block header")
}
return types.ParseMsgBlockHeader(*block, height), nil
}
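A minimal sketch of driving this datasource directly: connect an `rpcclient.Client`, then fetch a small, explicit block range with the blocking `Fetch` helper. The connection details below are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/btcsuite/btcd/rpcclient"
	"github.com/gaze-network/indexer-network/core/datasources"
)

func main() {
	ctx := context.Background()

	// Placeholder Bitcoin Core RPC credentials.
	client, err := rpcclient.New(&rpcclient.ConnConfig{
		Host:         "localhost:8332",
		User:         "user",
		Pass:         "pass",
		HTTPPostMode: true,
		DisableTLS:   true,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	ds := datasources.NewBitcoinNode(client)

	// Blocking fetch of five blocks.
	blocks, err := ds.Fetch(ctx, 840_000, 840_004)
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range blocks {
		fmt.Println(b.Header.Height, b.Header.Hash, len(b.Transactions))
	}
}
```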

View File

@@ -0,0 +1,16 @@
package datasources
import (
"context"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/internal/subscription"
)
// Datasource is an interface for indexer data sources.
type Datasource[T any] interface {
Name() string
Fetch(ctx context.Context, from, to int64) ([]T, error)
FetchAsync(ctx context.Context, from, to int64, ch chan<- []T) (*subscription.ClientSubscription[[]T], error)
GetBlockHeader(ctx context.Context, height int64) (types.BlockHeader, error)
}

257
core/indexer/indexer.go Normal file
View File

@@ -0,0 +1,257 @@
package indexer
import (
"context"
"log/slog"
"sync"
"time"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
)
const (
maxReorgLookBack = 1000
// pollingInterval is the default polling interval for the indexer polling worker
pollingInterval = 15 * time.Second
)
// Indexer generic indexer for fetching and processing data
type Indexer[T Input] struct {
Processor Processor[T]
Datasource datasources.Datasource[T]
currentBlock types.BlockHeader
quitOnce sync.Once
quit chan struct{}
done chan struct{}
}
// New create new generic indexer
func New[T Input](processor Processor[T], datasource datasources.Datasource[T]) *Indexer[T] {
return &Indexer[T]{
Processor: processor,
Datasource: datasource,
quit: make(chan struct{}),
done: make(chan struct{}),
}
}
func (i *Indexer[T]) Shutdown() error {
return i.ShutdownWithContext(context.Background())
}
func (i *Indexer[T]) ShutdownWithTimeout(timeout time.Duration) error {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
return i.ShutdownWithContext(ctx)
}
func (i *Indexer[T]) ShutdownWithContext(ctx context.Context) (err error) {
i.quitOnce.Do(func() {
close(i.quit)
select {
case <-i.done:
case <-time.After(180 * time.Second):
err = errors.Wrap(errs.Timeout, "indexer shutdown timeout")
case <-ctx.Done():
err = errors.Wrap(ctx.Err(), "indexer shutdown context canceled")
}
})
return
}
func (i *Indexer[T]) Run(ctx context.Context) (err error) {
defer close(i.done)
ctx = logger.WithContext(ctx,
slog.String("package", "indexers"),
slog.String("processor", i.Processor.Name()),
slog.String("datasource", i.Datasource.Name()),
)
// set to -1 to start from genesis block
i.currentBlock, err = i.Processor.CurrentBlock(ctx)
if err != nil {
if !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "can't init state, failed to get indexer current block")
}
i.currentBlock.Height = -1
}
ticker := time.NewTicker(pollingInterval)
defer ticker.Stop()
for {
select {
case <-i.quit:
logger.InfoContext(ctx, "Got quit signal, stopping indexer")
if err := i.Processor.Shutdown(ctx); err != nil {
logger.ErrorContext(ctx, "Failed to shutdown processor", slogx.Error(err))
return errors.Wrap(err, "processor shutdown failed")
}
return nil
case <-ctx.Done():
return nil
case <-ticker.C:
if err := i.process(ctx); err != nil {
logger.ErrorContext(ctx, "Indexer failed while processing", slogx.Error(err))
return errors.Wrap(err, "process failed")
}
logger.DebugContext(ctx, "Waiting for next polling interval")
}
}
}
func (i *Indexer[T]) process(ctx context.Context) (err error) {
// height range to fetch data
from, to := i.currentBlock.Height+1, int64(-1)
logger.InfoContext(ctx, "Start fetching input data", slog.Int64("from", from))
ch := make(chan []T)
subscription, err := i.Datasource.FetchAsync(ctx, from, to, ch)
if err != nil {
return errors.Wrap(err, "failed to fetch input data")
}
defer subscription.Unsubscribe()
for {
select {
case <-i.quit:
return nil
case inputs := <-ch:
// empty inputs
if len(inputs) == 0 {
continue
}
firstInput := inputs[0]
firstInputHeader := firstInput.BlockHeader()
startAt := time.Now()
ctx := logger.WithContext(ctx,
slogx.Int64("from", firstInputHeader.Height),
slogx.Int64("to", inputs[len(inputs)-1].BlockHeader().Height),
)
// validate reorg from first input
{
remoteBlockHeader := firstInputHeader
if !remoteBlockHeader.PrevBlock.IsEqual(&i.currentBlock.Hash) {
logger.WarnContext(ctx, "Detected chain reorganization. Searching for fork point...",
slogx.String("event", "reorg_detected"),
slogx.Stringer("current_hash", i.currentBlock.Hash),
slogx.Stringer("expected_hash", remoteBlockHeader.PrevBlock),
)
var (
start = time.Now()
targetHeight = i.currentBlock.Height - 1
beforeReorgBlockHeader = types.BlockHeader{
Height: -1,
}
)
for n := 0; n < maxReorgLookBack; n++ {
// TODO: concurrent fetch
indexedHeader, err := i.Processor.GetIndexedBlock(ctx, targetHeight)
if err != nil {
return errors.Wrapf(err, "failed to get indexed block, height: %d", targetHeight)
}
remoteHeader, err := i.Datasource.GetBlockHeader(ctx, targetHeight)
if err != nil {
return errors.Wrapf(err, "failed to get remote block header, height: %d", targetHeight)
}
// Found a block where the local and remote chains still agree (the fork point)
if indexedHeader.Hash.IsEqual(&remoteHeader.Hash) {
beforeReorgBlockHeader = remoteHeader
break
}
// Walk back to find fork point
targetHeight -= 1
}
// Reorg look back limit reached
if beforeReorgBlockHeader.Height < 0 {
return errors.Wrap(errs.SomethingWentWrong, "reorg look back limit reached")
}
logger.InfoContext(ctx, "Found reorg fork point, starting to revert data...",
slogx.String("event", "reorg_forkpoint"),
slogx.Int64("since", beforeReorgBlockHeader.Height+1),
slogx.Int64("total_blocks", i.currentBlock.Height-beforeReorgBlockHeader.Height),
slogx.Duration("search_duration", time.Since(start)),
)
// Revert all data since the reorg block
start = time.Now()
if err := i.Processor.RevertData(ctx, beforeReorgBlockHeader.Height+1); err != nil {
return errors.Wrap(err, "failed to revert data")
}
// Set current block to before reorg block and
// end current round to fetch again
i.currentBlock = beforeReorgBlockHeader
logger.Info("Fixing chain reorganization completed",
slogx.Int64("current_block", i.currentBlock.Height),
slogx.Duration("duration", time.Since(start)),
)
return nil
}
}
// validate that inputs are continuous and no reorg occurred
prevHeader := i.currentBlock
for i, input := range inputs {
header := input.BlockHeader()
if header.Height != prevHeader.Height+1 {
return errors.Wrapf(errs.InternalError, "input is not continuous, input[%d] height: %d, input[%d] height: %d", i-1, prevHeader.Height, i, header.Height)
}
if !header.PrevBlock.IsEqual(&prevHeader.Hash) {
logger.WarnContext(ctx, "Chain Reorganization occurred in the middle of batch fetching inputs, need to try to fetch again")
// end current round
return nil
}
prevHeader = header
}
ctx = logger.WithContext(ctx, slog.Int("total_inputs", len(inputs)))
// Start processing input
logger.InfoContext(ctx, "Processing inputs")
if err := i.Processor.Process(ctx, inputs); err != nil {
return errors.WithStack(err)
}
// Update current state
i.currentBlock = inputs[len(inputs)-1].BlockHeader()
logger.InfoContext(ctx, "Processed inputs successfully",
slogx.String("event", "processed_inputs"),
slogx.Int64("current_block", i.currentBlock.Height),
slogx.Duration("duration", time.Since(startAt)),
)
case <-subscription.Done():
// end current round
if err := ctx.Err(); err != nil {
return errors.Wrap(err, "context done")
}
return nil
case <-ctx.Done():
return errors.WithStack(ctx.Err())
case err := <-subscription.Err():
if err != nil {
return errors.Wrap(err, "got error while fetch async")
}
}
}
}
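Tying the pieces together, the sketch below wires a stub `Processor` implementation and the `BitcoinNodeDatasource` shown earlier into this generic indexer. `noopProcessor` is hypothetical (a real module such as Runes persists its state in the database), and the RPC credentials are placeholders.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/btcsuite/btcd/rpcclient"
	"github.com/gaze-network/indexer-network/core/datasources"
	"github.com/gaze-network/indexer-network/core/indexer"
	"github.com/gaze-network/indexer-network/core/types"
)

// noopProcessor is a hypothetical stand-in that satisfies indexer.Processor[*types.Block]
// but indexes nothing; it only remembers the last block it saw.
type noopProcessor struct{ current types.BlockHeader }

func (p *noopProcessor) Name() string { return "noop" }
func (p *noopProcessor) Process(ctx context.Context, blocks []*types.Block) error {
	p.current = blocks[len(blocks)-1].Header
	return nil
}
func (p *noopProcessor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
	return p.current, nil
}
func (p *noopProcessor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
	return p.current, nil
}
func (p *noopProcessor) RevertData(ctx context.Context, from int64) error { return nil }
func (p *noopProcessor) VerifyStates(ctx context.Context) error           { return nil }
func (p *noopProcessor) Shutdown(ctx context.Context) error               { return nil }

func main() {
	client, err := rpcclient.New(&rpcclient.ConnConfig{Host: "localhost:8332", User: "user", Pass: "pass", HTTPPostMode: true, DisableTLS: true}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Height -1 tells the indexer to start from the genesis block.
	worker := indexer.New[*types.Block](&noopProcessor{current: types.BlockHeader{Height: -1}}, datasources.NewBitcoinNode(client))

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	if err := worker.Run(ctx); err != nil {
		log.Fatal(err)
	}
}
```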

42
core/indexer/interface.go Normal file
View File

@@ -0,0 +1,42 @@
package indexer
import (
"context"
"time"
"github.com/gaze-network/indexer-network/core/types"
)
type Input interface {
BlockHeader() types.BlockHeader
}
type Processor[T Input] interface {
Name() string
// Process processes the input data and indexes it.
Process(ctx context.Context, inputs []T) error
// CurrentBlock returns the latest indexed block header.
CurrentBlock(ctx context.Context) (types.BlockHeader, error)
// GetIndexedBlock returns the indexed block header by the specified block height.
GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error)
// RevertData revert synced data to the specified block height for re-indexing.
RevertData(ctx context.Context, from int64) error
// VerifyStates verifies the states of the indexed data and the indexer
// to ensure the last shutdown was graceful and no missing data.
VerifyStates(ctx context.Context) error
// Shutdown gracefully stops the processor. Database connections, network calls, leftover states, etc. should be closed and cleaned up here.
Shutdown(ctx context.Context) error
}
type IndexerWorker interface {
Shutdown() error
ShutdownWithTimeout(timeout time.Duration) error
ShutdownWithContext(ctx context.Context) (err error)
Run(ctx context.Context) (err error)
}
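Since this interface is the main extension point for new modules, a minimal sketch of a conforming processor may help. Everything below (the memProcessor type, its in-memory slice, the error messages) is hypothetical and only illustrates the contract under the assumption that *types.Block is used as the input type; it is not part of this change.

```go
package example

import (
	"context"
	"errors"

	"github.com/gaze-network/indexer-network/core/indexer"
	"github.com/gaze-network/indexer-network/core/types"
)

// Compile-time check: *memProcessor satisfies Processor for *types.Block inputs.
var _ indexer.Processor[*types.Block] = (*memProcessor)(nil)

// memProcessor is a hypothetical in-memory processor used only to illustrate
// the contract; a real module would persist its state in a database.
type memProcessor struct {
	blocks []types.BlockHeader // indexed block headers, oldest first
}

func (p *memProcessor) Name() string { return "example" }

func (p *memProcessor) Process(ctx context.Context, inputs []*types.Block) error {
	for _, b := range inputs {
		p.blocks = append(p.blocks, b.BlockHeader())
	}
	return nil
}

func (p *memProcessor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
	if len(p.blocks) == 0 {
		return types.BlockHeader{}, errors.New("no block indexed yet")
	}
	return p.blocks[len(p.blocks)-1], nil
}

func (p *memProcessor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
	for _, b := range p.blocks {
		if b.Height == height {
			return b, nil
		}
	}
	return types.BlockHeader{}, errors.New("block not indexed")
}

func (p *memProcessor) RevertData(ctx context.Context, from int64) error {
	kept := p.blocks[:0]
	for _, b := range p.blocks {
		if b.Height < from {
			kept = append(kept, b)
		}
	}
	p.blocks = kept
	return nil
}

func (p *memProcessor) VerifyStates(ctx context.Context) error { return nil }

func (p *memProcessor) Shutdown(ctx context.Context) error { return nil }
```

RevertData and GetIndexedBlock are exactly what the reorg-handling loop above relies on, which is why they sit next to Process in the same interface.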


@@ -0,0 +1,51 @@
package types
import (
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/samber/lo"
)
type BlockHeader struct {
Hash chainhash.Hash
Height int64
Version int32
PrevBlock chainhash.Hash
MerkleRoot chainhash.Hash
Timestamp time.Time
Bits uint32
Nonce uint32
}
func ParseMsgBlockHeader(src wire.BlockHeader, height int64) BlockHeader {
hash := src.BlockHash()
return BlockHeader{
Hash: hash,
Height: height,
Version: src.Version,
PrevBlock: src.PrevBlock,
MerkleRoot: src.MerkleRoot,
Timestamp: src.Timestamp,
Bits: src.Bits,
Nonce: src.Nonce,
}
}
type Block struct {
Header BlockHeader
Transactions []*Transaction
}
func (b *Block) BlockHeader() BlockHeader {
return b.Header
}
func ParseMsgBlock(src *wire.MsgBlock, height int64) *Block {
hash := src.Header.BlockHash()
return &Block{
Header: ParseMsgBlockHeader(src.Header, height),
Transactions: lo.Map(src.Transactions, func(item *wire.MsgTx, index int) *Transaction { return ParseMsgTx(item, height, hash, uint32(index)) }),
}
}
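ParseMsgBlockHeader and ParseMsgBlock are the bridge from btcd's wire types into the indexer's own representation. A rough usage sketch, assuming a Bitcoin node reachable over RPC; the host and credentials are placeholders, not project configuration.

```go
package example

import (
	"github.com/btcsuite/btcd/rpcclient"

	"github.com/gaze-network/indexer-network/core/types"
)

// fetchAndParse pulls one block over the Bitcoin node RPC and converts it
// into the indexer's types.Block. Connection settings are illustrative only.
func fetchAndParse(height int64) (*types.Block, error) {
	client, err := rpcclient.New(&rpcclient.ConnConfig{
		Host:         "localhost:8332",
		User:         "user",
		Pass:         "pass",
		HTTPPostMode: true, // bitcoind only supports HTTP POST mode
		DisableTLS:   true,
	}, nil)
	if err != nil {
		return nil, err
	}
	defer client.Shutdown()

	hash, err := client.GetBlockHash(height)
	if err != nil {
		return nil, err
	}
	msgBlock, err := client.GetBlock(hash) // *wire.MsgBlock
	if err != nil {
		return nil, err
	}
	return types.ParseMsgBlock(msgBlock, height), nil
}
```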


@@ -0,0 +1,73 @@
package types
import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/samber/lo"
)
type Transaction struct {
BlockHeight int64
BlockHash chainhash.Hash
Index uint32
TxHash chainhash.Hash
Version int32
LockTime uint32
TxIn []*TxIn
TxOut []*TxOut
}
type TxIn struct {
SignatureScript []byte
Witness [][]byte
Sequence uint32
PreviousOutIndex uint32
PreviousOutTxHash chainhash.Hash
}
type TxOut struct {
PkScript []byte
Value int64
}
func (o TxOut) IsOpReturn() bool {
return len(o.PkScript) > 0 && o.PkScript[0] == txscript.OP_RETURN
}
// ParseMsgTx parses btcd/wire.MsgTx to Transaction.
func ParseMsgTx(src *wire.MsgTx, blockHeight int64, blockHash chainhash.Hash, index uint32) *Transaction {
return &Transaction{
BlockHeight: blockHeight,
BlockHash: blockHash,
Index: index,
TxHash: src.TxHash(),
Version: src.Version,
LockTime: src.LockTime,
TxIn: lo.Map(src.TxIn, func(item *wire.TxIn, _ int) *TxIn {
return ParseTxIn(item)
}),
TxOut: lo.Map(src.TxOut, func(item *wire.TxOut, _ int) *TxOut {
return ParseTxOut(item)
}),
}
}
// ParseTxIn parses btcd/wire.TxIn to TxIn.
func ParseTxIn(src *wire.TxIn) *TxIn {
return &TxIn{
SignatureScript: src.SignatureScript,
Witness: src.Witness,
Sequence: src.Sequence,
PreviousOutIndex: src.PreviousOutPoint.Index,
PreviousOutTxHash: src.PreviousOutPoint.Hash,
}
}
// ParseTxOut parses btcd/wire.TxOut to TxOut.
func ParseTxOut(src *wire.TxOut) *TxOut {
return &TxOut{
PkScript: src.PkScript,
Value: src.Value,
}
}
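As a small illustration of these types, the sketch below (a hypothetical helper, not part of this change) filters a parsed block down to its OP_RETURN outputs, e.g. to locate protocol-carrying outputs such as runestones.

```go
package types

// opReturnOutputs is an illustrative helper (not part of this change) that
// collects every OP_RETURN output of a parsed block.
func opReturnOutputs(block *Block) []*TxOut {
	var outs []*TxOut
	for _, tx := range block.Transactions {
		for _, out := range tx.TxOut {
			if out.IsOpReturn() {
				outs = append(outs, out)
			}
		}
	}
	return outs
}
```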


@@ -1 +0,0 @@
package core


@@ -0,0 +1,34 @@
# Database Migration
We use the golang-migrate library to manage database migrations.
### Install golang-migrate
```shell
$ brew install golang-migrate
```
### Commands
#### Create a new migration file (sequential versioning)
```shell
$ migrate create -ext sql -dir . -seq file_name
```
#### Migrate the database up to the latest version
```shell
$ migrate -source file://. -database "postgres://postgres:$PASSWORD@localhost:5432/postgres?sslmode=disable" up
```
#### Migrate the database down by 1 version
```shell
$ migrate -source file://. -database "postgres://postgres:$PASSWORD@localhost:5432/postgres?sslmode=disable" down 1
```
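### Running migrations from Go

The repository also depends on golang-migrate as a Go library (see go.mod), so migrations can be run programmatically as well. A minimal sketch, with the source directory and connection URL as placeholders:

```go
package example

import (
	"errors"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres" // register postgres driver
	_ "github.com/golang-migrate/migrate/v4/source/file"       // register file:// source
)

// migrateUp applies all pending migrations found in dir against databaseURL.
func migrateUp(dir, databaseURL string) error {
	m, err := migrate.New("file://"+dir, databaseURL)
	if err != nil {
		return err
	}
	defer m.Close()
	// ErrNoChange simply means the schema is already up to date.
	if err := m.Up(); err != nil && !errors.Is(err, migrate.ErrNoChange) {
		return err
	}
	return nil
}
```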
### References:
- Golang-Migrate: https://github.com/golang-migrate
- Connection string: https://www.connectionstrings.com/postgresql/

go.mod

@@ -1,3 +1,85 @@
module github.com/gaze-network/indexer-network
go 1.22
require (
github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11
github.com/btcsuite/btcd v0.24.0
github.com/btcsuite/btcd/btcutil v1.1.5
github.com/btcsuite/btcd/btcutil/psbt v1.1.9
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0
github.com/cockroachdb/errors v1.11.1
github.com/gaze-network/uint128 v1.3.0
github.com/gofiber/fiber/v2 v2.52.4
github.com/golang-migrate/migrate/v4 v4.17.1
github.com/jackc/pgx/v5 v5.5.5
github.com/mcosta74/pgx-slog v0.3.0
github.com/planxnx/concurrent-stream v0.1.5
github.com/samber/do/v2 v2.0.0-beta.7
github.com/samber/lo v1.39.0
github.com/shopspring/decimal v1.3.1
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.18.2
github.com/stretchr/testify v1.9.0
github.com/valyala/fasthttp v1.51.0
go.uber.org/automaxprocs v1.5.3
golang.org/x/sync v0.7.0
)
require (
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/bitonicnl/verify-signed-message v0.7.1
github.com/btcsuite/btcd/btcec/v2 v2.3.3 // indirect
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/uuid v1.5.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx v3.6.2+incompatible // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/klauspost/compress v1.17.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/samber/go-type-to-string v1.4.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum

@@ -0,0 +1,325 @@
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11 h1:Xpbu03JdzqWEXcL6xr43Wxjnwh/Txt16WXJ7IlzvoxA=
github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11/go.mod h1:ft8CEDBt0csuZ+yM/bKf7ZlV6lWvWY/TFXzp7+Ze9Jw=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/bitonicnl/verify-signed-message v0.7.1 h1:1Qku9k9WgzobjqBY7tT3CLjWxtTJZxkYNhOV6QeCTjY=
github.com/bitonicnl/verify-signed-message v0.7.1/go.mod h1:PR60twfJIaHEo9Wb6eJBh8nBHEZIQQx8CvRwh0YmEPk=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A=
github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo=
github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcec/v2 v2.3.3 h1:6+iXlDKE8RMtKsvK0gshlXIuPbyWM/h84Ensb7o3sC0=
github.com/btcsuite/btcd/btcec/v2 v2.3.3/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8=
github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00=
github.com/btcsuite/btcd/btcutil/psbt v1.1.9 h1:UmfOIiWMZcVMOLaN+lxbbLSuoINGS1WmK1TZNI0b4yk=
github.com/btcsuite/btcd/btcutil/psbt v1.1.9/go.mod h1:ehBEvU91lxSlXtA+zZz3iFYx7Yq9eqnKx4/kSrnsvMY=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg=
github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0=
github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gaze-network/uint128 v1.3.0 h1:25qtRiDKQXa+mD5rN0nbUkbvY26/uzfSF97eWvhIr0I=
github.com/gaze-network/uint128 v1.3.0/go.mod h1:zAwwcnoRUNiiQj0vjLmHgNgJ+w2RUgzMAJgl8d7tRug=
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/gofiber/fiber/v2 v2.52.4 h1:P+T+4iK7VaqUsq2PALYEfBBo6bJZ4q3FP8cZ84EggTM=
github.com/gofiber/fiber/v2 v2.52.4/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-migrate/migrate/v4 v4.17.1 h1:4zQ6iqL6t6AiItphxJctQb3cFqWiSpMnX7wLTPnnYO4=
github.com/golang-migrate/migrate/v4 v4.17.1/go.mod h1:m8hinFyWBn0SA4QKHuKh175Pm9wjmxj3S2Mia7dbXzM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o=
github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mcosta74/pgx-slog v0.3.0 h1:v7nl8XKE4ObGxZfYUUs8uUWrimvNib2V4P7Mp0WjSyw=
github.com/mcosta74/pgx-slog v0.3.0/go.mod h1:73/rhilX7+ybQ9RH/BZBtOkTDiGAH1yBrcatN6jQW5E=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/planxnx/concurrent-stream v0.1.5 h1:qSMM27m7AApvalS0rSmovxOtDCnLy0/HinYJPe3oQfQ=
github.com/planxnx/concurrent-stream v0.1.5/go.mod h1:vxnW2qxkCLppMo5+Zns3b5/CiVxYQjXRLVFGJ9xvkXk=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/samber/do/v2 v2.0.0-beta.7 h1:tmdLOVSCbTA6uGWLU5poi/nZvMRh5QxXFJ9vHytU+Jk=
github.com/samber/do/v2 v2.0.0-beta.7/go.mod h1:+LpV3vu4L81Q1JMZNSkMvSkW9lt4e5eJoXoZHkeBS4c=
github.com/samber/go-type-to-string v1.4.0 h1:KXphToZgiFdnJQxryU25brhlh/CqY/cwJVeX2rfmow0=
github.com/samber/go-type-to-string v1.4.0/go.mod h1:jpU77vIDoIxkahknKDoEx9C8bQ1ADnh2sotZ8I4QqBU=
github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.51.0 h1:8b30A5JlZ6C7AS81RsWjYMQmrZG6feChmgAolCl1SqA=
github.com/valyala/fasthttp v1.51.0/go.mod h1:oI2XroL+lI7vdXyYoQk03bXBThfFl2cVdIA3Xl7cH8g=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d h1:N0hmiNbwsSNwHBAvR3QB5w25pUwH4tK0Y/RltD1j1h4=
golang.org/x/exp v0.0.0-20240525044651-4c93da0ed11d/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


internal/config/config.go

@@ -0,0 +1,133 @@
package config
import (
"context"
"log/slog"
"strings"
"sync"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
runesconfig "github.com/gaze-network/indexer-network/modules/runes/config"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/middleware/requestcontext"
"github.com/gaze-network/indexer-network/pkg/middleware/requestlogger"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
var (
isInit bool
mu sync.Mutex
config = &Config{
Logger: logger.Config{
Output: "TEXT",
},
Network: common.NetworkMainnet,
HTTPServer: HTTPServerConfig{
Port: 8080,
},
BitcoinNode: BitcoinNodeClient{
User: "user",
Pass: "pass",
},
Modules: Modules{
Runes: runesconfig.Config{
Datasource: "bitcoin-node",
Database: "postgres",
},
},
}
)
type Config struct {
EnableModules []string `mapstructure:"enable_modules"`
APIOnly bool `mapstructure:"api_only"`
Logger logger.Config `mapstructure:"logger"`
BitcoinNode BitcoinNodeClient `mapstructure:"bitcoin_node"`
Network common.Network `mapstructure:"network"`
HTTPServer HTTPServerConfig `mapstructure:"http_server"`
Modules Modules `mapstructure:"modules"`
Reporting reportingclient.Config `mapstructure:"reporting"`
}
type BitcoinNodeClient struct {
Host string `mapstructure:"host"`
User string `mapstructure:"user"`
Pass string `mapstructure:"pass"`
DisableTLS bool `mapstructure:"disable_tls"`
}
type Modules struct {
Runes runesconfig.Config `mapstructure:"runes"`
}
type HTTPServerConfig struct {
Port int `mapstructure:"port"`
Logger requestlogger.Config `mapstructure:"logger"`
RequestIP requestcontext.WithClientIPConfig `mapstructure:"requestip"`
}
// Parse parses the configuration from the config file and environment variables
func Parse(configFile ...string) Config {
mu.Lock()
defer mu.Unlock()
return parse(configFile...)
}
// Load returns the loaded configuration
func Load() Config {
mu.Lock()
defer mu.Unlock()
if isInit {
return *config
}
return parse()
}
// BindPFlag binds a specific key to a pflag (as used by cobra).
// Example (where serverCmd is a Cobra instance):
//
// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
func BindPFlag(key string, flag *pflag.Flag) {
if err := viper.BindPFlag(key, flag); err != nil {
logger.Panic("Something went wrong, failed to bind flag for config", slog.String("package", "config"), slogx.Error(err))
}
}
// SetDefault sets the default value for this key.
// SetDefault is case-insensitive for a key.
// The default is only used when no value is provided by the user via flag, config file, or environment variable.
func SetDefault(key string, value any) { viper.SetDefault(key, value) }
func parse(configFile ...string) Config {
ctx := logger.WithContext(context.Background(), slog.String("package", "config"))
if len(configFile) > 0 && configFile[0] != "" {
viper.SetConfigFile(configFile[0])
} else {
viper.AddConfigPath("./")
viper.SetConfigName("config")
}
viper.AutomaticEnv()
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
if err := viper.ReadInConfig(); err != nil {
var errNotfound viper.ConfigFileNotFoundError
if errors.As(err, &errNotfound) {
logger.WarnContext(ctx, "Config file not found, use default config value", slogx.Error(err))
} else {
logger.PanicContext(ctx, "Invalid config file", slogx.Error(err))
}
}
if err := viper.Unmarshal(&config); err != nil {
logger.PanicContext(ctx, "Something went wrong, failed to unmarshal config", slogx.Error(err))
}
isInit = true
return *config
}
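A hedged sketch of how these helpers are typically wired into a cobra command; the command, flag name, and config key below are illustrative and not taken from this change.

```go
package example

import (
	"github.com/spf13/cobra"

	"github.com/gaze-network/indexer-network/internal/config"
)

// newRunCmd is a hypothetical command showing the flag-to-config binding flow.
func newRunCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use: "run",
		RunE: func(cmd *cobra.Command, args []string) error {
			conf := config.Load() // parsed once, cached afterwards
			_ = conf.HTTPServer.Port
			return nil
		},
	}
	// A CLI flag overrides the value from the config file / environment.
	cmd.Flags().Int("http-port", 8080, "Port to run the HTTP server on")
	config.SetDefault("http_server.port", 8080)
	config.BindPFlag("http_server.port", cmd.Flags().Lookup("http-port"))
	return cmd
}
```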


@@ -0,0 +1,37 @@
package postgres
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxpool"
)
// Make sure that interfaces are compatible with the pgx package
var (
_ DB = (*pgx.Conn)(nil)
_ DB = (*pgxpool.Conn)(nil)
)
// Queryable is an interface that can be used to execute queries and commands
type Queryable interface {
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
QueryRow(context.Context, string, ...interface{}) pgx.Row
}
// TxQueryable is an interface that can be used to execute queries and commands within a transaction
type TxQueryable interface {
Queryable
Begin(context.Context) (pgx.Tx, error)
BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error)
}
// DB is an interface that can be used to execute queries and commands, and also to send batches
type DB interface {
Queryable
TxQueryable
SendBatch(ctx context.Context, b *pgx.Batch) (br pgx.BatchResults)
Ping(ctx context.Context) error
}
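These interfaces let data-access code depend only on the capability it needs: anything that can run queries satisfies Queryable, whether it is a single connection, a pool, or a transaction. A short illustrative sketch follows; the helper and table name are hypothetical.

```go
package postgres

import "context"

// countBlocks is an illustrative repository helper, not part of this change.
// It works the same whether it is handed a *pgx.Conn, a *pgxpool.Pool, or a
// pgx.Tx, because all of them satisfy the minimal Queryable interface.
func countBlocks(ctx context.Context, db Queryable) (int64, error) {
	var n int64
	err := db.QueryRow(ctx, `SELECT COUNT(*) FROM indexed_blocks`).Scan(&n) // hypothetical table
	return n, err
}
```

Accepting the narrowest interface keeps repositories testable and lets callers decide whether a query runs inside a transaction.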


@@ -0,0 +1,127 @@
package postgres
import (
"context"
"fmt"
"github.com/Cleverse/go-utilities/utils"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/jackc/pgx/v5/tracelog"
pgxslog "github.com/mcosta74/pgx-slog"
)
const (
DefaultMaxConns = 16
DefaultMinConns = 0
DefaultLogLevel = tracelog.LogLevelError
)
type Config struct {
Host string `mapstructure:"host"` // Default is 127.0.0.1
Port string `mapstructure:"port"` // Default is 5432
User string `mapstructure:"user"` // Default is empty
Password string `mapstructure:"password"` // Default is empty
DBName string `mapstructure:"db_name"` // Default is postgres
SSLMode string `mapstructure:"ssl_mode"` // Default is prefer
URL string `mapstructure:"url"` // If URL is provided, other fields are ignored
MaxConns int32 `mapstructure:"max_conns"` // Default is 16
MinConns int32 `mapstructure:"min_conns"` // Default is 0
Debug bool `mapstructure:"debug"`
}
// New creates a new connection to the database
func New(ctx context.Context, conf Config) (*pgx.Conn, error) {
// Prepare connection pool configuration
connConfig, err := pgx.ParseConfig(conf.String())
if err != nil {
return nil, errors.Join(errs.InvalidArgument, errors.Wrap(err, "failed to parse config"))
}
connConfig.Tracer = conf.QueryTracer()
// Create a new connection
conn, err := pgx.ConnectConfig(ctx, connConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to create a new connection")
}
// Test the connection
if err := conn.Ping(ctx); err != nil {
return nil, errors.Wrap(err, "failed to connect to the database")
}
return conn, nil
}
// NewPool creates a new connection pool to the database
func NewPool(ctx context.Context, conf Config) (*pgxpool.Pool, error) {
// Prepare connection pool configuration
connConfig, err := pgxpool.ParseConfig(conf.String())
if err != nil {
return nil, errors.Join(errs.InvalidArgument, errors.Wrap(err, "failed to parse config"))
}
connConfig.MaxConns = utils.Default(conf.MaxConns, DefaultMaxConns)
connConfig.MinConns = utils.Default(conf.MinConns, DefaultMinConns)
connConfig.ConnConfig.Tracer = conf.QueryTracer()
// Create a new connection pool
connPool, err := pgxpool.NewWithConfig(ctx, connConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to create a new connection pool")
}
// Test the connection
if err := connPool.Ping(ctx); err != nil {
return nil, errors.Wrap(err, "failed to connect to the database")
}
return connPool, nil
}
// String returns the connection string (DSN format or URL format)
func (conf Config) String() string {
if conf.Host == "" {
conf.Host = "127.0.0.1"
}
if conf.Port == "" {
conf.Port = "5432"
}
if conf.SSLMode == "" {
conf.SSLMode = "prefer"
}
if conf.DBName == "" {
conf.DBName = "postgres"
}
// Construct DSN
connString := fmt.Sprintf("host=%s dbname=%s port=%s sslmode=%s", conf.Host, conf.DBName, conf.Port, conf.SSLMode)
if conf.User != "" {
connString = fmt.Sprintf("%s user=%s", connString, conf.User)
}
if conf.Password != "" {
connString = fmt.Sprintf("%s password=%s", connString, conf.Password)
}
// Prefer URL over DSN format
if conf.URL != "" {
connString = conf.URL
}
return connString
}
func (conf Config) QueryTracer() pgx.QueryTracer {
loglevel := DefaultLogLevel
if conf.Debug {
loglevel = tracelog.LogLevelTrace
}
return &tracelog.TraceLog{
Logger: pgxslog.NewLogger(logger.With("package", "postgres")),
LogLevel: loglevel,
}
}
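A brief usage sketch of the constructors above; the credentials, timeout, and pool sizing are placeholders, not values used by the project.

```go
package postgres

import (
	"context"
	"time"
)

// exampleConnect shows how NewPool is typically used; connection details are
// illustrative only and not part of this change.
func exampleConnect(ctx context.Context) error {
	conf := Config{
		Host:     "127.0.0.1",
		Port:     "5432",
		User:     "postgres",
		Password: "secret",
		DBName:   "postgres",
		MaxConns: 8,
		Debug:    false,
	}

	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	pool, err := NewPool(ctx, conf) // pooled connections for long-running services
	if err != nil {
		return err
	}
	defer pool.Close()

	return pool.Ping(ctx)
}
```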


@@ -0,0 +1,31 @@
package subscription
import "context"
// ClientSubscription is a subscription that can be used by the client to unsubscribe from the subscription.
type ClientSubscription[T any] struct {
subscription *Subscription[T]
}
func (c *ClientSubscription[T]) Unsubscribe() {
c.subscription.Unsubscribe()
}
func (c *ClientSubscription[T]) UnsubscribeWithContext(ctx context.Context) (err error) {
return c.subscription.UnsubscribeWithContext(ctx)
}
// Err returns the error channel of the subscription.
func (c *ClientSubscription[T]) Err() <-chan error {
return c.subscription.Err()
}
// Done returns the done channel of the subscription
func (c *ClientSubscription[T]) Done() <-chan struct{} {
return c.subscription.Done()
}
// IsClosed reports whether the subscription has been closed.
func (c *ClientSubscription[T]) IsClosed() bool {
return c.subscription.IsClosed()
}


@@ -0,0 +1,132 @@
package subscription
import (
"context"
"sync"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
)
// SubscriptionBufferSize is the buffer size of the subscription channel.
// It is used to prevent blocking the client dispatcher when the client is slow to consume values.
var SubscriptionBufferSize = 8
// Subscription is a subscription to a stream of values from the client dispatcher.
// It has two channels: one for values, and one for errors.
type Subscription[T any] struct {
// The channel which the subscription sends values.
channel chan<- T
// The in channel receives values from the client dispatcher.
in chan T
// The error channel receives the error from the client dispatcher.
err chan error
quitOnce sync.Once
// Closing of the subscription is requested by sending on 'quit'. This is handled by
// the forwarding loop, which stops sending to sub.channel and then closes 'quitDone'.
quit chan struct{}
quitDone chan struct{}
}
func NewSubscription[T any](channel chan<- T) *Subscription[T] {
subscription := &Subscription[T]{
channel: channel,
in: make(chan T, SubscriptionBufferSize),
err: make(chan error, SubscriptionBufferSize),
quit: make(chan struct{}),
quitDone: make(chan struct{}),
}
go func() {
subscription.run()
}()
return subscription
}
func (s *Subscription[T]) Unsubscribe() {
_ = s.UnsubscribeWithContext(context.Background())
}
func (s *Subscription[T]) UnsubscribeWithContext(ctx context.Context) (err error) {
s.quitOnce.Do(func() {
select {
case s.quit <- struct{}{}:
<-s.quitDone
case <-ctx.Done():
err = ctx.Err()
}
})
return errors.WithStack(err)
}
// Client returns a client subscription for this subscription.
func (s *Subscription[T]) Client() *ClientSubscription[T] {
return &ClientSubscription[T]{
subscription: s,
}
}
// Err returns the error channel of the subscription.
func (s *Subscription[T]) Err() <-chan error {
return s.err
}
// Done returns the done channel of the subscription
func (s *Subscription[T]) Done() <-chan struct{} {
return s.quitDone
}
// IsClosed reports whether the subscription has been closed.
func (s *Subscription[T]) IsClosed() bool {
select {
case <-s.quitDone:
return true
default:
return false
}
}
// Send sends a value to the subscription channel. If the subscription is closed, it returns an error.
func (s *Subscription[T]) Send(ctx context.Context, value T) error {
select {
case s.in <- value:
case <-s.quitDone:
return errors.Wrap(errs.Closed, "subscription is closed")
case <-ctx.Done():
return errors.WithStack(ctx.Err())
}
return nil
}
// SendError sends an error to the subscription error channel. If the subscription is closed, it returns an error.
func (s *Subscription[T]) SendError(ctx context.Context, err error) error {
select {
case s.err <- err:
case <-s.quitDone:
return errors.Wrap(errs.Closed, "subscription is closed")
case <-ctx.Done():
return errors.WithStack(ctx.Err())
}
return nil
}
// run starts the forwarding loop for the subscription.
func (s *Subscription[T]) run() {
defer close(s.quitDone)
for {
select {
case <-s.quit:
return
case value := <-s.in:
select {
case s.channel <- value:
case <-s.quit:
return
}
}
}
}
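Putting the pieces together: a producer pushes values through Send while a consumer reads from the channel it registered and watches the client's Done and Err channels. The sketch below is illustrative only; the value type, names, and termination condition are arbitrary.

```go
package subscription

import (
	"context"
	"fmt"
)

// example wires a producer (the "client dispatcher") to a consumer through a
// Subscription. Purely illustrative; not part of this change.
func example(ctx context.Context) error {
	out := make(chan int) // channel the consumer reads from
	sub := NewSubscription[int](out)
	client := sub.Client()
	defer client.Unsubscribe()

	// Producer side: push values until the subscription is closed.
	go func() {
		for v := 0; v < 3; v++ {
			if err := sub.Send(ctx, v); err != nil {
				return // subscription closed or context cancelled
			}
		}
	}()

	// Consumer side: read values, watch for errors and shutdown.
	for {
		select {
		case v := <-out:
			fmt.Println("received", v)
			if v == 2 {
				return nil // deferred Unsubscribe stops the forwarding loop
			}
		case err := <-client.Err():
			return err
		case <-client.Done():
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
```

The buffered in/err channels keep a slow consumer from blocking the dispatcher, which is the reason Send goes through the forwarding loop instead of writing to the consumer channel directly.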

main.go

@@ -0,0 +1,17 @@
package main
import (
"context"
"os"
"os/signal"
"syscall"
"github.com/gaze-network/indexer-network/cmd"
)
func main() {
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
defer stop()
cmd.Execute(ctx)
}


@@ -1 +0,0 @@
package bitcoin


modules/runes/api/api.go

@@ -0,0 +1,11 @@
package api
import (
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/runes/api/httphandler"
"github.com/gaze-network/indexer-network/modules/runes/usecase"
)
func NewHTTPHandler(network common.Network, usecase *usecase.Usecase) *httphandler.HttpHandler {
return httphandler.New(network, usecase)
}


@@ -0,0 +1,116 @@
package httphandler
import (
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getBalancesByAddressRequest struct {
Wallet string `params:"wallet"`
Id string `query:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getBalancesByAddressRequest) Validate() error {
var errList []error
if r.Wallet == "" {
errList = append(errList, errors.New("'wallet' is required"))
}
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type balance struct {
Amount uint128.Uint128 `json:"amount"`
Id runes.RuneId `json:"id"`
Name runes.SpacedRune `json:"name"`
Symbol string `json:"symbol"`
Decimals uint8 `json:"decimals"`
}
type getBalancesByAddressResult struct {
List []balance `json:"list"`
BlockHeight uint64 `json:"blockHeight"`
}
type getBalancesByAddressResponse = HttpResponse[getBalancesByAddressResult]
func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) {
var req getBalancesByAddressRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
pkScript, ok := resolvePkScript(h.network, req.Wallet)
if !ok {
return errs.NewPublicError("unable to resolve pkscript from \"wallet\"")
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
balances, err := h.usecase.GetBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetBalancesByPkScript")
}
runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id)
if ok {
// filter out balances that don't match the requested rune id
for key := range balances {
if key != runeId {
delete(balances, key)
}
}
}
balanceRuneIds := lo.Keys(balances)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), balanceRuneIds)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
balanceList := make([]balance, 0, len(balances))
for id, b := range balances {
runeEntry := runeEntries[id]
balanceList = append(balanceList, balance{
Amount: b.Amount,
Id: id,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Decimals: runeEntry.Divisibility,
})
}
slices.SortFunc(balanceList, func(i, j balance) int {
return j.Amount.Cmp(i.Amount)
})
resp := getBalancesByAddressResponse{
Result: &getBalancesByAddressResult{
BlockHeight: blockHeight,
List: balanceList,
},
}
return errors.WithStack(ctx.JSON(resp))
}


@@ -0,0 +1,139 @@
package httphandler
import (
"context"
"fmt"
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
"golang.org/x/sync/errgroup"
)
type getBalanceQuery struct {
Wallet string `json:"wallet"`
Id string `json:"id"`
BlockHeight uint64 `json:"blockHeight"`
}
type getBalancesByAddressBatchRequest struct {
Queries []getBalanceQuery `json:"queries"`
}
func (r getBalancesByAddressBatchRequest) Validate() error {
var errList []error
for i, query := range r.Queries {
if query.Wallet == "" {
errList = append(errList, errors.Errorf("queries[%d]: 'wallet' is required", i))
}
if query.Id != "" && !isRuneIdOrRuneName(query.Id) {
errList = append(errList, errors.Errorf("queries[%d]: 'id' is not valid rune id or rune name", i))
}
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type getBalancesByAddressBatchResult struct {
List []*getBalancesByAddressResult `json:"list"`
}
type getBalancesByAddressBatchResponse = HttpResponse[getBalancesByAddressBatchResult]
func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) {
var req getBalancesByAddressBatchRequest
if err := ctx.BodyParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
var latestBlockHeight uint64
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
latestBlockHeight = uint64(blockHeader.Height)
processQuery := func(ctx context.Context, query getBalanceQuery, queryIndex int) (*getBalancesByAddressResult, error) {
pkScript, ok := resolvePkScript(h.network, query.Wallet)
if !ok {
return nil, errs.NewPublicError(fmt.Sprintf("unable to resolve pkscript from \"queries[%d].wallet\"", queryIndex))
}
blockHeight := query.BlockHeight
if blockHeight == 0 {
blockHeight = latestBlockHeight
}
balances, err := h.usecase.GetBalancesByPkScript(ctx, pkScript, blockHeight)
if err != nil {
return nil, errors.Wrap(err, "error during GetBalancesByPkScript")
}
runeId, ok := h.resolveRuneId(ctx, query.Id)
if ok {
// filter out balances that don't match the requested rune id
for key := range balances {
if key != runeId {
delete(balances, key)
}
}
}
balanceRuneIds := lo.Keys(balances)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx, balanceRuneIds)
if err != nil {
return nil, errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
balanceList := make([]balance, 0, len(balances))
for id, b := range balances {
runeEntry := runeEntries[id]
balanceList = append(balanceList, balance{
Amount: b.Amount,
Id: id,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Decimals: runeEntry.Divisibility,
})
}
slices.SortFunc(balanceList, func(i, j balance) int {
return j.Amount.Cmp(i.Amount)
})
result := getBalancesByAddressResult{
BlockHeight: blockHeight,
List: balanceList,
}
return &result, nil
}
results := make([]*getBalancesByAddressResult, len(req.Queries))
eg, ectx := errgroup.WithContext(ctx.UserContext())
for i, query := range req.Queries {
i := i
query := query
eg.Go(func() error {
result, err := processQuery(ectx, query, i)
if err != nil {
return errors.Wrapf(err, "error during processQuery for query %d", i)
}
results[i] = result
return nil
})
}
if err := eg.Wait(); err != nil {
return errors.WithStack(err)
}
resp := getBalancesByAddressBatchResponse{
Result: &getBalancesByAddressBatchResult{
List: results,
},
}
return errors.WithStack(ctx.JSON(resp))
}
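GetBalancesByAddressBatch fans the queries out on an errgroup and writes each result into a preallocated slice slot keyed by the query's index, so responses keep request order without any locking. A stripped-down sketch of that pattern (standalone, with placeholder work instead of the usecase calls):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	queries := []string{"wallet-a", "wallet-b", "wallet-c"}
	results := make([]string, len(queries)) // one slot per query; no mutex needed
	eg, ectx := errgroup.WithContext(context.Background())
	for i, q := range queries {
		i, q := i, q // capture loop variables, as the handler does
		eg.Go(func() error {
			// a real worker would pass ectx to downstream calls so all
			// goroutines stop once any of them fails
			_ = ectx
			results[i] = "processed:" + q
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(results)
}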


@@ -0,0 +1,50 @@
package httphandler
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gofiber/fiber/v2"
)
var startingBlockHeader = map[common.Network]types.BlockHeader{
common.NetworkMainnet: {
Height: 839999,
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")),
},
common.NetworkTestnet: {
Height: 2583200,
Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")),
},
}
type getCurrentBlockResult struct {
Hash string `json:"hash"`
Height int64 `json:"height"`
}
type getCurrentBlockResponse = HttpResponse[getCurrentBlockResult]
func (h *HttpHandler) GetCurrentBlock(ctx *fiber.Ctx) (err error) {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
if !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeader = startingBlockHeader[h.network]
}
resp := getCurrentBlockResponse{
Result: &getCurrentBlockResult{
Hash: blockHeader.Hash.String(),
Height: blockHeader.Height,
},
}
return errors.WithStack(ctx.JSON(resp))
}


@@ -0,0 +1,114 @@
package httphandler
import (
"encoding/hex"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/shopspring/decimal"
)
type getHoldersRequest struct {
Id string `params:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getHoldersRequest) Validate() error {
var errList []error
if !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type holdingBalance struct {
Address string `json:"address"`
PkScript string `json:"pkScript"`
Amount uint128.Uint128 `json:"amount"`
Percent float64 `json:"percent"`
}
type getHoldersResult struct {
BlockHeight uint64 `json:"blockHeight"`
TotalSupply uint128.Uint128 `json:"totalSupply"`
MintedAmount uint128.Uint128 `json:"mintedAmount"`
List []holdingBalance `json:"list"`
}
type getHoldersResponse = HttpResponse[getHoldersResult]
func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) {
var req getHoldersRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
var runeId runes.RuneId
if req.Id != "" {
var ok bool
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
if !ok {
return errs.NewPublicError("unable to resolve rune id from \"id\"")
}
}
runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdAndHeight")
}
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetBalancesByRuneId")
}
totalSupply, err := runeEntry.Supply()
if err != nil {
return errors.Wrap(err, "cannot get total supply of rune")
}
mintedAmount, err := runeEntry.MintedAmount()
if err != nil {
return errors.Wrap(err, "cannot get minted amount of rune")
}
list := make([]holdingBalance, 0, len(holdingBalances))
for _, balance := range holdingBalances {
address := addressFromPkScript(balance.PkScript, h.network)
amount := decimal.NewFromBigInt(balance.Amount.Big(), 0)
percent := decimal.Zero
if !totalSupply.IsZero() {
percent = amount.Div(decimal.NewFromBigInt(totalSupply.Big(), 0)) // guard against division by zero on a zero-supply rune
}
list = append(list, holdingBalance{
Address: address,
PkScript: hex.EncodeToString(balance.PkScript),
Amount: balance.Amount,
Percent: percent.InexactFloat64(),
})
}
resp := getHoldersResponse{
Result: &getHoldersResult{
BlockHeight: blockHeight,
TotalSupply: totalSupply,
MintedAmount: mintedAmount,
List: list,
},
}
return errors.WithStack(ctx.JSON(resp))
}


@@ -0,0 +1,165 @@
package httphandler
import (
"slices"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getTokenInfoRequest struct {
Id string `params:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getTokenInfoRequest) Validate() error {
var errList []error
if !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type entryTerms struct {
Amount uint128.Uint128 `json:"amount"`
Cap uint128.Uint128 `json:"cap"`
HeightStart *uint64 `json:"heightStart"`
HeightEnd *uint64 `json:"heightEnd"`
OffsetStart *uint64 `json:"offsetStart"`
OffsetEnd *uint64 `json:"offsetEnd"`
}
type entry struct {
Divisibility uint8 `json:"divisibility"`
Premine uint128.Uint128 `json:"premine"`
Rune runes.Rune `json:"rune"`
Spacers uint32 `json:"spacers"`
Symbol string `json:"symbol"`
Terms entryTerms `json:"terms"`
Turbo bool `json:"turbo"`
}
type tokenInfoExtend struct {
Entry entry `json:"entry"`
}
type getTokenInfoResult struct {
Id runes.RuneId `json:"id"`
Name runes.SpacedRune `json:"name"` // rune name
Symbol string `json:"symbol"`
TotalSupply uint128.Uint128 `json:"totalSupply"`
CirculatingSupply uint128.Uint128 `json:"circulatingSupply"`
MintedAmount uint128.Uint128 `json:"mintedAmount"`
BurnedAmount uint128.Uint128 `json:"burnedAmount"`
Decimals uint8 `json:"decimals"`
DeployedAt uint64 `json:"deployedAt"` // unix timestamp
DeployedAtHeight uint64 `json:"deployedAtHeight"`
CompletedAt *uint64 `json:"completedAt"` // unix timestamp
CompletedAtHeight *uint64 `json:"completedAtHeight"`
HoldersCount int `json:"holdersCount"`
Extend tokenInfoExtend `json:"extend"`
}
type getTokenInfoResponse = HttpResponse[getTokenInfoResult]
func (h *HttpHandler) GetTokenInfo(ctx *fiber.Ctx) (err error) {
var req getTokenInfoRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
var runeId runes.RuneId
if req.Id != "" {
var ok bool
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
if !ok {
return errs.NewPublicError("unable to resolve rune id from \"id\"")
}
}
runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdAndHeight")
}
holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetBalancesByRuneId")
}
holdingBalances = lo.Filter(holdingBalances, func(b *entity.Balance, _ int) bool {
return !b.Amount.IsZero()
})
// sort by amount descending
slices.SortFunc(holdingBalances, func(i, j *entity.Balance) int {
return j.Amount.Cmp(i.Amount)
})
totalSupply, err := runeEntry.Supply()
if err != nil {
return errors.Wrap(err, "cannot get total supply of rune")
}
mintedAmount, err := runeEntry.MintedAmount()
if err != nil {
return errors.Wrap(err, "cannot get minted amount of rune")
}
circulatingSupply := mintedAmount.Sub(runeEntry.BurnedAmount)
terms := lo.FromPtr(runeEntry.Terms)
resp := getTokenInfoResponse{
Result: &getTokenInfoResult{
Id: runeId,
Name: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
TotalSupply: totalSupply,
CirculatingSupply: circulatingSupply,
MintedAmount: mintedAmount,
BurnedAmount: runeEntry.BurnedAmount,
Decimals: runeEntry.Divisibility,
DeployedAt: uint64(runeEntry.EtchedAt.Unix()),
DeployedAtHeight: runeEntry.EtchingBlock,
CompletedAt: lo.Ternary(runeEntry.CompletedAt.IsZero(), nil, lo.ToPtr(uint64(runeEntry.CompletedAt.Unix()))),
CompletedAtHeight: runeEntry.CompletedAtHeight,
HoldersCount: len(holdingBalances),
Extend: tokenInfoExtend{
Entry: entry{
Divisibility: runeEntry.Divisibility,
Premine: runeEntry.Premine,
Rune: runeEntry.SpacedRune.Rune,
Spacers: runeEntry.SpacedRune.Spacers,
Symbol: string(runeEntry.Symbol),
Terms: entryTerms{
Amount: lo.FromPtr(terms.Amount),
Cap: lo.FromPtr(terms.Cap),
HeightStart: terms.HeightStart,
HeightEnd: terms.HeightEnd,
OffsetStart: terms.OffsetStart,
OffsetEnd: terms.OffsetEnd,
},
Turbo: runeEntry.Turbo,
},
},
},
}
return errors.WithStack(ctx.JSON(resp))
}
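The supply figures above are related by circulatingSupply = mintedAmount - burnedAmount, with totalSupply taken from the entry's own Supply() calculation. A tiny illustration with assumed numbers (not taken from any real rune):

func exampleCirculatingSupply() uint128.Uint128 {
	minted := uint128.From64(1_000_000) // assumed minted amount
	burned := uint128.From64(25_000)    // assumed burned amount
	return minted.Sub(burned)           // 975_000 circulating
}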


@@ -0,0 +1,297 @@
package httphandler
import (
"encoding/hex"
"fmt"
"slices"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getTransactionsRequest struct {
Wallet string `query:"wallet"`
Id string `query:"id"`
FromBlock int64 `query:"fromBlock"`
ToBlock int64 `query:"toBlock"`
}
func (r getTransactionsRequest) Validate() error {
var errList []error
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
if r.FromBlock < -1 {
errList = append(errList, errors.Errorf("invalid fromBlock range"))
}
if r.ToBlock < -1 {
errList = append(errList, errors.Errorf("invalid toBlock range"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type txInputOutput struct {
PkScript string `json:"pkScript"`
Address string `json:"address"`
Id runes.RuneId `json:"id"`
Amount uint128.Uint128 `json:"amount"`
Decimals uint8 `json:"decimals"`
Index uint32 `json:"index"`
}
type terms struct {
Amount *uint128.Uint128 `json:"amount"`
Cap *uint128.Uint128 `json:"cap"`
HeightStart *uint64 `json:"heightStart"`
HeightEnd *uint64 `json:"heightEnd"`
OffsetStart *uint64 `json:"offsetStart"`
OffsetEnd *uint64 `json:"offsetEnd"`
}
type etching struct {
Divisibility *uint8 `json:"divisibility"`
Premine *uint128.Uint128 `json:"premine"`
Rune *runes.Rune `json:"rune"`
Spacers *uint32 `json:"spacers"`
Symbol *string `json:"symbol"`
Terms *terms `json:"terms"`
Turbo bool `json:"turbo"`
}
type edict struct {
Id runes.RuneId `json:"id"`
Amount uint128.Uint128 `json:"amount"`
Output int `json:"output"`
}
type runestone struct {
Cenotaph bool `json:"cenotaph"`
Flaws []string `json:"flaws"`
Etching *etching `json:"etching"`
Edicts []edict `json:"edicts"`
Mint *runes.RuneId `json:"mint"`
Pointer *uint64 `json:"pointer"`
}
type runeTransactionExtend struct {
RuneEtched bool `json:"runeEtched"`
Runestone *runestone `json:"runestone"`
}
type amountWithDecimal struct {
Amount uint128.Uint128 `json:"amount"`
Decimals uint8 `json:"decimals"`
}
type transaction struct {
TxHash chainhash.Hash `json:"txHash"`
BlockHeight uint64 `json:"blockHeight"`
Index uint32 `json:"index"`
Timestamp int64 `json:"timestamp"`
Inputs []txInputOutput `json:"inputs"`
Outputs []txInputOutput `json:"outputs"`
Mints map[string]amountWithDecimal `json:"mints"`
Burns map[string]amountWithDecimal `json:"burns"`
Extend runeTransactionExtend `json:"extend"`
}
type getTransactionsResult struct {
List []transaction `json:"list"`
}
type getTransactionsResponse = HttpResponse[getTransactionsResult]
func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) {
var req getTransactionsRequest
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
var pkScript []byte
if req.Wallet != "" {
var ok bool
pkScript, ok = resolvePkScript(h.network, req.Wallet)
if !ok {
return errs.NewPublicError("unable to resolve pkscript from \"wallet\"")
}
}
var runeId runes.RuneId
if req.Id != "" {
var ok bool
runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id)
if !ok {
return errs.NewPublicError("unable to resolve rune id from \"id\"")
}
}
// default to latest block
if req.ToBlock == 0 {
req.ToBlock = -1
}
// get latest block height if block height is -1
if req.FromBlock == -1 || req.ToBlock == -1 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
if req.FromBlock == -1 {
req.FromBlock = blockHeader.Height
}
if req.ToBlock == -1 {
req.ToBlock = blockHeader.Height
}
}
// validate block height range
if req.FromBlock > req.ToBlock {
return errs.NewPublicError(fmt.Sprintf("fromBlock must be less than or equal to toBlock, got fromBlock=%d, toBlock=%d", req.FromBlock, req.ToBlock))
}
txs, err := h.usecase.GetRuneTransactions(ctx.UserContext(), pkScript, runeId, uint64(req.FromBlock), uint64(req.ToBlock))
if err != nil {
return errors.Wrap(err, "error during GetRuneTransactions")
}
var allRuneIds []runes.RuneId
for _, tx := range txs {
for id := range tx.Mints {
allRuneIds = append(allRuneIds, id)
}
for id := range tx.Burns {
allRuneIds = append(allRuneIds, id)
}
for _, input := range tx.Inputs {
allRuneIds = append(allRuneIds, input.RuneId)
}
for _, output := range tx.Outputs {
allRuneIds = append(allRuneIds, output.RuneId)
}
}
allRuneIds = lo.Uniq(allRuneIds)
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), allRuneIds)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
txList := make([]transaction, 0, len(txs))
for _, tx := range txs {
respTx := transaction{
TxHash: tx.Hash,
BlockHeight: tx.BlockHeight,
Index: tx.Index,
Timestamp: tx.Timestamp.Unix(),
Inputs: make([]txInputOutput, 0, len(tx.Inputs)),
Outputs: make([]txInputOutput, 0, len(tx.Outputs)),
Mints: make(map[string]amountWithDecimal, len(tx.Mints)),
Burns: make(map[string]amountWithDecimal, len(tx.Burns)),
Extend: runeTransactionExtend{
RuneEtched: tx.RuneEtched,
Runestone: nil,
},
}
for _, input := range tx.Inputs {
address := addressFromPkScript(input.PkScript, h.network)
respTx.Inputs = append(respTx.Inputs, txInputOutput{
PkScript: hex.EncodeToString(input.PkScript),
Address: address,
Id: input.RuneId,
Amount: input.Amount,
Decimals: runeEntries[input.RuneId].Divisibility,
Index: input.Index,
})
}
for _, output := range tx.Outputs {
address := addressFromPkScript(output.PkScript, h.network)
respTx.Outputs = append(respTx.Outputs, txInputOutput{
PkScript: hex.EncodeToString(output.PkScript),
Address: address,
Id: output.RuneId,
Amount: output.Amount,
Decimals: runeEntries[output.RuneId].Divisibility,
Index: output.Index,
})
}
for id, amount := range tx.Mints {
respTx.Mints[id.String()] = amountWithDecimal{
Amount: amount,
Decimals: runeEntries[id].Divisibility,
}
}
for id, amount := range tx.Burns {
respTx.Burns[id.String()] = amountWithDecimal{
Amount: amount,
Decimals: runeEntries[id].Divisibility,
}
}
if tx.Runestone != nil {
var e *etching
if tx.Runestone.Etching != nil {
var symbol *string
if tx.Runestone.Etching.Symbol != nil {
symbol = lo.ToPtr(string(*tx.Runestone.Etching.Symbol))
}
var t *terms
if tx.Runestone.Etching.Terms != nil {
t = &terms{
Amount: tx.Runestone.Etching.Terms.Amount,
Cap: tx.Runestone.Etching.Terms.Cap,
HeightStart: tx.Runestone.Etching.Terms.HeightStart,
HeightEnd: tx.Runestone.Etching.Terms.HeightEnd,
OffsetStart: tx.Runestone.Etching.Terms.OffsetStart,
OffsetEnd: tx.Runestone.Etching.Terms.OffsetEnd,
}
}
e = &etching{
Divisibility: tx.Runestone.Etching.Divisibility,
Premine: tx.Runestone.Etching.Premine,
Rune: tx.Runestone.Etching.Rune,
Spacers: tx.Runestone.Etching.Spacers,
Symbol: symbol,
Terms: t,
Turbo: tx.Runestone.Etching.Turbo,
}
}
respTx.Extend.Runestone = &runestone{
Cenotaph: tx.Runestone.Cenotaph,
Flaws: lo.Ternary(tx.Runestone.Cenotaph, tx.Runestone.Flaws.CollectAsString(), nil),
Etching: e,
Edicts: lo.Map(tx.Runestone.Edicts, func(ed runes.Edict, _ int) edict {
return edict{
Id: ed.Id,
Amount: ed.Amount,
Output: ed.Output,
}
}),
Mint: tx.Runestone.Mint,
Pointer: tx.Runestone.Pointer,
}
}
txList = append(txList, respTx)
}
// sort by block height ASC, then index ASC
slices.SortFunc(txList, func(t1, t2 transaction) int {
if t1.BlockHeight != t2.BlockHeight {
return int(t1.BlockHeight) - int(t2.BlockHeight)
}
return int(t1.Index) - int(t2.Index) // convert before subtracting to avoid unsigned wraparound
})
resp := getTransactionsResponse{
Result: &getTransactionsResult{
List: txList,
},
}
return errors.WithStack(ctx.JSON(resp))
}
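GetTransactions treats toBlock of 0 or -1 as "latest block" and fromBlock of -1 as "latest block", then rejects ranges where fromBlock exceeds toBlock. A hypothetical helper (not part of this changeset; assumes the standard fmt package) condensing that resolution logic:

// resolveBlockRange mirrors the defaults applied by GetTransactions:
// toBlock 0 or -1 -> latest, fromBlock -1 -> latest, fromBlock 0 -> genesis.
func resolveBlockRange(fromBlock, toBlock, latest int64) (int64, int64, error) {
	if toBlock == 0 || toBlock == -1 {
		toBlock = latest
	}
	if fromBlock == -1 {
		fromBlock = latest
	}
	if fromBlock > toBlock {
		return 0, 0, fmt.Errorf("fromBlock %d must be less than or equal to toBlock %d", fromBlock, toBlock)
	}
	return fromBlock, toBlock, nil
}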


@@ -0,0 +1,146 @@
package httphandler
import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/gofiber/fiber/v2"
"github.com/samber/lo"
)
type getUTXOsByAddressRequest struct {
Wallet string `params:"wallet"`
Id string `query:"id"`
BlockHeight uint64 `query:"blockHeight"`
}
func (r getUTXOsByAddressRequest) Validate() error {
var errList []error
if r.Wallet == "" {
errList = append(errList, errors.New("'wallet' is required"))
}
if r.Id != "" && !isRuneIdOrRuneName(r.Id) {
errList = append(errList, errors.New("'id' is not valid rune id or rune name"))
}
return errs.WithPublicMessage(errors.Join(errList...), "validation error")
}
type runeBalance struct {
RuneId runes.RuneId `json:"runeId"`
Rune runes.SpacedRune `json:"rune"`
Symbol string `json:"symbol"`
Amount uint128.Uint128 `json:"amount"`
Divisibility uint8 `json:"divisibility"`
}
type utxoExtend struct {
Runes []runeBalance `json:"runes"`
}
type utxo struct {
TxHash chainhash.Hash `json:"txHash"`
OutputIndex uint32 `json:"outputIndex"`
Extend utxoExtend `json:"extend"`
}
type getUTXOsByAddressResult struct {
List []utxo `json:"list"`
BlockHeight uint64 `json:"blockHeight"`
}
type getUTXOsByAddressResponse = HttpResponse[getUTXOsByAddressResult]
func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) {
var req getUTXOsByAddressRequest
if err := ctx.ParamsParser(&req); err != nil {
return errors.WithStack(err)
}
if err := ctx.QueryParser(&req); err != nil {
return errors.WithStack(err)
}
if err := req.Validate(); err != nil {
return errors.WithStack(err)
}
pkScript, ok := resolvePkScript(h.network, req.Wallet)
if !ok {
return errs.NewPublicError("unable to resolve pkscript from \"wallet\"")
}
blockHeight := req.BlockHeight
if blockHeight == 0 {
blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext())
if err != nil {
return errors.Wrap(err, "error during GetLatestBlock")
}
blockHeight = uint64(blockHeader.Height)
}
outPointBalances, err := h.usecase.GetUnspentOutPointBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight)
if err != nil {
return errors.Wrap(err, "error during GetUnspentOutPointBalancesByPkScript")
}
outPointBalanceRuneIds := lo.Map(outPointBalances, func(outPointBalance *entity.OutPointBalance, _ int) runes.RuneId {
return outPointBalance.RuneId
})
runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), outPointBalanceRuneIds)
if err != nil {
return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch")
}
groupedBalances := lo.GroupBy(outPointBalances, func(outPointBalance *entity.OutPointBalance) wire.OutPoint {
return outPointBalance.OutPoint
})
utxoList := make([]utxo, 0, len(groupedBalances))
for outPoint, balances := range groupedBalances {
runeBalances := make([]runeBalance, 0, len(balances))
for _, balance := range balances {
runeEntry := runeEntries[balance.RuneId]
runeBalances = append(runeBalances, runeBalance{
RuneId: balance.RuneId,
Rune: runeEntry.SpacedRune,
Symbol: string(runeEntry.Symbol),
Amount: balance.Amount,
Divisibility: runeEntry.Divisibility,
})
}
utxoList = append(utxoList, utxo{
TxHash: outPoint.Hash,
OutputIndex: outPoint.Index,
Extend: utxoExtend{
Runes: runeBalances,
},
})
}
// filter by req.Id if exists
{
runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id)
if ok {
utxoList = lo.Filter(utxoList, func(u utxo, _ int) bool {
for _, runeBalance := range u.Extend.Runes {
if runeBalance.RuneId == runeId {
return true
}
}
return false
})
}
}
resp := getUTXOsByAddressResponse{
Result: &getUTXOsByAddressResult{
BlockHeight: blockHeight,
List: utxoList,
},
}
return errors.WithStack(ctx.JSON(resp))
}
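The UTXO handler flattens per-outpoint balance rows, groups them by outpoint with lo.GroupBy, and folds each group into one utxo entry. A compact sketch of just the grouping step, using simplified placeholder types rather than this changeset's entities:

type opBalance struct {
	OutPoint string // simplified stand-in for wire.OutPoint
	RuneID   string
	Amount   uint64
}

// groupByOutPoint collects all rune balances that live on the same outpoint.
func groupByOutPoint(rows []opBalance) map[string][]opBalance {
	return lo.GroupBy(rows, func(r opBalance) string { return r.OutPoint })
}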


@@ -0,0 +1,114 @@
package httphandler
import (
"context"
"encoding/hex"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/txscript"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/modules/runes/usecase"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
)
type HttpHandler struct {
usecase *usecase.Usecase
network common.Network
}
func New(network common.Network, usecase *usecase.Usecase) *HttpHandler {
return &HttpHandler{
usecase: usecase,
network: network,
}
}
type HttpResponse[T any] struct {
Error *string `json:"error"`
Result *T `json:"result,omitempty"`
}
func resolvePkScript(network common.Network, wallet string) ([]byte, bool) {
if wallet == "" {
return nil, false
}
defaultNet := func() *chaincfg.Params {
switch network {
case common.NetworkMainnet:
return &chaincfg.MainNetParams
case common.NetworkTestnet:
return &chaincfg.TestNet3Params
}
panic("invalid network")
}()
// attempt to parse as address
address, err := btcutil.DecodeAddress(wallet, defaultNet)
if err == nil {
pkScript, err := txscript.PayToAddrScript(address)
if err != nil {
return nil, false
}
return pkScript, true
}
// attempt to parse as pkscript
pkScript, err := hex.DecodeString(wallet)
if err != nil {
return nil, false
}
return pkScript, true
}
// TODO: extract this function somewhere else
// addressFromPkScript returns the address from the given pkScript. If the pkScript is invalid or not standard, it returns empty string.
func addressFromPkScript(pkScript []byte, network common.Network) string {
_, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, network.ChainParams())
if err != nil {
logger.Debug("unable to extract address from pkscript", slogx.Error(err))
return ""
}
if len(addrs) != 1 {
logger.Debug("invalid number of addresses extracted from pkscript. Expected only 1.", slogx.Int("numAddresses", len(addrs)))
return ""
}
return addrs[0].EncodeAddress()
}
func (h *HttpHandler) resolveRuneId(ctx context.Context, id string) (runes.RuneId, bool) {
if id == "" {
return runes.RuneId{}, false
}
// attempt to parse as rune id
runeId, err := runes.NewRuneIdFromString(id)
if err == nil {
return runeId, true
}
// attempt to parse as rune
rune, err := runes.NewRuneFromString(id)
if err == nil {
runeId, err := h.usecase.GetRuneIdFromRune(ctx, rune)
if err != nil {
return runes.RuneId{}, false
}
return runeId, true
}
return runes.RuneId{}, false
}
func isRuneIdOrRuneName(id string) bool {
if _, err := runes.NewRuneIdFromString(id); err == nil {
return true
}
if _, err := runes.NewRuneFromString(id); err == nil {
return true
}
return false
}
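resolvePkScript accepts either a Bitcoin address or a hex-encoded pkScript, and isRuneIdOrRuneName accepts either a "block:tx" rune id or a spelled-out rune name. A hedged usage sketch (the pkScript is the one from the entity test later in this changeset; the id is illustrative; common is assumed to be imported):

func exampleResolve() {
	// hex pkScript form; a bech32/base58 address string works the same way
	pkScript, ok := resolvePkScript(common.NetworkMainnet, "51203daaca9b82a51aca960c1491588246029d7e0fc49e0abdbcc8fd17574be5c74b")
	_ = pkScript
	_ = ok

	// accepts "840000:1"-style rune ids as well as rune names
	_ = isRuneIdOrRuneName("840000:1")
}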


@@ -0,0 +1,18 @@
package httphandler
import (
"github.com/gofiber/fiber/v2"
)
func (h *HttpHandler) Mount(router fiber.Router) error {
r := router.Group("/v2/runes")
r.Post("/balances/wallet/batch", h.GetBalancesByAddressBatch)
r.Get("/balances/wallet/:wallet", h.GetBalancesByAddress)
r.Get("/transactions", h.GetTransactions)
r.Get("/holders/:id", h.GetHolders)
r.Get("/info/:id", h.GetTokenInfo)
r.Get("/utxos/wallet/:wallet", h.GetUTXOsByAddress)
r.Get("/block", h.GetCurrentBlock)
return nil
}
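A hedged sketch of wiring this handler into a Fiber app; the usecase value is passed in by the caller, the import paths for httphandler and usecase are assumptions, and the listen address is illustrative:

func serveRunesAPI(uc *usecase.Usecase) error {
	app := fiber.New()
	h := httphandler.New(common.NetworkMainnet, uc)
	if err := h.Mount(app); err != nil { // registers the /v2/runes/... routes above
		return err
	}
	return app.Listen(":8080") // serve until the listener stops
}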


@@ -0,0 +1,10 @@
package config
import "github.com/gaze-network/indexer-network/internal/postgres"
type Config struct {
Datasource string `mapstructure:"datasource"` // Datasource to fetch bitcoin data for Meta-Protocol e.g. `bitcoin-node`
Database string `mapstructure:"database"` // Database to store runes data.
APIHandlers []string `mapstructure:"api_handlers"` // List of API handlers to enable. (e.g. `http`)
Postgres postgres.Config `mapstructure:"postgres"`
}
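The mapstructure tags describe how configuration keys map onto this struct. A minimal decode sketch, assuming github.com/mitchellh/mapstructure is the decoder behind those tags (the project may load this through its own config loader instead); the values are illustrative:

func exampleLoadConfig() (Config, error) {
	raw := map[string]any{
		"datasource":   "bitcoin-node",
		"database":     "postgres",
		"api_handlers": []string{"http"},
	}
	var cfg Config
	if err := mapstructure.Decode(raw, &cfg); err != nil {
		return Config{}, err
	}
	return cfg, nil
}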


@@ -0,0 +1,27 @@
package runes
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/core/types"
)
const (
Version = "v0.0.1"
DBVersion = 1
EventHashVersion = 1
)
var startingBlockHeader = map[common.Network]types.BlockHeader{
common.NetworkMainnet: {
Height: 839999,
Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")),
},
common.NetworkTestnet: {
Height: 2583200,
Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")),
PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")),
},
}


@@ -0,0 +1,14 @@
BEGIN;
DROP TABLE IF EXISTS "runes_indexer_stats";
DROP TABLE IF EXISTS "runes_indexer_db_version";
DROP TABLE IF EXISTS "runes_processor_state";
DROP TABLE IF EXISTS "runes_indexed_blocks";
DROP TABLE IF EXISTS "runes_entries";
DROP TABLE IF EXISTS "runes_entry_states";
DROP TABLE IF EXISTS "runes_transactions";
DROP TABLE IF EXISTS "runes_runestones";
DROP TABLE IF EXISTS "runes_outpoint_balances";
DROP TABLE IF EXISTS "runes_balances";
COMMIT;


@@ -0,0 +1,122 @@
BEGIN;
-- Indexer Client Information
CREATE TABLE IF NOT EXISTS "runes_indexer_stats" (
"id" BIGSERIAL PRIMARY KEY,
"client_version" TEXT NOT NULL,
"network" TEXT NOT NULL,
"created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS "runes_indexer_state" (
"id" BIGSERIAL PRIMARY KEY,
"db_version" INT NOT NULL,
"event_hash_version" INT NOT NULL,
"created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS runes_indexer_state_created_at_idx ON "runes_indexer_state" USING BTREE ("created_at" DESC);
-- Runes data
CREATE TABLE IF NOT EXISTS "runes_indexed_blocks" (
"height" INT NOT NULL PRIMARY KEY,
"hash" TEXT NOT NULL,
"prev_hash" TEXT NOT NULL,
"event_hash" TEXT NOT NULL,
"cumulative_event_hash" TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS "runes_entries" (
"rune_id" TEXT NOT NULL PRIMARY KEY,
"number" BIGINT NOT NULL, -- sequential number of the rune starting from 0
"rune" TEXT NOT NULL,
"spacers" INT NOT NULL,
"premine" DECIMAL NOT NULL,
"symbol" INT NOT NULL,
"divisibility" SMALLINT NOT NULL,
"terms" BOOLEAN NOT NULL, -- if true, then minting term exists for this entry
"terms_amount" DECIMAL,
"terms_cap" DECIMAL,
"terms_height_start" INT,
"terms_height_end" INT,
"terms_offset_start" INT,
"terms_offset_end" INT,
"turbo" BOOLEAN NOT NULL,
"etching_block" INT NOT NULL,
"etching_tx_hash" TEXT NOT NULL,
"etched_at" TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX IF NOT EXISTS runes_entries_rune_idx ON "runes_entries" USING BTREE ("rune");
CREATE UNIQUE INDEX IF NOT EXISTS runes_entries_number_idx ON "runes_entries" USING BTREE ("number");
CREATE TABLE IF NOT EXISTS "runes_entry_states" (
"rune_id" TEXT NOT NULL,
"block_height" INT NOT NULL,
"mints" DECIMAL NOT NULL,
"burned_amount" DECIMAL NOT NULL,
"completed_at" TIMESTAMP,
"completed_at_height" INT,
PRIMARY KEY ("rune_id", "block_height")
);
CREATE TABLE IF NOT EXISTS "runes_transactions" (
"hash" TEXT NOT NULL PRIMARY KEY,
"block_height" INT NOT NULL,
"index" INT NOT NULL,
"timestamp" TIMESTAMP NOT NULL,
"inputs" JSONB NOT NULL,
"outputs" JSONB NOT NULL,
"mints" JSONB NOT NULL,
"burns" JSONB NOT NULL,
"rune_etched" BOOLEAN NOT NULL
);
CREATE INDEX IF NOT EXISTS runes_transactions_block_height_idx ON "runes_transactions" USING BTREE ("block_height");
CREATE INDEX IF NOT EXISTS runes_transactions_jsonb_idx ON "runes_transactions" USING GIN ("inputs", "outputs", "mints", "burns");
CREATE TABLE IF NOT EXISTS "runes_runestones" (
"tx_hash" TEXT NOT NULL PRIMARY KEY,
"block_height" INT NOT NULL,
"etching" BOOLEAN NOT NULL,
"etching_divisibility" SMALLINT,
"etching_premine" DECIMAL,
"etching_rune" TEXT,
"etching_spacers" INT,
"etching_symbol" INT,
"etching_terms" BOOLEAN,
"etching_terms_amount" DECIMAL,
"etching_terms_cap" DECIMAL,
"etching_terms_height_start" INT,
"etching_terms_height_end" INT,
"etching_terms_offset_start" INT,
"etching_terms_offset_end" INT,
"etching_turbo" BOOLEAN,
"edicts" JSONB NOT NULL DEFAULT '[]',
"mint" TEXT,
"pointer" INT,
"cenotaph" BOOLEAN NOT NULL,
"flaws" INT NOT NULL
);
CREATE TABLE IF NOT EXISTS "runes_outpoint_balances" (
"rune_id" TEXT NOT NULL,
"pkscript" TEXT NOT NULL,
"tx_hash" TEXT NOT NULL,
"tx_idx" INT NOT NULL, -- output index
"amount" DECIMAL NOT NULL,
"block_height" INT NOT NULL, -- block height when this output was created
"spent_height" INT, -- block height when this output was spent
PRIMARY KEY ("rune_id", "tx_hash", "tx_idx")
);
CREATE INDEX IF NOT EXISTS runes_outpoint_balances_tx_hash_tx_idx_idx ON "runes_outpoint_balances" USING BTREE ("tx_hash", "tx_idx");
CREATE INDEX IF NOT EXISTS runes_outpoint_balances_pkscript_block_height_spent_height_idx ON "runes_outpoint_balances" USING BTREE ("pkscript", "block_height", "spent_height");
CREATE TABLE IF NOT EXISTS "runes_balances" (
"pkscript" TEXT NOT NULL,
"block_height" INT NOT NULL,
"rune_id" TEXT NOT NULL,
"amount" DECIMAL NOT NULL,
PRIMARY KEY ("pkscript", "rune_id", "block_height")
);
COMMIT;


@@ -0,0 +1,119 @@
-- name: GetBalancesByPkScript :many
WITH balances AS (
SELECT DISTINCT ON (rune_id) * FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC
)
SELECT * FROM balances WHERE amount > 0;
-- name: GetBalancesByRuneId :many
WITH balances AS (
SELECT DISTINCT ON (pkscript) * FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, block_height DESC
)
SELECT * FROM balances WHERE amount > 0;
-- name: GetBalanceByPkScriptAndRuneId :one
SELECT * FROM runes_balances WHERE pkscript = $1 AND rune_id = $2 AND block_height <= $3 ORDER BY block_height DESC LIMIT 1;
-- name: GetOutPointBalancesAtOutPoint :many
SELECT * FROM runes_outpoint_balances WHERE tx_hash = $1 AND tx_idx = $2;
-- name: GetUnspentOutPointBalancesByPkScript :many
SELECT * FROM runes_outpoint_balances WHERE pkscript = @pkScript AND block_height <= @block_height AND (spent_height IS NULL OR spent_height > @block_height);
-- name: GetRuneEntriesByRuneIds :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) * FROM runes_entry_states WHERE rune_id = ANY(@rune_ids::text[]) ORDER BY rune_id, block_height DESC
)
SELECT * FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE runes_entries.rune_id = ANY(@rune_ids::text[]);
-- name: GetRuneEntriesByRuneIdsAndHeight :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) * FROM runes_entry_states WHERE rune_id = ANY(@rune_ids::text[]) AND block_height <= @height ORDER BY rune_id, block_height DESC
)
SELECT * FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE runes_entries.rune_id = ANY(@rune_ids::text[]) AND etching_block <= @height;
-- name: GetRuneIdFromRune :one
SELECT rune_id FROM runes_entries WHERE rune = $1;
-- name: GetRuneTransactions :many
SELECT * FROM runes_transactions
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
WHERE (
@filter_pk_script::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
OR runes_transactions.outputs @> @pk_script_param::JSONB
OR runes_transactions.inputs @> @pk_script_param::JSONB
) AND (
@filter_rune_id::BOOLEAN = FALSE -- if @filter_rune_id is TRUE, apply rune_id filter
OR runes_transactions.outputs @> @rune_id_param::JSONB
OR runes_transactions.inputs @> @rune_id_param::JSONB
OR runes_transactions.mints ? @rune_id
OR runes_transactions.burns ? @rune_id
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = @rune_id_block_height AND runes_transactions.index = @rune_id_tx_index)
) AND (
@from_block <= runes_transactions.block_height AND runes_transactions.block_height <= @to_block
)
ORDER BY runes_transactions.block_height DESC LIMIT 10000;
-- name: CountRuneEntries :one
SELECT COUNT(*) FROM runes_entries;
-- name: CreateRuneEntry :exec
INSERT INTO runes_entries (rune_id, rune, number, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18);
-- name: CreateRuneEntryState :exec
INSERT INTO runes_entry_states (rune_id, block_height, mints, burned_amount, completed_at, completed_at_height) VALUES ($1, $2, $3, $4, $5, $6);
-- name: CreateRuneTransaction :exec
INSERT INTO runes_transactions (hash, block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9);
-- name: CreateRunestone :exec
INSERT INTO runes_runestones (tx_hash, block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21);
-- name: CreateOutPointBalances :batchexec
INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7);
-- name: SpendOutPointBalances :exec
UPDATE runes_outpoint_balances SET spent_height = $1 WHERE tx_hash = $2 AND tx_idx = $3;
-- name: CreateRuneBalanceAtBlock :batchexec
INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4);
-- name: GetLatestIndexedBlock :one
SELECT * FROM runes_indexed_blocks ORDER BY height DESC LIMIT 1;
-- name: GetIndexedBlockByHeight :one
SELECT * FROM runes_indexed_blocks WHERE height = $1;
-- name: CreateIndexedBlock :exec
INSERT INTO runes_indexed_blocks (hash, height, prev_hash, event_hash, cumulative_event_hash) VALUES ($1, $2, $3, $4, $5);
-- name: DeleteIndexedBlockSinceHeight :exec
DELETE FROM runes_indexed_blocks WHERE height >= $1;
-- name: DeleteRuneEntriesSinceHeight :exec
DELETE FROM runes_entries WHERE etching_block >= $1;
-- name: DeleteRuneEntryStatesSinceHeight :exec
DELETE FROM runes_entry_states WHERE block_height >= $1;
-- name: DeleteRuneTransactionsSinceHeight :exec
DELETE FROM runes_transactions WHERE block_height >= $1;
-- name: DeleteRunestonesSinceHeight :exec
DELETE FROM runes_runestones WHERE block_height >= $1;
-- name: DeleteOutPointBalancesSinceHeight :exec
DELETE FROM runes_outpoint_balances WHERE block_height >= $1;
-- name: UnspendOutPointBalancesSinceHeight :exec
UPDATE runes_outpoint_balances SET spent_height = NULL WHERE spent_height >= $1;
-- name: DeleteRuneBalancesSinceHeight :exec
DELETE FROM runes_balances WHERE block_height >= $1;
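GetBalancesByPkScript and GetBalancesByRuneId both rely on SELECT DISTINCT ON (key) ... ORDER BY key, block_height DESC to pick the newest balance row per key at or below the requested height, then drop zero amounts. A small Go sketch of the same "latest row per key" selection, included only to illustrate the query semantics (placeholder types, not generated code):

type balanceRow struct {
	RuneID      string
	BlockHeight uint64
	Amount      uint64
}

// latestPerRune keeps, for each rune id, the row with the highest block height
// not exceeding maxHeight, then discards zero balances -- the in-memory
// equivalent of the DISTINCT ON query above.
func latestPerRune(rows []balanceRow, maxHeight uint64) map[string]balanceRow {
	latest := make(map[string]balanceRow)
	for _, r := range rows {
		if r.BlockHeight > maxHeight {
			continue
		}
		if cur, ok := latest[r.RuneID]; !ok || r.BlockHeight > cur.BlockHeight {
			latest[r.RuneID] = r
		}
	}
	for id, r := range latest {
		if r.Amount == 0 {
			delete(latest, id)
		}
	}
	return latest
}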


@@ -0,0 +1,11 @@
-- name: GetLatestIndexerState :one
SELECT * FROM runes_indexer_state ORDER BY created_at DESC LIMIT 1;
-- name: SetIndexerState :exec
INSERT INTO runes_indexer_state (db_version, event_hash_version) VALUES ($1, $2);
-- name: GetLatestIndexerStats :one
SELECT "client_version", "network" FROM runes_indexer_stats ORDER BY id DESC LIMIT 1;
-- name: UpdateIndexerStats :exec
INSERT INTO runes_indexer_stats (client_version, network) VALUES ($1, $2);


@@ -0,0 +1,15 @@
package datagateway
import (
"context"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
)
type IndexerInfoDataGateway interface {
GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error)
GetLatestIndexerStats(ctx context.Context) (version string, network common.Network, err error)
SetIndexerState(ctx context.Context, state entity.IndexerState) error
UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error
}


@@ -0,0 +1,81 @@
package datagateway
import (
"context"
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
)
type RunesDataGateway interface {
RunesReaderDataGateway
RunesWriterDataGateway
// BeginRunesTx returns a new RunesDataGateway with transaction enabled. All write operations performed in this datagateway must be committed to persist changes.
BeginRunesTx(ctx context.Context) (RunesDataGatewayWithTx, error)
}
type RunesDataGatewayWithTx interface {
RunesDataGateway
Tx
}
type RunesReaderDataGateway interface {
GetLatestBlock(ctx context.Context) (types.BlockHeader, error)
GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error)
// GetRuneTransactions returns rune transactions filtered by pkScript, runeId, and the [fromBlock, toBlock] height range. A zero-value pkScript or runeId disables that filter.
GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64) ([]*entity.RuneTransaction, error)
GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error)
GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error)
// GetRuneIdFromRune returns the RuneId for the given rune. Returns errs.NotFound if the rune entry is not found.
GetRuneIdFromRune(ctx context.Context, rune runes.Rune) (runes.RuneId, error)
// GetRuneEntryByRuneId returns the RuneEntry for the given runeId. Returns errs.NotFound if the rune entry is not found.
GetRuneEntryByRuneId(ctx context.Context, runeId runes.RuneId) (*runes.RuneEntry, error)
// GetRuneEntryByRuneIdBatch returns the RuneEntries for the given runeIds.
GetRuneEntryByRuneIdBatch(ctx context.Context, runeIds []runes.RuneId) (map[runes.RuneId]*runes.RuneEntry, error)
// GetRuneEntryByRuneIdAndHeight returns the RuneEntry for the given runeId and block height. Returns errs.NotFound if the rune entry is not found.
GetRuneEntryByRuneIdAndHeight(ctx context.Context, runeId runes.RuneId, blockHeight uint64) (*runes.RuneEntry, error)
// GetRuneEntryByRuneIdAndHeightBatch returns the RuneEntries for the given runeIds and block height.
GetRuneEntryByRuneIdAndHeightBatch(ctx context.Context, runeIds []runes.RuneId, blockHeight uint64) (map[runes.RuneId]*runes.RuneEntry, error)
// CountRuneEntries returns the number of existing rune entries.
CountRuneEntries(ctx context.Context) (uint64, error)
// GetBalancesByPkScript returns the balances for the given pkScript at the given blockHeight.
GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error)
// GetBalancesByRuneId returns the balances for the given runeId at the given blockHeight.
// Cannot use []byte (pkScript) as a map key, so the result is returned as a slice.
GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error)
// GetBalanceByPkScriptAndRuneId returns the balance for the given pkScript and runeId at the given blockHeight.
GetBalanceByPkScriptAndRuneId(ctx context.Context, pkScript []byte, runeId runes.RuneId, blockHeight uint64) (*entity.Balance, error)
}
type RunesWriterDataGateway interface {
CreateRuneEntry(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error
CreateRuneEntryState(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error
CreateOutPointBalances(ctx context.Context, outPointBalances []*entity.OutPointBalance) error
SpendOutPointBalances(ctx context.Context, outPoint wire.OutPoint, blockHeight uint64) error
CreateRuneBalances(ctx context.Context, params []CreateRuneBalancesParams) error
CreateRuneTransaction(ctx context.Context, tx *entity.RuneTransaction) error
CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error
// TODO: collapse these into a single function (ResetStateToHeight)?
DeleteIndexedBlockSinceHeight(ctx context.Context, height uint64) error
DeleteRuneEntriesSinceHeight(ctx context.Context, height uint64) error
DeleteRuneEntryStatesSinceHeight(ctx context.Context, height uint64) error
DeleteRuneTransactionsSinceHeight(ctx context.Context, height uint64) error
DeleteRunestonesSinceHeight(ctx context.Context, height uint64) error
DeleteOutPointBalancesSinceHeight(ctx context.Context, height uint64) error
UnspendOutPointBalancesSinceHeight(ctx context.Context, height uint64) error
DeleteRuneBalancesSinceHeight(ctx context.Context, height uint64) error
}
type CreateRuneBalancesParams struct {
PkScript []byte
RuneId runes.RuneId
Balance uint128.Uint128
BlockHeight uint64
}


@@ -0,0 +1,12 @@
package datagateway
import "context"
type Tx interface {
// Commit commits the DB transaction. All changes made after Begin() will be persisted. Calling Commit() will close the current transaction.
// If Commit() is called without a prior Begin(), it must be a no-op.
Commit(ctx context.Context) error
// Rollback rolls back the DB transaction. All changes made after Begin() will be discarded.
// Rollback() must be safe to call even if no transaction is active. Hence, a defer Rollback() is safe, even if Commit() was called prior with non-error conditions.
Rollback(ctx context.Context) error
}
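A hedged usage sketch of the Tx contract together with BeginRunesTx: begin, defer Rollback, write, then Commit. The function name and the single write call are placeholders, not code from this changeset:

func indexBlockAtomically(ctx context.Context, dg datagateway.RunesDataGateway, blk *entity.IndexedBlock) error {
	tx, err := dg.BeginRunesTx(ctx)
	if err != nil {
		return err
	}
	// Safe even after a successful Commit, per the Rollback contract above.
	defer func() { _ = tx.Rollback(ctx) }()

	if err := tx.CreateIndexedBlock(ctx, blk); err != nil {
		return err
	}
	return tx.Commit(ctx)
}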

modules/runes/event_hash.go

@@ -0,0 +1,372 @@
package runes
import (
"bytes"
"encoding/hex"
"slices"
"strconv"
"strings"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
// TODO: implement test to ensure that the event hash is calculated the same way for same version
func (p *Processor) calculateEventHash(header types.BlockHeader) (chainhash.Hash, error) {
payload, err := p.getHashPayload(header)
if err != nil {
return chainhash.Hash{}, errors.Wrap(err, "failed to get hash payload")
}
return chainhash.DoubleHashH(payload), nil
}
func (p *Processor) getHashPayload(header types.BlockHeader) ([]byte, error) {
var sb strings.Builder
sb.WriteString("payload:v" + strconv.Itoa(EventHashVersion) + ":")
sb.WriteString("blockHash:")
sb.Write(header.Hash[:])
// serialize new rune entries
{
runeEntries := lo.Values(p.newRuneEntries)
slices.SortFunc(runeEntries, func(t1, t2 *runes.RuneEntry) int {
return int(t1.Number) - int(t2.Number)
})
for _, entry := range runeEntries {
sb.Write(serializeNewRuneEntry(entry))
}
}
// serialize new rune entry states
{
runeIds := lo.Keys(p.newRuneEntryStates)
slices.SortFunc(runeIds, func(t1, t2 runes.RuneId) int {
return t1.Cmp(t2)
})
for _, runeId := range runeIds {
sb.Write(serializeNewRuneEntryState(p.newRuneEntryStates[runeId]))
}
}
// serialize new out point balances
sb.Write(serializeNewOutPointBalances(p.newOutPointBalances))
// serialize spend out points
sb.Write(serializeSpendOutPoints(p.newSpendOutPoints))
// serialize new balances
{
bytes, err := serializeNewBalances(p.newBalances)
if err != nil {
return nil, errors.Wrap(err, "failed to serialize new balances")
}
sb.Write(bytes)
}
// serialize new txs
// sort txs by block height and index
{
bytes, err := serializeRuneTxs(p.newRuneTxs)
if err != nil {
return nil, errors.Wrap(err, "failed to serialize new rune txs")
}
sb.Write(bytes)
}
return []byte(sb.String()), nil
}
func serializeNewRuneEntry(entry *runes.RuneEntry) []byte {
var sb strings.Builder
sb.WriteString("newRuneEntry:")
// nolint:goconst
sb.WriteString("runeId:" + entry.RuneId.String())
sb.WriteString("number:" + strconv.Itoa(int(entry.Number)))
sb.WriteString("divisibility:" + strconv.Itoa(int(entry.Divisibility)))
sb.WriteString("premine:" + entry.Premine.String())
sb.WriteString("rune:" + entry.SpacedRune.Rune.String())
sb.WriteString("spacers:" + strconv.Itoa(int(entry.SpacedRune.Spacers)))
sb.WriteString("symbol:" + string(entry.Symbol))
if entry.Terms != nil {
sb.WriteString("terms:")
terms := entry.Terms
if terms.Amount != nil {
// nolint:goconst
sb.WriteString("amount:" + terms.Amount.String())
}
if terms.Cap != nil {
sb.WriteString("cap:" + terms.Cap.String())
}
if terms.HeightStart != nil {
sb.WriteString("heightStart:" + strconv.Itoa(int(*terms.HeightStart)))
}
if terms.HeightEnd != nil {
sb.WriteString("heightEnd:" + strconv.Itoa(int(*terms.HeightEnd)))
}
if terms.OffsetStart != nil {
sb.WriteString("offsetStart:" + strconv.Itoa(int(*terms.OffsetStart)))
}
if terms.OffsetEnd != nil {
sb.WriteString("offsetEnd:" + strconv.Itoa(int(*terms.OffsetEnd)))
}
}
sb.WriteString("turbo:" + strconv.FormatBool(entry.Turbo))
sb.WriteString("etchingBlock:" + strconv.Itoa(int(entry.EtchingBlock)))
sb.WriteString("etchingTxHash:" + entry.EtchingTxHash.String())
sb.WriteString("etchedAt:" + strconv.Itoa(int(entry.EtchedAt.Unix())))
sb.WriteString(";")
return []byte(sb.String())
}
func serializeNewRuneEntryState(entry *runes.RuneEntry) []byte {
var sb strings.Builder
sb.WriteString("newRuneEntryState:")
// write only mutable states
sb.WriteString("runeId:" + entry.RuneId.String())
sb.WriteString("mints:" + entry.Mints.String())
sb.WriteString("burnedAmount:" + entry.BurnedAmount.String())
if entry.CompletedAtHeight != nil {
sb.WriteString("completedAtHeight:" + strconv.Itoa(int(*entry.CompletedAtHeight)))
sb.WriteString("completedAt:" + strconv.Itoa(int(entry.CompletedAt.Unix())))
}
sb.WriteString(";")
return []byte(sb.String())
}
func serializeNewOutPointBalances(outPointBalances map[wire.OutPoint][]*entity.OutPointBalance) []byte {
var sb strings.Builder
sb.WriteString("newOutPointBalances:")
// collect balance values
newBalances := make([]*entity.OutPointBalance, 0)
for _, balances := range outPointBalances {
newBalances = append(newBalances, balances...)
}
// sort balances to ensure order
slices.SortFunc(newBalances, func(t1, t2 *entity.OutPointBalance) int {
// sort by outpoint first
if t1.OutPoint != t2.OutPoint {
if t1.OutPoint.Hash != t2.OutPoint.Hash {
return bytes.Compare(t1.OutPoint.Hash[:], t2.OutPoint.Hash[:])
}
return int(t1.OutPoint.Index) - int(t2.OutPoint.Index)
}
// sort by runeId
return t1.RuneId.Cmp(t2.RuneId)
})
for _, balance := range newBalances {
sb.WriteString("outPoint:")
sb.WriteString("hash:")
sb.Write(balance.OutPoint.Hash[:])
sb.WriteString("index:" + strconv.Itoa(int(balance.OutPoint.Index)))
sb.WriteString("pkScript:")
sb.Write(balance.PkScript)
sb.WriteString("runeId:" + balance.RuneId.String())
sb.WriteString("amount:" + balance.Amount.String())
sb.WriteString(";")
}
return []byte(sb.String())
}
func serializeSpendOutPoints(spendOutPoints []wire.OutPoint) []byte {
var sb strings.Builder
sb.WriteString("spendOutPoints:")
// sort outpoints to ensure order
slices.SortFunc(spendOutPoints, func(t1, t2 wire.OutPoint) int {
if t1.Hash != t2.Hash {
return bytes.Compare(t1.Hash[:], t2.Hash[:])
}
return int(t1.Index) - int(t2.Index)
})
for _, outPoint := range spendOutPoints {
sb.WriteString("hash:")
sb.Write(outPoint.Hash[:])
sb.WriteString("index:" + strconv.Itoa(int(outPoint.Index)))
sb.WriteString(";")
}
return []byte(sb.String())
}
func serializeNewBalances(balances map[string]map[runes.RuneId]uint128.Uint128) ([]byte, error) {
var sb strings.Builder
sb.WriteString("newBalances:")
pkScriptStrs := lo.Keys(balances)
// sort pkScripts to ensure order
slices.SortFunc(pkScriptStrs, func(t1, t2 string) int {
return strings.Compare(t1, t2)
})
for _, pkScriptStr := range pkScriptStrs {
runeIds := lo.Keys(balances[pkScriptStr])
// sort runeIds to ensure order
slices.SortFunc(runeIds, func(t1, t2 runes.RuneId) int {
return t1.Cmp(t2)
})
pkScript, err := hex.DecodeString(pkScriptStr)
if err != nil {
return nil, errors.Wrap(err, "failed to decode pkScript")
}
for _, runeId := range runeIds {
sb.WriteString("pkScript:")
sb.Write(pkScript)
sb.WriteString("runeId:" + runeId.String())
sb.WriteString("amount:" + balances[pkScriptStr][runeId].String())
sb.WriteString(";")
}
}
return []byte(sb.String()), nil
}
func serializeRuneTxs(txs []*entity.RuneTransaction) ([]byte, error) {
var sb strings.Builder
slices.SortFunc(txs, func(t1, t2 *entity.RuneTransaction) int {
if t1.BlockHeight != t2.BlockHeight {
return int(t1.BlockHeight) - int(t2.BlockHeight)
}
return int(t1.Index) - int(t2.Index)
})
sb.WriteString("txs:")
for _, tx := range txs {
sb.WriteString("hash:")
sb.Write(tx.Hash[:])
sb.WriteString("blockHeight:" + strconv.Itoa(int(tx.BlockHeight)))
sb.WriteString("index:" + strconv.Itoa(int(tx.Index)))
writeOutPointBalance := func(ob *entity.TxInputOutput) {
sb.WriteString("pkScript:")
sb.Write(ob.PkScript)
sb.WriteString("runeId:" + ob.RuneId.String())
sb.WriteString("amount:" + ob.Amount.String())
sb.WriteString("index:" + strconv.Itoa(int(ob.Index)))
sb.WriteString("txHash:")
sb.Write(ob.TxHash[:])
sb.WriteString("txOutIndex:" + strconv.Itoa(int(ob.TxOutIndex)))
sb.WriteString(";")
}
// sort inputs to ensure order
slices.SortFunc(tx.Inputs, func(t1, t2 *entity.TxInputOutput) int {
if t1.Index != t2.Index {
return int(t1.Index) - int(t2.Index)
}
return t1.RuneId.Cmp(t2.RuneId)
})
sb.WriteString("in:")
for _, in := range tx.Inputs {
writeOutPointBalance(in)
}
// sort outputs to ensure order
slices.SortFunc(tx.Outputs, func(t1, t2 *entity.TxInputOutput) int {
if t1.Index != t2.Index {
return int(t1.Index) - int(t2.Index)
}
return t1.RuneId.Cmp(t2.RuneId)
})
sb.WriteString("out:")
for _, out := range tx.Outputs {
writeOutPointBalance(out)
}
mintsKeys := lo.Keys(tx.Mints)
slices.SortFunc(mintsKeys, func(t1, t2 runes.RuneId) int {
return t1.Cmp(t2)
})
sb.WriteString("mints:")
for _, runeId := range mintsKeys {
amount := tx.Mints[runeId]
sb.WriteString(runeId.String())
sb.WriteString(amount.String())
sb.WriteString(";")
}
burnsKeys := lo.Keys(tx.Burns)
slices.SortFunc(burnsKeys, func(t1, t2 runes.RuneId) int {
return t1.Cmp(t2)
})
sb.WriteString("burns:")
for _, runeId := range burnsKeys {
amount := tx.Burns[runeId]
sb.WriteString(runeId.String())
sb.WriteString(amount.String())
sb.WriteString(";")
}
sb.WriteString("runeEtched:" + strconv.FormatBool(tx.RuneEtched))
sb.Write(serializeRunestoneForEventHash(tx.Runestone))
sb.WriteString(";")
}
return []byte(sb.String()), nil
}
func serializeRunestoneForEventHash(r *runes.Runestone) []byte {
if r == nil {
return []byte("rune:nil")
}
var sb strings.Builder
sb.WriteString("rune:")
if r.Etching != nil {
etching := r.Etching
sb.WriteString("etching:")
if etching.Divisibility != nil {
sb.WriteString("divisibility:" + strconv.Itoa(int(*etching.Divisibility)))
}
if etching.Premine != nil {
sb.WriteString("premine:" + etching.Premine.String())
}
if etching.Rune != nil {
sb.WriteString("rune:" + etching.Rune.String())
}
if etching.Spacers != nil {
sb.WriteString("spacers:" + strconv.Itoa(int(*etching.Spacers)))
}
if etching.Symbol != nil {
sb.WriteString("symbol:" + string(*etching.Symbol))
}
if etching.Terms != nil {
terms := etching.Terms
if terms.Amount != nil {
sb.WriteString("amount:" + terms.Amount.String())
}
if terms.Cap != nil {
sb.WriteString("cap:" + terms.Cap.String())
}
if terms.HeightStart != nil {
sb.WriteString("heightStart:" + strconv.Itoa(int(*terms.HeightStart)))
}
if terms.HeightEnd != nil {
sb.WriteString("heightEnd:" + strconv.Itoa(int(*terms.HeightEnd)))
}
if terms.OffsetStart != nil {
sb.WriteString("offsetStart:" + strconv.Itoa(int(*terms.OffsetStart)))
}
if terms.OffsetEnd != nil {
sb.WriteString("offsetEnd:" + strconv.Itoa(int(*terms.OffsetEnd)))
}
}
if etching.Turbo {
sb.WriteString("turbo:" + strconv.FormatBool(etching.Turbo))
}
}
if len(r.Edicts) > 0 {
sb.WriteString("edicts:")
// don't sort edicts, order must be kept the same because of delta encoding
for _, edict := range r.Edicts {
sb.WriteString(edict.Id.String() + edict.Amount.String() + strconv.Itoa(edict.Output) + ";")
}
}
if r.Mint != nil {
sb.WriteString("mint:" + r.Mint.String())
}
if r.Pointer != nil {
sb.WriteString("pointer:" + strconv.Itoa(int(*r.Pointer)))
}
sb.WriteString("cenotaph:" + strconv.FormatBool(r.Cenotaph))
sb.WriteString("flaws:" + strconv.Itoa(int(r.Flaws)))
return []byte(sb.String())
}
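For the TODO at the top of this file, a minimal determinism sketch (an assumption of what such a test could look like, placed inside package runes so it can reach the unexported fields, with the usual testing/testify/common imports assumed; it is not the project's actual test): hash the same processor state twice and expect identical digests.

func TestCalculateEventHashDeterministic(t *testing.T) {
	p := &Processor{} // zero-value maps/slices are fine: the serializers only iterate over them
	header := startingBlockHeader[common.NetworkMainnet]
	h1, err := p.calculateEventHash(header)
	assert.NoError(t, err)
	h2, err := p.calculateEventHash(header)
	assert.NoError(t, err)
	assert.Equal(t, h1, h2)
}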


@@ -0,0 +1,14 @@
package entity
import (
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
)
type Balance struct {
PkScript []byte
Amount uint128.Uint128
RuneId runes.RuneId
// BlockHeight last updated block height
BlockHeight uint64
}


@@ -0,0 +1,11 @@
package entity
import "github.com/btcsuite/btcd/chaincfg/chainhash"
type IndexedBlock struct {
Height int64
Hash chainhash.Hash
PrevHash chainhash.Hash
EventHash chainhash.Hash
CumulativeEventHash chainhash.Hash
}


@@ -0,0 +1,9 @@
package entity
import "time"
type IndexerState struct {
CreatedAt time.Time
DBVersion int32
EventHashVersion int32
}


@@ -0,0 +1,16 @@
package entity
import (
"github.com/btcsuite/btcd/wire"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
)
type OutPointBalance struct {
RuneId runes.RuneId
PkScript []byte
OutPoint wire.OutPoint
Amount uint128.Uint128
BlockHeight uint64
SpentHeight *uint64
}


@@ -0,0 +1,76 @@
package entity
import (
"encoding/hex"
"encoding/json"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
)
type TxInputOutput struct {
PkScript []byte
RuneId runes.RuneId
Amount uint128.Uint128
Index uint32
TxHash chainhash.Hash
TxOutIndex uint32
}
type txInputOutputJSON struct {
PkScript string `json:"pkScript"`
RuneId runes.RuneId `json:"runeId"`
Amount uint128.Uint128 `json:"amount"`
Index uint32 `json:"index"`
TxHash chainhash.Hash `json:"txHash"`
TxOutIndex uint32 `json:"txOutIndex"`
}
func (o TxInputOutput) MarshalJSON() ([]byte, error) {
bytes, err := json.Marshal(txInputOutputJSON{
PkScript: hex.EncodeToString(o.PkScript),
RuneId: o.RuneId,
Amount: o.Amount,
Index: o.Index,
TxHash: o.TxHash,
TxOutIndex: o.TxOutIndex,
})
if err != nil {
return nil, errors.WithStack(err)
}
return bytes, nil
}
func (o *TxInputOutput) UnmarshalJSON(data []byte) error {
var aux txInputOutputJSON
if err := json.Unmarshal(data, &aux); err != nil {
return errors.WithStack(err)
}
pkScript, err := hex.DecodeString(aux.PkScript)
if err != nil {
return errors.WithStack(err)
}
o.PkScript = pkScript
o.RuneId = aux.RuneId
o.Amount = aux.Amount
o.Index = aux.Index
o.TxHash = aux.TxHash
o.TxOutIndex = aux.TxOutIndex
return nil
}
type RuneTransaction struct {
Hash chainhash.Hash
BlockHeight uint64
Index uint32
Timestamp time.Time
Inputs []*TxInputOutput
Outputs []*TxInputOutput
Mints map[runes.RuneId]uint128.Uint128
Burns map[runes.RuneId]uint128.Uint128
Runestone *runes.Runestone
RuneEtched bool
}

View File

@@ -0,0 +1,32 @@
package entity
import (
"encoding/hex"
"encoding/json"
"testing"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/stretchr/testify/assert"
)
func TestTxInputOutputJSON(t *testing.T) {
ob := TxInputOutput{
PkScript: utils.Must(hex.DecodeString("51203daaca9b82a51aca960c1491588246029d7e0fc49e0abdbcc8fd17574be5c74b")),
RuneId: runes.RuneId{BlockHeight: 1, TxIndex: 2},
Amount: uint128.From64(100),
Index: 1,
TxHash: *utils.Must(chainhash.NewHashFromStr("3ea1b497b25993adf3f2c8dae1470721316a45c82600798c14d0425039c410ad")),
TxOutIndex: 2,
}
bytes, err := json.Marshal(ob)
assert.NoError(t, err)
t.Log(string(bytes))
var parsedOB TxInputOutput
err = json.Unmarshal(bytes, &parsedOB)
assert.NoError(t, err)
assert.Equal(t, ob, parsedOB)
}

242
modules/runes/processor.go Normal file
View File

@@ -0,0 +1,242 @@
package runes
import (
"context"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/pkg/btcclient"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
// Compile-time check that Processor implements the Bitcoin indexer.Processor interface.
var _ indexer.Processor[*types.Block] = (*Processor)(nil)
type Processor struct {
runesDg datagateway.RunesDataGateway
indexerInfoDg datagateway.IndexerInfoDataGateway
bitcoinClient btcclient.Contract
network common.Network
reportingClient *reportingclient.ReportingClient
cleanupFuncs []func(context.Context) error
newRuneEntries map[runes.RuneId]*runes.RuneEntry
newRuneEntryStates map[runes.RuneId]*runes.RuneEntry
newOutPointBalances map[wire.OutPoint][]*entity.OutPointBalance
newSpendOutPoints []wire.OutPoint
newBalances map[string]map[runes.RuneId]uint128.Uint128 // pkScript(hex) -> runeId -> amount
newRuneTxs []*entity.RuneTransaction
}
func NewProcessor(runesDg datagateway.RunesDataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, bitcoinClient btcclient.Contract, network common.Network, reportingClient *reportingclient.ReportingClient, cleanupFuncs []func(context.Context) error) *Processor {
return &Processor{
runesDg: runesDg,
indexerInfoDg: indexerInfoDg,
bitcoinClient: bitcoinClient,
network: network,
reportingClient: reportingClient,
cleanupFuncs: cleanupFuncs,
newRuneEntries: make(map[runes.RuneId]*runes.RuneEntry),
newRuneEntryStates: make(map[runes.RuneId]*runes.RuneEntry),
newOutPointBalances: make(map[wire.OutPoint][]*entity.OutPointBalance),
newSpendOutPoints: make([]wire.OutPoint, 0),
newBalances: make(map[string]map[runes.RuneId]uint128.Uint128),
newRuneTxs: make([]*entity.RuneTransaction, 0),
}
}
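// Design note: Go map keys cannot be []byte, so newBalances keys the pending
// per-address balances by the hex encoding of the pkScript, for example:
//
//	pkScriptStr := hex.EncodeToString(txOut.PkScript)
//	p.newBalances[pkScriptStr][runeId] = p.newBalances[pkScriptStr][runeId].Add(amount)
//
// The key is decoded back with hex.DecodeString when the balances are flushed
// to storage in flushBlock.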
var (
ErrDBVersionMismatch = errors.New("db version mismatch: please migrate db")
ErrEventHashVersionMismatch = errors.New("event hash version mismatch: please reset db and reindex")
)
func (p *Processor) VerifyStates(ctx context.Context) error {
// TODO: ensure db is migrated
if err := p.ensureValidState(ctx); err != nil {
return errors.Wrap(err, "error during ensureValidState")
}
if p.network == common.NetworkMainnet {
if err := p.ensureGenesisRune(ctx); err != nil {
return errors.Wrap(err, "error during ensureGenesisRune")
}
}
if p.reportingClient != nil {
if err := p.reportingClient.SubmitNodeReport(ctx, "runes", p.network); err != nil {
return errors.Wrap(err, "failed to submit node report")
}
}
return nil
}
func (p *Processor) ensureValidState(ctx context.Context) error {
indexerState, err := p.indexerInfoDg.GetLatestIndexerState(ctx)
if err != nil && !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "failed to get latest indexer state")
}
// if not found, set indexer state
if errors.Is(err, errs.NotFound) {
if err := p.indexerInfoDg.SetIndexerState(ctx, entity.IndexerState{
DBVersion: DBVersion,
EventHashVersion: EventHashVersion,
}); err != nil {
return errors.Wrap(err, "failed to set indexer state")
}
} else {
if indexerState.DBVersion != DBVersion {
return errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, DBVersion)
}
if indexerState.EventHashVersion != EventHashVersion {
return errors.Wrapf(errs.ConflictSetting, "event hash version mismatch: current version is %d, required version is %d. Please reset the runes db and reindex.", indexerState.EventHashVersion, EventHashVersion)
}
}
_, network, err := p.indexerInfoDg.GetLatestIndexerStats(ctx)
if err != nil && !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "failed to get latest indexer stats")
}
// if found, verify indexer stats
if err == nil {
if network != p.network {
return errors.Wrapf(errs.ConflictSetting, "network mismatch: latest indexed network is %s, configured network is %s. If you want to change the network, please reset the database", network, p.network)
}
}
if err := p.indexerInfoDg.UpdateIndexerStats(ctx, p.network.String(), p.network); err != nil {
return errors.Wrap(err, "failed to update indexer stats")
}
return nil
}
var genesisRuneId = runes.RuneId{BlockHeight: 1, TxIndex: 0}
func (p *Processor) ensureGenesisRune(ctx context.Context) error {
_, err := p.runesDg.GetRuneEntryByRuneId(ctx, genesisRuneId)
if err != nil && !errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "failed to get genesis rune entry")
}
if errors.Is(err, errs.NotFound) {
runeEntry := &runes.RuneEntry{
RuneId: genesisRuneId,
Number: 0,
Divisibility: 0,
Premine: uint128.Zero,
SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000),
Symbol: '\u29c9',
Terms: &runes.Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: &uint128.Max,
HeightStart: lo.ToPtr(uint64(common.HalvingInterval * 4)),
HeightEnd: lo.ToPtr(uint64(common.HalvingInterval * 5)),
OffsetStart: nil,
OffsetEnd: nil,
},
Turbo: true,
Mints: uint128.Zero,
BurnedAmount: uint128.Zero,
CompletedAt: time.Time{},
CompletedAtHeight: nil,
EtchingBlock: 1,
EtchingTxHash: chainhash.Hash{},
EtchedAt: time.Time{},
}
if err := p.runesDg.CreateRuneEntry(ctx, runeEntry, genesisRuneId.BlockHeight); err != nil {
return errors.Wrap(err, "failed to create genesis rune entry")
}
}
return nil
}
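// Note: the hard-coded entry above is the protocol's genesis rune
// UNCOMMON•GOODS. The rune value 2055900680524219742 encodes the name
// "UNCOMMONGOODS", and spacers 0b10000000 places a single spacer after the
// 8th character. Its terms allow open minting of 1 unit per mint with an
// effectively unlimited cap between blocks 840,000 and 1,050,000 (four and
// five halving intervals). A rough sketch of how a spacer bitmask is applied,
// assuming bit i set means a spacer follows character i+1:
//
//	name, spacers := "UNCOMMONGOODS", uint32(0b10000000)
//	var out []rune
//	for i, c := range name {
//		out = append(out, c)
//		if spacers&(1<<uint(i)) != 0 {
//			out = append(out, '•')
//		}
//	}
//	// string(out) == "UNCOMMON•GOODS"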
func (p *Processor) Name() string {
return "runes"
}
func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) {
blockHeader, err := p.runesDg.GetLatestBlock(ctx)
if err != nil {
if errors.Is(err, errs.NotFound) {
return startingBlockHeader[p.network], nil
}
return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block")
}
return blockHeader, nil
}
// Warning: GetIndexedBlock currently returns a types.BlockHeader with only the Height and Hash fields populated,
// because all current callers need only those fields. In the future, we may want to populate all fields for type safety.
func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) {
block, err := p.runesDg.GetIndexedBlockByHeight(ctx, height)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to get indexed block")
}
return types.BlockHeader{
Height: block.Height,
Hash: block.Hash,
}, nil
}
func (p *Processor) RevertData(ctx context.Context, from int64) error {
runesDgTx, err := p.runesDg.BeginRunesTx(ctx)
if err != nil {
return errors.Wrap(err, "failed to begin transaction")
}
defer func() {
if err := runesDgTx.Rollback(ctx); err != nil {
logger.WarnContext(ctx, "failed to rollback transaction",
slogx.Error(err),
slogx.String("event", "rollback_runes_revert"),
)
}
}()
if err := runesDgTx.DeleteIndexedBlockSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete indexed blocks")
}
if err := runesDgTx.DeleteRuneEntriesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete rune entries")
}
if err := runesDgTx.DeleteRuneEntryStatesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete rune entry states")
}
if err := runesDgTx.DeleteRuneTransactionsSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete rune transactions")
}
if err := runesDgTx.DeleteRunestonesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete runestones")
}
if err := runesDgTx.DeleteOutPointBalancesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete outpoint balances")
}
if err := runesDgTx.UnspendOutPointBalancesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to unspend outpoint balances")
}
if err := runesDgTx.DeleteRuneBalancesSinceHeight(ctx, uint64(from)); err != nil {
return errors.Wrap(err, "failed to delete rune balances")
}
if err := runesDgTx.Commit(ctx); err != nil {
return errors.Wrap(err, "failed to commit transaction")
}
return nil
}
func (p *Processor) Shutdown(ctx context.Context) error {
var cleanupErrs []error
for _, cleanup := range p.cleanupFuncs {
if err := cleanup(ctx); err != nil {
cleanupErrs = append(cleanupErrs, err)
}
}
return errors.WithStack(errors.Join(cleanupErrs...))
}

View File

@@ -0,0 +1,807 @@
package runes
import (
"bytes"
"context"
"encoding/hex"
"log/slog"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/logger/slogx"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
func (p *Processor) Process(ctx context.Context, blocks []*types.Block) error {
for _, block := range blocks {
ctx := logger.WithContext(ctx, slog.Int64("height", block.Header.Height))
logger.DebugContext(ctx, "Processing new block", slog.Int("txs", len(block.Transactions)))
for _, tx := range block.Transactions {
if err := p.processTx(ctx, tx, block.Header); err != nil {
return errors.Wrap(err, "failed to process tx")
}
}
if err := p.flushBlock(ctx, block.Header); err != nil {
return errors.Wrap(err, "failed to flush block")
}
logger.DebugContext(ctx, "Inserted new block")
}
return nil
}
func (p *Processor) processTx(ctx context.Context, tx *types.Transaction, blockHeader types.BlockHeader) error {
if tx.BlockHeight < int64(runes.FirstRuneHeight(p.network)) {
// prevent processing txs before the activation height
return nil
}
runestone, err := runes.DecipherRunestone(tx)
if err != nil {
return errors.Wrap(err, "failed to decipher runestone")
}
inputBalances, err := p.getInputBalances(ctx, tx.TxIn)
if err != nil {
return errors.Wrap(err, "failed to get input balances")
}
if runestone == nil && len(inputBalances) == 0 {
// no runes involved in this tx
return nil
}
unallocated := make(map[runes.RuneId]uint128.Uint128)
allocated := make(map[int]map[runes.RuneId]uint128.Uint128)
for _, balances := range inputBalances {
for runeId, balance := range balances {
unallocated[runeId] = unallocated[runeId].Add(balance.Amount)
p.newSpendOutPoints = append(p.newSpendOutPoints, balance.OutPoint)
}
}
allocate := func(output int, runeId runes.RuneId, amount uint128.Uint128) {
if _, ok := unallocated[runeId]; !ok {
return
}
// cap amount to unallocated amount
if amount.Cmp(unallocated[runeId]) > 0 {
amount = unallocated[runeId]
}
if amount.IsZero() {
return
}
if _, ok := allocated[output]; !ok {
allocated[output] = make(map[runes.RuneId]uint128.Uint128)
}
allocated[output][runeId] = allocated[output][runeId].Add(amount)
unallocated[runeId] = unallocated[runeId].Sub(amount)
}
mints := make(map[runes.RuneId]uint128.Uint128)
var runeEtched bool
if runestone != nil {
if runestone.Mint != nil {
mintRuneId := *runestone.Mint
amount, err := p.mint(ctx, mintRuneId, blockHeader)
if err != nil {
return errors.Wrap(err, "error during mint")
}
if !amount.IsZero() {
unallocated[mintRuneId] = unallocated[mintRuneId].Add(amount)
mints[mintRuneId] = amount
}
}
etching, etchedRuneId, etchedRune, err := p.getEtchedRune(ctx, tx, runestone)
if err != nil {
return errors.Wrap(err, "error during getting etched rune")
}
if etching != nil {
runeEtched = true
}
if !runestone.Cenotaph {
// include premine in unallocated, if exists
if etching != nil {
premine := lo.FromPtr(etching.Premine)
if !premine.IsZero() {
unallocated[etchedRuneId] = unallocated[etchedRuneId].Add(premine)
mints[etchedRuneId] = mints[etchedRuneId].Add(premine)
}
}
// allocate runes
for _, edict := range runestone.Edicts {
// sanity check, should not happen since it is already checked in runes.MessageFromIntegers
if edict.Output > len(tx.TxOut) {
return errors.New("edict output index is out of range")
}
var emptyRuneId runes.RuneId
// if rune id is empty, then use etched rune id
if edict.Id == emptyRuneId {
// empty rune id is only allowed for runestones with etching
if etching == nil {
continue
}
edict.Id = etchedRuneId
}
if edict.Output == len(tx.TxOut) {
// if output == len(tx.TxOut), then allocate the amount to every non-OP_RETURN output
// find all non-OP_RETURN outputs
var destinations []int
for i, txOut := range tx.TxOut {
if !txOut.IsOpReturn() {
destinations = append(destinations, i)
}
}
if len(destinations) > 0 {
if edict.Amount.IsZero() {
// if amount is zero, divide the entire unallocated amount evenly among the destinations
amount, remainder := unallocated[edict.Id].QuoRem64(uint64(len(destinations)))
for i, dest := range destinations {
// if i < remainder, then add 1 to amount
allocate(dest, edict.Id, lo.Ternary(i < int(remainder), amount.Add64(1), amount))
}
} else {
// if amount is not zero, allocate that amount to each destination sequentially.
// If the unallocated amount runs out before the remaining destinations, they receive nothing.
for _, dest := range destinations {
allocate(dest, edict.Id, edict.Amount)
}
}
}
} else {
// allocate amount to specific output
var amount uint128.Uint128
if edict.Amount.IsZero() {
// if amount is zero, allocate the whole unallocated amount
amount = unallocated[edict.Id]
} else {
amount = edict.Amount
}
allocate(edict.Output, edict.Id, amount)
}
}
}
if etching != nil {
if err := p.createRuneEntry(ctx, runestone, etchedRuneId, etchedRune, tx, blockHeader); err != nil {
return errors.Wrap(err, "failed to create rune entry")
}
}
}
burns := make(map[runes.RuneId]uint128.Uint128)
if runestone != nil && runestone.Cenotaph {
// all input runes and minted runes in a tx with cenotaph are burned
for runeId, amount := range unallocated {
burns[runeId] = burns[runeId].Add(amount)
}
} else {
// assign all unallocated runes to the default output (pointer), or to the first non-OP_RETURN
// output if there is no default or the default output index exceeds the number of outputs
var pointer *uint64
if runestone != nil && !runestone.Cenotaph && runestone.Pointer != nil && *runestone.Pointer < uint64(len(tx.TxOut)) {
pointer = runestone.Pointer
}
// if no pointer is provided, use the first non-OP_RETURN output
if pointer == nil {
for i, txOut := range tx.TxOut {
if !txOut.IsOpReturn() {
pointer = lo.ToPtr(uint64(i))
break
}
}
}
if pointer != nil {
// allocate all unallocated runes to the pointer
output := int(*pointer)
for runeId, amount := range unallocated {
allocate(output, runeId, amount)
}
} else {
// if pointer is still nil, then no output is available. Burn all unallocated runes.
for runeId, amount := range unallocated {
burns[runeId] = burns[runeId].Add(amount)
}
}
}
// update outpoint balances
for output, balances := range allocated {
if tx.TxOut[output].IsOpReturn() {
// burn all runes allocated to OP_RETURN outputs
for runeId, amount := range balances {
burns[runeId] = burns[runeId].Add(amount)
}
continue
}
outPoint := wire.OutPoint{
Hash: tx.TxHash,
Index: uint32(output),
}
for runeId, amount := range balances {
p.newOutPointBalances[outPoint] = append(p.newOutPointBalances[outPoint], &entity.OutPointBalance{
RuneId: runeId,
PkScript: tx.TxOut[output].PkScript,
OutPoint: outPoint,
Amount: amount,
BlockHeight: uint64(tx.BlockHeight),
SpentHeight: nil,
})
}
}
if err := p.updateNewBalances(ctx, tx, inputBalances, allocated); err != nil {
return errors.Wrap(err, "failed to update new balances")
}
// increment burned amounts in rune entries
if err := p.incrementBurnedAmount(ctx, burns); err != nil {
return errors.Wrap(err, "failed to update burned amount")
}
// construct RuneTransaction
runeTx := entity.RuneTransaction{
Hash: tx.TxHash,
BlockHeight: uint64(blockHeader.Height),
Index: tx.Index,
Timestamp: blockHeader.Timestamp,
Inputs: make([]*entity.TxInputOutput, 0),
Outputs: make([]*entity.TxInputOutput, 0),
Mints: mints,
Burns: burns,
Runestone: runestone,
RuneEtched: runeEtched,
}
for inputIndex, balances := range inputBalances {
for runeId, balance := range balances {
runeTx.Inputs = append(runeTx.Inputs, &entity.TxInputOutput{
PkScript: balance.PkScript,
RuneId: runeId,
Amount: balance.Amount,
Index: uint32(inputIndex),
TxHash: tx.TxIn[inputIndex].PreviousOutTxHash,
TxOutIndex: tx.TxIn[inputIndex].PreviousOutIndex,
})
}
}
for outputIndex, balances := range allocated {
pkScript := tx.TxOut[outputIndex].PkScript
for runeId, amount := range balances {
runeTx.Outputs = append(runeTx.Outputs, &entity.TxInputOutput{
PkScript: pkScript,
RuneId: runeId,
Amount: amount,
Index: uint32(outputIndex),
TxHash: tx.TxHash,
TxOutIndex: uint32(outputIndex),
})
}
}
p.newRuneTxs = append(p.newRuneTxs, &runeTx)
return nil
}
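// Worked example of the split-edict arithmetic above: with 100 unallocated
// units of a rune, an edict amount of zero, and three eligible non-OP_RETURN
// outputs, QuoRem64 returns amount=33 and remainder=1, so the first
// destination receives 34 and the other two receive 33 each:
//
//	amount, remainder := unallocated[edict.Id].QuoRem64(uint64(len(destinations))) // 33, 1 for 100/3
//	for i, dest := range destinations {
//		allocate(dest, edict.Id, lo.Ternary(i < int(remainder), amount.Add64(1), amount))
//	}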
func (p *Processor) getInputBalances(ctx context.Context, txInputs []*types.TxIn) (map[int]map[runes.RuneId]*entity.OutPointBalance, error) {
inputBalances := make(map[int]map[runes.RuneId]*entity.OutPointBalance)
for i, txIn := range txInputs {
balances, err := p.getRunesBalancesAtOutPoint(ctx, wire.OutPoint{
Hash: txIn.PreviousOutTxHash,
Index: txIn.PreviousOutIndex,
})
if err != nil {
return nil, errors.Wrap(err, "failed to get runes balances at outpoint")
}
if len(balances) > 0 {
inputBalances[i] = balances
}
}
return inputBalances, nil
}
func (p *Processor) updateNewBalances(ctx context.Context, tx *types.Transaction, inputBalances map[int]map[runes.RuneId]*entity.OutPointBalance, allocated map[int]map[runes.RuneId]uint128.Uint128) error {
// getBalanceFromDg returns the balance of the pkScript and runeId as of the last flushed block (tx.BlockHeight-1)
getBalanceFromDg := func(ctx context.Context, pkScript []byte, runeId runes.RuneId) (uint128.Uint128, error) {
balance, err := p.runesDg.GetBalanceByPkScriptAndRuneId(ctx, pkScript, runeId, uint64(tx.BlockHeight-1))
if err != nil {
if errors.Is(err, errs.NotFound) {
return uint128.Zero, nil
}
return uint128.Uint128{}, errors.Wrap(err, "failed to get balance by pk script and rune id")
}
return balance.Amount, nil
}
// deduct balances used in inputs
for _, balances := range inputBalances {
for runeId, balance := range balances {
pkScript := balance.PkScript
pkScriptStr := hex.EncodeToString(pkScript)
if _, ok := p.newBalances[pkScriptStr]; !ok {
p.newBalances[pkScriptStr] = make(map[runes.RuneId]uint128.Uint128)
}
if _, ok := p.newBalances[pkScriptStr][runeId]; !ok {
balance, err := getBalanceFromDg(ctx, pkScript, runeId)
if err != nil {
return errors.WithStack(err)
}
p.newBalances[pkScriptStr][runeId] = balance
}
if p.newBalances[pkScriptStr][runeId].Cmp(balance.Amount) < 0 {
// the pkScript's total balance is less than the balance in the input. This should be impossible; something is wrong.
return errors.Errorf("current balance is less than balance in input: %s", runeId)
}
p.newBalances[pkScriptStr][runeId] = p.newBalances[pkScriptStr][runeId].Sub(balance.Amount)
}
}
// add balances allocated in outputs
for outputIndex, balances := range allocated {
pkScript := tx.TxOut[outputIndex].PkScript
pkScriptStr := hex.EncodeToString(pkScript)
for runeId, amount := range balances {
if _, ok := p.newBalances[pkScriptStr]; !ok {
p.newBalances[pkScriptStr] = make(map[runes.RuneId]uint128.Uint128)
}
if _, ok := p.newBalances[pkScriptStr][runeId]; !ok {
balance, err := getBalanceFromDg(ctx, pkScript, runeId)
if err != nil {
return errors.WithStack(err)
}
p.newBalances[pkScriptStr][runeId] = balance
}
p.newBalances[pkScriptStr][runeId] = p.newBalances[pkScriptStr][runeId].Add(amount)
}
}
return nil
}
func (p *Processor) mint(ctx context.Context, runeId runes.RuneId, blockHeader types.BlockHeader) (uint128.Uint128, error) {
runeEntry, err := p.getRuneEntryByRuneId(ctx, runeId)
if err != nil {
if errors.Is(err, errs.NotFound) {
return uint128.Zero, nil
}
return uint128.Uint128{}, errors.Wrap(err, "failed to get rune entry by rune id")
}
amount, err := runeEntry.GetMintableAmount(uint64(blockHeader.Height))
if err != nil {
return uint128.Zero, nil
}
if err := p.incrementMintCount(ctx, runeId, blockHeader); err != nil {
return uint128.Zero, errors.Wrap(err, "failed to increment mint count")
}
return amount, nil
}
func (p *Processor) getEtchedRune(ctx context.Context, tx *types.Transaction, runestone *runes.Runestone) (*runes.Etching, runes.RuneId, runes.Rune, error) {
if runestone.Etching == nil {
return nil, runes.RuneId{}, runes.Rune{}, nil
}
rune := runestone.Etching.Rune
if rune != nil {
minimumRune := runes.MinimumRuneAtHeight(p.network, uint64(tx.BlockHeight))
if rune.Cmp(minimumRune) < 0 {
return nil, runes.RuneId{}, runes.Rune{}, nil
}
if rune.IsReserved() {
return nil, runes.RuneId{}, runes.Rune{}, nil
}
ok, err := p.isRuneExists(ctx, *rune)
if err != nil {
return nil, runes.RuneId{}, runes.Rune{}, errors.Wrap(err, "error during check rune existence")
}
if ok {
return nil, runes.RuneId{}, runes.Rune{}, nil
}
// check if tx commits to the rune
commit, err := p.txCommitsToRune(ctx, tx, *rune)
if err != nil {
return nil, runes.RuneId{}, runes.Rune{}, errors.Wrap(err, "error during check tx commits to rune")
}
if !commit {
return nil, runes.RuneId{}, runes.Rune{}, nil
}
} else {
rune = lo.ToPtr(runes.GetReservedRune(uint64(tx.BlockHeight), tx.Index))
}
runeId, err := runes.NewRuneId(uint64(tx.BlockHeight), tx.Index)
if err != nil {
return nil, runes.RuneId{}, runes.Rune{}, errors.Wrap(err, "failed to create rune id")
}
return runestone.Etching, runeId, *rune, nil
}
func (p *Processor) txCommitsToRune(ctx context.Context, tx *types.Transaction, rune runes.Rune) (bool, error) {
commitment := rune.Commitment()
for i, txIn := range tx.TxIn {
tapscript, ok := extractTapScript(txIn.Witness)
if !ok {
continue
}
for tapscript.Next() {
// ignore errors and continue to next input
if tapscript.Err() != nil {
break
}
// only consider data push opcodes
if !runes.IsDataPushOpCode(tapscript.Opcode()) {
continue
}
// tapscript must contain commitment of the rune
if !bytes.Equal(tapscript.Data(), commitment) {
continue
}
// It is impossible to verify that the input UTXO is a P2TR output from the input alone.
// We need to check the previous output's pkScript.
prevTx, err := p.bitcoinClient.GetTransactionByHash(ctx, txIn.PreviousOutTxHash)
if err != nil && errors.Is(err, errs.NotFound) {
continue
}
if err != nil {
return false, errors.Wrapf(err, "can't get previous txout for txin `%v:%v`", tx.TxHash.String(), i)
}
pkScript := prevTx.TxOut[txIn.PreviousOutIndex].PkScript
// input utxo must be P2TR
if !txscript.IsPayToTaproot(pkScript) {
break
}
// input must be mature enough
confirmations := tx.BlockHeight - prevTx.BlockHeight + 1
if confirmations < runes.RUNE_COMMIT_BLOCKS {
continue
}
return true, nil
}
}
return false, nil
}
func extractTapScript(witness [][]byte) (txscript.ScriptTokenizer, bool) {
witness = removeAnnexFromWitness(witness)
if len(witness) < 2 {
return txscript.ScriptTokenizer{}, false
}
script := witness[len(witness)-2]
return txscript.MakeScriptTokenizer(0, script), true
}
func removeAnnexFromWitness(witness [][]byte) [][]byte {
if len(witness) >= 2 && len(witness[len(witness)-1]) > 0 && witness[len(witness)-1][0] == txscript.TaprootAnnexTag {
return witness[:len(witness)-1]
}
return witness
}
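// Context for the two helpers above: in a taproot script-path spend the
// witness stack is laid out as
//
//	[script inputs..., tapscript, control block, (optional annex)]
//
// where the annex, if present, is the last element and begins with 0x50
// (txscript.TaprootAnnexTag). After the annex is stripped, the revealed
// tapscript is the second-to-last element, which is what extractTapScript
// tokenizes when scanning for the rune commitment.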
func (p *Processor) createRuneEntry(ctx context.Context, runestone *runes.Runestone, runeId runes.RuneId, rune runes.Rune, tx *types.Transaction, blockHeader types.BlockHeader) error {
count, err := p.countRuneEntries(ctx)
if err != nil {
return errors.Wrap(err, "failed to count rune entries")
}
var runeEntry *runes.RuneEntry
if runestone.Cenotaph {
runeEntry = &runes.RuneEntry{
RuneId: runeId,
Number: count,
SpacedRune: runes.NewSpacedRune(rune, 0),
Mints: uint128.Zero,
BurnedAmount: uint128.Zero,
Premine: uint128.Zero,
Symbol: '¤',
Divisibility: 0,
Terms: nil,
Turbo: false,
CompletedAt: time.Time{},
CompletedAtHeight: nil,
EtchingBlock: uint64(tx.BlockHeight),
EtchingTxHash: tx.TxHash,
EtchedAt: blockHeader.Timestamp,
}
} else {
etching := runestone.Etching
runeEntry = &runes.RuneEntry{
RuneId: runeId,
Number: count,
SpacedRune: runes.NewSpacedRune(rune, lo.FromPtr(etching.Spacers)),
Mints: uint128.Zero,
BurnedAmount: uint128.Zero,
Premine: lo.FromPtr(etching.Premine),
Symbol: lo.FromPtrOr(etching.Symbol, '¤'),
Divisibility: lo.FromPtr(etching.Divisibility),
Terms: etching.Terms,
Turbo: etching.Turbo,
CompletedAt: time.Time{},
CompletedAtHeight: nil,
EtchingBlock: uint64(tx.BlockHeight),
EtchingTxHash: tx.TxHash,
EtchedAt: blockHeader.Timestamp,
}
}
p.newRuneEntries[runeId] = runeEntry
p.newRuneEntryStates[runeId] = runeEntry
return nil
}
func (p *Processor) incrementMintCount(ctx context.Context, runeId runes.RuneId, blockHeader types.BlockHeader) (err error) {
runeEntry, err := p.getRuneEntryByRuneId(ctx, runeId)
if err != nil {
return errors.Wrap(err, "failed to get rune entry by rune id")
}
runeEntry.Mints = runeEntry.Mints.Add64(1)
if runeEntry.Mints == lo.FromPtr(runeEntry.Terms.Cap) {
runeEntry.CompletedAt = blockHeader.Timestamp
runeEntry.CompletedAtHeight = lo.ToPtr(uint64(blockHeader.Height))
}
p.newRuneEntryStates[runeId] = runeEntry
return nil
}
func (p *Processor) incrementBurnedAmount(ctx context.Context, burned map[runes.RuneId]uint128.Uint128) (err error) {
runeEntries := make(map[runes.RuneId]*runes.RuneEntry)
runeIdsToFetch := make([]runes.RuneId, 0)
for runeId, amount := range burned {
if amount.IsZero() {
// ignore zero burn amount
continue
}
runeEntry, ok := p.newRuneEntryStates[runeId]
if !ok {
runeIdsToFetch = append(runeIdsToFetch, runeId)
} else {
runeEntries[runeId] = runeEntry
}
}
if len(runeIdsToFetch) > 0 {
for _, runeId := range runeIdsToFetch {
runeEntry, err := p.getRuneEntryByRuneId(ctx, runeId)
if err != nil {
if errors.Is(err, errs.NotFound) {
return errors.Wrap(err, "rune entry not found")
}
return errors.Wrap(err, "failed to get rune entry by rune id")
}
runeEntries[runeId] = runeEntry
}
}
// update rune entries
for runeId, amount := range burned {
runeEntry, ok := runeEntries[runeId]
if !ok {
continue
}
runeEntry.BurnedAmount = runeEntry.BurnedAmount.Add(amount)
p.newRuneEntryStates[runeId] = runeEntry
}
return nil
}
func (p *Processor) countRuneEntries(ctx context.Context) (uint64, error) {
runeCountInDB, err := p.runesDg.CountRuneEntries(ctx)
if err != nil {
return 0, errors.Wrap(err, "failed to count rune entries in db")
}
return runeCountInDB + uint64(len(p.newRuneEntries)), nil
}
func (p *Processor) getRuneEntryByRuneId(ctx context.Context, runeId runes.RuneId) (*runes.RuneEntry, error) {
runeEntry, ok := p.newRuneEntryStates[runeId]
if ok {
return runeEntry, nil
}
// p.newRuneEntries is not checked here, since new rune entries are also added to p.newRuneEntryStates
runeEntry, err := p.runesDg.GetRuneEntryByRuneId(ctx, runeId)
if err != nil {
return nil, errors.Wrap(err, "failed to get rune entry by rune id")
}
return runeEntry, nil
}
func (p *Processor) isRuneExists(ctx context.Context, rune runes.Rune) (bool, error) {
for _, runeEntry := range p.newRuneEntries {
if runeEntry.SpacedRune.Rune == rune {
return true, nil
}
}
_, err := p.runesDg.GetRuneIdFromRune(ctx, rune)
if err != nil {
if errors.Is(err, errs.NotFound) {
return false, nil
}
return false, errors.Wrap(err, "failed to get rune id from rune")
}
return true, nil
}
func (p *Processor) getRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error) {
if outPointBalances, ok := p.newOutPointBalances[outPoint]; ok {
balances := make(map[runes.RuneId]*entity.OutPointBalance)
for _, outPointBalance := range outPointBalances {
balances[outPointBalance.RuneId] = outPointBalance
}
return balances, nil
}
balances, err := p.runesDg.GetRunesBalancesAtOutPoint(ctx, outPoint)
if err != nil {
return nil, errors.Wrap(err, "failed to get runes balances at outpoint")
}
return balances, nil
}
func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeader) error {
runesDgTx, err := p.runesDg.BeginRunesTx(ctx)
if err != nil {
return errors.Wrap(err, "failed to begin runes tx")
}
defer func() {
if err := runesDgTx.Rollback(ctx); err != nil {
logger.WarnContext(ctx, "failed to rollback transaction",
slogx.Error(err),
slogx.String("event", "rollback_runes_insertion"),
)
}
}()
// CreateIndexedBlock must be performed before other flush methods to correctly calculate event hash
eventHash, err := p.calculateEventHash(blockHeader)
if err != nil {
return errors.Wrap(err, "failed to calculate event hash")
}
prevIndexedBlock, err := runesDgTx.GetIndexedBlockByHeight(ctx, blockHeader.Height-1)
if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == startingBlockHeader[p.network].Height {
prevIndexedBlock = &entity.IndexedBlock{
Height: startingBlockHeader[p.network].Height,
Hash: startingBlockHeader[p.network].Hash,
EventHash: chainhash.Hash{},
CumulativeEventHash: chainhash.Hash{},
}
err = nil
}
if err != nil {
if errors.Is(err, errs.NotFound) {
return errors.Errorf("indexed block not found for height %d. Indexed block must be created for every Bitcoin block", blockHeader.Height)
}
return errors.Wrap(err, "failed to get indexed block by height")
}
cumulativeEventHash := chainhash.DoubleHashH(append(prevIndexedBlock.CumulativeEventHash[:], eventHash[:]...))
if err := runesDgTx.CreateIndexedBlock(ctx, &entity.IndexedBlock{
Height: blockHeader.Height,
Hash: blockHeader.Hash,
PrevHash: blockHeader.PrevBlock,
EventHash: eventHash,
CumulativeEventHash: cumulativeEventHash,
}); err != nil {
return errors.Wrap(err, "failed to create indexed block")
}
// flush new rune entries
{
for _, runeEntry := range p.newRuneEntries {
if err := runesDgTx.CreateRuneEntry(ctx, runeEntry, uint64(blockHeader.Height)); err != nil {
return errors.Wrap(err, "failed to create rune entry")
}
}
p.newRuneEntries = make(map[runes.RuneId]*runes.RuneEntry)
}
// flush new rune entry states
{
for _, runeEntry := range p.newRuneEntryStates {
if err := runesDgTx.CreateRuneEntryState(ctx, runeEntry, uint64(blockHeader.Height)); err != nil {
return errors.Wrap(err, "failed to create rune entry state")
}
}
p.newRuneEntryStates = make(map[runes.RuneId]*runes.RuneEntry)
}
// flush new outpoint balances
{
newBalances := make([]*entity.OutPointBalance, 0)
for _, balances := range p.newOutPointBalances {
newBalances = append(newBalances, balances...)
}
if err := runesDgTx.CreateOutPointBalances(ctx, newBalances); err != nil {
return errors.Wrap(err, "failed to create outpoint balances")
}
p.newOutPointBalances = make(map[wire.OutPoint][]*entity.OutPointBalance)
}
// flush new spend outpoints
{
for _, outPoint := range p.newSpendOutPoints {
if err := runesDgTx.SpendOutPointBalances(ctx, outPoint, uint64(blockHeader.Height)); err != nil {
return errors.Wrap(err, "failed to create spend outpoint")
}
}
p.newSpendOutPoints = make([]wire.OutPoint, 0)
}
// flush new balances
{
params := make([]datagateway.CreateRuneBalancesParams, 0)
for pkScriptStr, balances := range p.newBalances {
pkScript, err := hex.DecodeString(pkScriptStr)
if err != nil {
return errors.Wrap(err, "failed to decode pk script")
}
for runeId, balance := range balances {
params = append(params, datagateway.CreateRuneBalancesParams{
PkScript: pkScript,
RuneId: runeId,
Balance: balance,
BlockHeight: uint64(blockHeader.Height),
})
}
}
if err := runesDgTx.CreateRuneBalances(ctx, params); err != nil {
return errors.Wrap(err, "failed to create balances at block")
}
p.newBalances = make(map[string]map[runes.RuneId]uint128.Uint128)
}
// flush new rune transactions
{
for _, runeTx := range p.newRuneTxs {
if err := runesDgTx.CreateRuneTransaction(ctx, runeTx); err != nil {
return errors.Wrap(err, "failed to create rune transaction")
}
}
p.newRuneTxs = make([]*entity.RuneTransaction, 0)
}
if err := runesDgTx.Commit(ctx); err != nil {
return errors.Wrap(err, "failed to commit runes tx")
}
// submit event to reporting system
if p.reportingClient != nil {
if err := p.reportingClient.SubmitBlockReport(ctx, reportingclient.SubmitBlockReportPayload{
Type: "runes",
ClientVersion: Version,
DBVersion: DBVersion,
EventHashVersion: EventHashVersion,
Network: p.network,
BlockHeight: uint64(blockHeader.Height),
BlockHash: blockHeader.Hash,
EventHash: eventHash,
CumulativeEventHash: cumulativeEventHash,
}); err != nil {
return errors.Wrap(err, "failed to submit block report")
}
}
return nil
}
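flushBlock chains per-block event hashes into a cumulative hash: each block's cumulative event hash is the double-SHA256 of the previous cumulative hash concatenated with the block's event hash. Below is a minimal sketch of how an external verifier could recheck that chain from stored indexed blocks; the helper name, package name, and the assumption that hashes are supplied in ascending height order are illustrative only.

package runesverify

import (
	"bytes"
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
)

// verifyEventHashChain recomputes cumulativeEventHash the same way flushBlock
// does: DoubleHashH(prevCumulative || eventHash) for each block in order.
func verifyEventHashChain(prevCumulative chainhash.Hash, eventHashes, cumulativeHashes []chainhash.Hash) error {
	if len(eventHashes) != len(cumulativeHashes) {
		return fmt.Errorf("length mismatch: %d event hashes, %d cumulative hashes", len(eventHashes), len(cumulativeHashes))
	}
	for i := range eventHashes {
		expected := chainhash.DoubleHashH(append(prevCumulative[:], eventHashes[i][:]...))
		if !bytes.Equal(expected[:], cumulativeHashes[i][:]) {
			return fmt.Errorf("cumulative event hash mismatch at height offset %d", i)
		}
		prevCumulative = cumulativeHashes[i]
	}
	return nil
}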

View File

@@ -0,0 +1,130 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: batch.go
package gen
import (
"context"
"errors"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"
)
var (
ErrBatchAlreadyClosed = errors.New("batch already closed")
)
const createOutPointBalances = `-- name: CreateOutPointBalances :batchexec
INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7)
`
type CreateOutPointBalancesBatchResults struct {
br pgx.BatchResults
tot int
closed bool
}
type CreateOutPointBalancesParams struct {
RuneID string
Pkscript string
TxHash string
TxIdx int32
Amount pgtype.Numeric
BlockHeight int32
SpentHeight pgtype.Int4
}
func (q *Queries) CreateOutPointBalances(ctx context.Context, arg []CreateOutPointBalancesParams) *CreateOutPointBalancesBatchResults {
batch := &pgx.Batch{}
for _, a := range arg {
vals := []interface{}{
a.RuneID,
a.Pkscript,
a.TxHash,
a.TxIdx,
a.Amount,
a.BlockHeight,
a.SpentHeight,
}
batch.Queue(createOutPointBalances, vals...)
}
br := q.db.SendBatch(ctx, batch)
return &CreateOutPointBalancesBatchResults{br, len(arg), false}
}
func (b *CreateOutPointBalancesBatchResults) Exec(f func(int, error)) {
defer b.br.Close()
for t := 0; t < b.tot; t++ {
if b.closed {
if f != nil {
f(t, ErrBatchAlreadyClosed)
}
continue
}
_, err := b.br.Exec()
if f != nil {
f(t, err)
}
}
}
func (b *CreateOutPointBalancesBatchResults) Close() error {
b.closed = true
return b.br.Close()
}
const createRuneBalanceAtBlock = `-- name: CreateRuneBalanceAtBlock :batchexec
INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4)
`
type CreateRuneBalanceAtBlockBatchResults struct {
br pgx.BatchResults
tot int
closed bool
}
type CreateRuneBalanceAtBlockParams struct {
Pkscript string
BlockHeight int32
RuneID string
Amount pgtype.Numeric
}
func (q *Queries) CreateRuneBalanceAtBlock(ctx context.Context, arg []CreateRuneBalanceAtBlockParams) *CreateRuneBalanceAtBlockBatchResults {
batch := &pgx.Batch{}
for _, a := range arg {
vals := []interface{}{
a.Pkscript,
a.BlockHeight,
a.RuneID,
a.Amount,
}
batch.Queue(createRuneBalanceAtBlock, vals...)
}
br := q.db.SendBatch(ctx, batch)
return &CreateRuneBalanceAtBlockBatchResults{br, len(arg), false}
}
func (b *CreateRuneBalanceAtBlockBatchResults) Exec(f func(int, error)) {
defer b.br.Close()
for t := 0; t < b.tot; t++ {
if b.closed {
if f != nil {
f(t, ErrBatchAlreadyClosed)
}
continue
}
_, err := b.br.Exec()
if f != nil {
f(t, err)
}
}
}
func (b *CreateRuneBalanceAtBlockBatchResults) Close() error {
b.closed = true
return b.br.Close()
}
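Below is a minimal usage sketch for the generated batch helpers above, assuming it lives in the same gen package. All parameter values are illustrative, and the callback simply keeps the first per-row error reported by Exec.

package gen

import (
	"context"
	"math/big"

	"github.com/jackc/pgx/v5/pgtype"
)

// exampleCreateOutPointBalances queues a single illustrative row and executes
// the batch, collecting the first per-row error reported by Exec.
func exampleCreateOutPointBalances(ctx context.Context, q *Queries) error {
	params := []CreateOutPointBalancesParams{
		{
			RuneID:      "840000:3", // illustrative rune id
			Pkscript:    "deadbeef", // illustrative pkScript hex
			TxHash:      "0000000000000000000000000000000000000000000000000000000000000000",
			TxIdx:       0,
			Amount:      pgtype.Numeric{Int: big.NewInt(100), Valid: true},
			BlockHeight: 840000,
			SpentHeight: pgtype.Int4{}, // NULL: not spent yet
		},
	}
	results := q.CreateOutPointBalances(ctx, params)
	defer results.Close()

	var firstErr error
	results.Exec(func(i int, err error) {
		if err != nil && firstErr == nil {
			firstErr = err
		}
	})
	return firstErr
}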

View File

@@ -0,0 +1,819 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: data.sql
package gen
import (
"context"
"github.com/jackc/pgx/v5/pgtype"
)
const countRuneEntries = `-- name: CountRuneEntries :one
SELECT COUNT(*) FROM runes_entries
`
func (q *Queries) CountRuneEntries(ctx context.Context) (int64, error) {
row := q.db.QueryRow(ctx, countRuneEntries)
var count int64
err := row.Scan(&count)
return count, err
}
const createIndexedBlock = `-- name: CreateIndexedBlock :exec
INSERT INTO runes_indexed_blocks (hash, height, prev_hash, event_hash, cumulative_event_hash) VALUES ($1, $2, $3, $4, $5)
`
type CreateIndexedBlockParams struct {
Hash string
Height int32
PrevHash string
EventHash string
CumulativeEventHash string
}
func (q *Queries) CreateIndexedBlock(ctx context.Context, arg CreateIndexedBlockParams) error {
_, err := q.db.Exec(ctx, createIndexedBlock,
arg.Hash,
arg.Height,
arg.PrevHash,
arg.EventHash,
arg.CumulativeEventHash,
)
return err
}
const createRuneEntry = `-- name: CreateRuneEntry :exec
INSERT INTO runes_entries (rune_id, rune, number, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18)
`
type CreateRuneEntryParams struct {
RuneID string
Rune string
Number int64
Spacers int32
Premine pgtype.Numeric
Symbol int32
Divisibility int16
Terms bool
TermsAmount pgtype.Numeric
TermsCap pgtype.Numeric
TermsHeightStart pgtype.Int4
TermsHeightEnd pgtype.Int4
TermsOffsetStart pgtype.Int4
TermsOffsetEnd pgtype.Int4
Turbo bool
EtchingBlock int32
EtchingTxHash string
EtchedAt pgtype.Timestamp
}
func (q *Queries) CreateRuneEntry(ctx context.Context, arg CreateRuneEntryParams) error {
_, err := q.db.Exec(ctx, createRuneEntry,
arg.RuneID,
arg.Rune,
arg.Number,
arg.Spacers,
arg.Premine,
arg.Symbol,
arg.Divisibility,
arg.Terms,
arg.TermsAmount,
arg.TermsCap,
arg.TermsHeightStart,
arg.TermsHeightEnd,
arg.TermsOffsetStart,
arg.TermsOffsetEnd,
arg.Turbo,
arg.EtchingBlock,
arg.EtchingTxHash,
arg.EtchedAt,
)
return err
}
const createRuneEntryState = `-- name: CreateRuneEntryState :exec
INSERT INTO runes_entry_states (rune_id, block_height, mints, burned_amount, completed_at, completed_at_height) VALUES ($1, $2, $3, $4, $5, $6)
`
type CreateRuneEntryStateParams struct {
RuneID string
BlockHeight int32
Mints pgtype.Numeric
BurnedAmount pgtype.Numeric
CompletedAt pgtype.Timestamp
CompletedAtHeight pgtype.Int4
}
func (q *Queries) CreateRuneEntryState(ctx context.Context, arg CreateRuneEntryStateParams) error {
_, err := q.db.Exec(ctx, createRuneEntryState,
arg.RuneID,
arg.BlockHeight,
arg.Mints,
arg.BurnedAmount,
arg.CompletedAt,
arg.CompletedAtHeight,
)
return err
}
const createRuneTransaction = `-- name: CreateRuneTransaction :exec
INSERT INTO runes_transactions (hash, block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
`
type CreateRuneTransactionParams struct {
Hash string
BlockHeight int32
Index int32
Timestamp pgtype.Timestamp
Inputs []byte
Outputs []byte
Mints []byte
Burns []byte
RuneEtched bool
}
func (q *Queries) CreateRuneTransaction(ctx context.Context, arg CreateRuneTransactionParams) error {
_, err := q.db.Exec(ctx, createRuneTransaction,
arg.Hash,
arg.BlockHeight,
arg.Index,
arg.Timestamp,
arg.Inputs,
arg.Outputs,
arg.Mints,
arg.Burns,
arg.RuneEtched,
)
return err
}
const createRunestone = `-- name: CreateRunestone :exec
INSERT INTO runes_runestones (tx_hash, block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21)
`
type CreateRunestoneParams struct {
TxHash string
BlockHeight int32
Etching bool
EtchingDivisibility pgtype.Int2
EtchingPremine pgtype.Numeric
EtchingRune pgtype.Text
EtchingSpacers pgtype.Int4
EtchingSymbol pgtype.Int4
EtchingTerms pgtype.Bool
EtchingTermsAmount pgtype.Numeric
EtchingTermsCap pgtype.Numeric
EtchingTermsHeightStart pgtype.Int4
EtchingTermsHeightEnd pgtype.Int4
EtchingTermsOffsetStart pgtype.Int4
EtchingTermsOffsetEnd pgtype.Int4
EtchingTurbo pgtype.Bool
Edicts []byte
Mint pgtype.Text
Pointer pgtype.Int4
Cenotaph bool
Flaws int32
}
func (q *Queries) CreateRunestone(ctx context.Context, arg CreateRunestoneParams) error {
_, err := q.db.Exec(ctx, createRunestone,
arg.TxHash,
arg.BlockHeight,
arg.Etching,
arg.EtchingDivisibility,
arg.EtchingPremine,
arg.EtchingRune,
arg.EtchingSpacers,
arg.EtchingSymbol,
arg.EtchingTerms,
arg.EtchingTermsAmount,
arg.EtchingTermsCap,
arg.EtchingTermsHeightStart,
arg.EtchingTermsHeightEnd,
arg.EtchingTermsOffsetStart,
arg.EtchingTermsOffsetEnd,
arg.EtchingTurbo,
arg.Edicts,
arg.Mint,
arg.Pointer,
arg.Cenotaph,
arg.Flaws,
)
return err
}
const deleteIndexedBlockSinceHeight = `-- name: DeleteIndexedBlockSinceHeight :exec
DELETE FROM runes_indexed_blocks WHERE height >= $1
`
func (q *Queries) DeleteIndexedBlockSinceHeight(ctx context.Context, height int32) error {
_, err := q.db.Exec(ctx, deleteIndexedBlockSinceHeight, height)
return err
}
const deleteOutPointBalancesSinceHeight = `-- name: DeleteOutPointBalancesSinceHeight :exec
DELETE FROM runes_outpoint_balances WHERE block_height >= $1
`
func (q *Queries) DeleteOutPointBalancesSinceHeight(ctx context.Context, blockHeight int32) error {
_, err := q.db.Exec(ctx, deleteOutPointBalancesSinceHeight, blockHeight)
return err
}
const deleteRuneBalancesSinceHeight = `-- name: DeleteRuneBalancesSinceHeight :exec
DELETE FROM runes_balances WHERE block_height >= $1
`
func (q *Queries) DeleteRuneBalancesSinceHeight(ctx context.Context, blockHeight int32) error {
_, err := q.db.Exec(ctx, deleteRuneBalancesSinceHeight, blockHeight)
return err
}
const deleteRuneEntriesSinceHeight = `-- name: DeleteRuneEntriesSinceHeight :exec
DELETE FROM runes_entries WHERE etching_block >= $1
`
func (q *Queries) DeleteRuneEntriesSinceHeight(ctx context.Context, etchingBlock int32) error {
_, err := q.db.Exec(ctx, deleteRuneEntriesSinceHeight, etchingBlock)
return err
}
const deleteRuneEntryStatesSinceHeight = `-- name: DeleteRuneEntryStatesSinceHeight :exec
DELETE FROM runes_entry_states WHERE block_height >= $1
`
func (q *Queries) DeleteRuneEntryStatesSinceHeight(ctx context.Context, blockHeight int32) error {
_, err := q.db.Exec(ctx, deleteRuneEntryStatesSinceHeight, blockHeight)
return err
}
const deleteRuneTransactionsSinceHeight = `-- name: DeleteRuneTransactionsSinceHeight :exec
DELETE FROM runes_transactions WHERE block_height >= $1
`
func (q *Queries) DeleteRuneTransactionsSinceHeight(ctx context.Context, blockHeight int32) error {
_, err := q.db.Exec(ctx, deleteRuneTransactionsSinceHeight, blockHeight)
return err
}
const deleteRunestonesSinceHeight = `-- name: DeleteRunestonesSinceHeight :exec
DELETE FROM runes_runestones WHERE block_height >= $1
`
func (q *Queries) DeleteRunestonesSinceHeight(ctx context.Context, blockHeight int32) error {
_, err := q.db.Exec(ctx, deleteRunestonesSinceHeight, blockHeight)
return err
}
const getBalanceByPkScriptAndRuneId = `-- name: GetBalanceByPkScriptAndRuneId :one
SELECT pkscript, block_height, rune_id, amount FROM runes_balances WHERE pkscript = $1 AND rune_id = $2 AND block_height <= $3 ORDER BY block_height DESC LIMIT 1
`
type GetBalanceByPkScriptAndRuneIdParams struct {
Pkscript string
RuneID string
BlockHeight int32
}
func (q *Queries) GetBalanceByPkScriptAndRuneId(ctx context.Context, arg GetBalanceByPkScriptAndRuneIdParams) (RunesBalance, error) {
row := q.db.QueryRow(ctx, getBalanceByPkScriptAndRuneId, arg.Pkscript, arg.RuneID, arg.BlockHeight)
var i RunesBalance
err := row.Scan(
&i.Pkscript,
&i.BlockHeight,
&i.RuneID,
&i.Amount,
)
return i, err
}
const getBalancesByPkScript = `-- name: GetBalancesByPkScript :many
WITH balances AS (
SELECT DISTINCT ON (rune_id) pkscript, block_height, rune_id, amount FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC
)
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0
`
type GetBalancesByPkScriptParams struct {
Pkscript string
BlockHeight int32
}
type GetBalancesByPkScriptRow struct {
Pkscript string
BlockHeight int32
RuneID string
Amount pgtype.Numeric
}
func (q *Queries) GetBalancesByPkScript(ctx context.Context, arg GetBalancesByPkScriptParams) ([]GetBalancesByPkScriptRow, error) {
rows, err := q.db.Query(ctx, getBalancesByPkScript, arg.Pkscript, arg.BlockHeight)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetBalancesByPkScriptRow
for rows.Next() {
var i GetBalancesByPkScriptRow
if err := rows.Scan(
&i.Pkscript,
&i.BlockHeight,
&i.RuneID,
&i.Amount,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getBalancesByRuneId = `-- name: GetBalancesByRuneId :many
WITH balances AS (
SELECT DISTINCT ON (pkscript) pkscript, block_height, rune_id, amount FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, block_height DESC
)
SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0
`
type GetBalancesByRuneIdParams struct {
RuneID string
BlockHeight int32
}
type GetBalancesByRuneIdRow struct {
Pkscript string
BlockHeight int32
RuneID string
Amount pgtype.Numeric
}
func (q *Queries) GetBalancesByRuneId(ctx context.Context, arg GetBalancesByRuneIdParams) ([]GetBalancesByRuneIdRow, error) {
rows, err := q.db.Query(ctx, getBalancesByRuneId, arg.RuneID, arg.BlockHeight)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetBalancesByRuneIdRow
for rows.Next() {
var i GetBalancesByRuneIdRow
if err := rows.Scan(
&i.Pkscript,
&i.BlockHeight,
&i.RuneID,
&i.Amount,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getIndexedBlockByHeight = `-- name: GetIndexedBlockByHeight :one
SELECT height, hash, prev_hash, event_hash, cumulative_event_hash FROM runes_indexed_blocks WHERE height = $1
`
func (q *Queries) GetIndexedBlockByHeight(ctx context.Context, height int32) (RunesIndexedBlock, error) {
row := q.db.QueryRow(ctx, getIndexedBlockByHeight, height)
var i RunesIndexedBlock
err := row.Scan(
&i.Height,
&i.Hash,
&i.PrevHash,
&i.EventHash,
&i.CumulativeEventHash,
)
return i, err
}
const getLatestIndexedBlock = `-- name: GetLatestIndexedBlock :one
SELECT height, hash, prev_hash, event_hash, cumulative_event_hash FROM runes_indexed_blocks ORDER BY height DESC LIMIT 1
`
func (q *Queries) GetLatestIndexedBlock(ctx context.Context) (RunesIndexedBlock, error) {
row := q.db.QueryRow(ctx, getLatestIndexedBlock)
var i RunesIndexedBlock
err := row.Scan(
&i.Height,
&i.Hash,
&i.PrevHash,
&i.EventHash,
&i.CumulativeEventHash,
)
return i, err
}
const getOutPointBalancesAtOutPoint = `-- name: GetOutPointBalancesAtOutPoint :many
SELECT rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height FROM runes_outpoint_balances WHERE tx_hash = $1 AND tx_idx = $2
`
type GetOutPointBalancesAtOutPointParams struct {
TxHash string
TxIdx int32
}
func (q *Queries) GetOutPointBalancesAtOutPoint(ctx context.Context, arg GetOutPointBalancesAtOutPointParams) ([]RunesOutpointBalance, error) {
rows, err := q.db.Query(ctx, getOutPointBalancesAtOutPoint, arg.TxHash, arg.TxIdx)
if err != nil {
return nil, err
}
defer rows.Close()
var items []RunesOutpointBalance
for rows.Next() {
var i RunesOutpointBalance
if err := rows.Scan(
&i.RuneID,
&i.Pkscript,
&i.TxHash,
&i.TxIdx,
&i.Amount,
&i.BlockHeight,
&i.SpentHeight,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getRuneEntriesByRuneIds = `-- name: GetRuneEntriesByRuneIds :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entry_states WHERE rune_id = ANY($1::text[]) ORDER BY rune_id, block_height DESC
)
SELECT runes_entries.rune_id, number, rune, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at, states.rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE runes_entries.rune_id = ANY($1::text[])
`
type GetRuneEntriesByRuneIdsRow struct {
RuneID string
Number int64
Rune string
Spacers int32
Premine pgtype.Numeric
Symbol int32
Divisibility int16
Terms bool
TermsAmount pgtype.Numeric
TermsCap pgtype.Numeric
TermsHeightStart pgtype.Int4
TermsHeightEnd pgtype.Int4
TermsOffsetStart pgtype.Int4
TermsOffsetEnd pgtype.Int4
Turbo bool
EtchingBlock int32
EtchingTxHash string
EtchedAt pgtype.Timestamp
RuneID_2 pgtype.Text
BlockHeight pgtype.Int4
Mints pgtype.Numeric
BurnedAmount pgtype.Numeric
CompletedAt pgtype.Timestamp
CompletedAtHeight pgtype.Int4
}
func (q *Queries) GetRuneEntriesByRuneIds(ctx context.Context, runeIds []string) ([]GetRuneEntriesByRuneIdsRow, error) {
rows, err := q.db.Query(ctx, getRuneEntriesByRuneIds, runeIds)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetRuneEntriesByRuneIdsRow
for rows.Next() {
var i GetRuneEntriesByRuneIdsRow
if err := rows.Scan(
&i.RuneID,
&i.Number,
&i.Rune,
&i.Spacers,
&i.Premine,
&i.Symbol,
&i.Divisibility,
&i.Terms,
&i.TermsAmount,
&i.TermsCap,
&i.TermsHeightStart,
&i.TermsHeightEnd,
&i.TermsOffsetStart,
&i.TermsOffsetEnd,
&i.Turbo,
&i.EtchingBlock,
&i.EtchingTxHash,
&i.EtchedAt,
&i.RuneID_2,
&i.BlockHeight,
&i.Mints,
&i.BurnedAmount,
&i.CompletedAt,
&i.CompletedAtHeight,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getRuneEntriesByRuneIdsAndHeight = `-- name: GetRuneEntriesByRuneIdsAndHeight :many
WITH states AS (
-- select latest state
SELECT DISTINCT ON (rune_id) rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entry_states WHERE rune_id = ANY($1::text[]) AND block_height <= $2 ORDER BY rune_id, block_height DESC
)
SELECT runes_entries.rune_id, number, rune, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at, states.rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entries
LEFT JOIN states ON runes_entries.rune_id = states.rune_id
WHERE runes_entries.rune_id = ANY($1::text[]) AND etching_block <= $2
`
type GetRuneEntriesByRuneIdsAndHeightParams struct {
RuneIds []string
Height int32
}
type GetRuneEntriesByRuneIdsAndHeightRow struct {
RuneID string
Number int64
Rune string
Spacers int32
Premine pgtype.Numeric
Symbol int32
Divisibility int16
Terms bool
TermsAmount pgtype.Numeric
TermsCap pgtype.Numeric
TermsHeightStart pgtype.Int4
TermsHeightEnd pgtype.Int4
TermsOffsetStart pgtype.Int4
TermsOffsetEnd pgtype.Int4
Turbo bool
EtchingBlock int32
EtchingTxHash string
EtchedAt pgtype.Timestamp
RuneID_2 pgtype.Text
BlockHeight pgtype.Int4
Mints pgtype.Numeric
BurnedAmount pgtype.Numeric
CompletedAt pgtype.Timestamp
CompletedAtHeight pgtype.Int4
}
func (q *Queries) GetRuneEntriesByRuneIdsAndHeight(ctx context.Context, arg GetRuneEntriesByRuneIdsAndHeightParams) ([]GetRuneEntriesByRuneIdsAndHeightRow, error) {
rows, err := q.db.Query(ctx, getRuneEntriesByRuneIdsAndHeight, arg.RuneIds, arg.Height)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetRuneEntriesByRuneIdsAndHeightRow
for rows.Next() {
var i GetRuneEntriesByRuneIdsAndHeightRow
if err := rows.Scan(
&i.RuneID,
&i.Number,
&i.Rune,
&i.Spacers,
&i.Premine,
&i.Symbol,
&i.Divisibility,
&i.Terms,
&i.TermsAmount,
&i.TermsCap,
&i.TermsHeightStart,
&i.TermsHeightEnd,
&i.TermsOffsetStart,
&i.TermsOffsetEnd,
&i.Turbo,
&i.EtchingBlock,
&i.EtchingTxHash,
&i.EtchedAt,
&i.RuneID_2,
&i.BlockHeight,
&i.Mints,
&i.BurnedAmount,
&i.CompletedAt,
&i.CompletedAtHeight,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getRuneIdFromRune = `-- name: GetRuneIdFromRune :one
SELECT rune_id FROM runes_entries WHERE rune = $1
`
func (q *Queries) GetRuneIdFromRune(ctx context.Context, rune string) (string, error) {
row := q.db.QueryRow(ctx, getRuneIdFromRune, rune)
var rune_id string
err := row.Scan(&rune_id)
return rune_id, err
}
const getRuneTransactions = `-- name: GetRuneTransactions :many
SELECT hash, runes_transactions.block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched, tx_hash, runes_runestones.block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws FROM runes_transactions
LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash
WHERE (
$1::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter
OR runes_transactions.outputs @> $2::JSONB
OR runes_transactions.inputs @> $2::JSONB
) AND (
$3::BOOLEAN = FALSE -- if @filter_rune_id is TRUE, apply rune_id filter
OR runes_transactions.outputs @> $4::JSONB
OR runes_transactions.inputs @> $4::JSONB
OR runes_transactions.mints ? $5
OR runes_transactions.burns ? $5
OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = $6 AND runes_transactions.index = $7)
) AND (
$8 <= runes_transactions.block_height AND runes_transactions.block_height <= $9
)
ORDER BY runes_transactions.block_height DESC LIMIT 10000
`
type GetRuneTransactionsParams struct {
FilterPkScript bool
PkScriptParam []byte
FilterRuneID bool
RuneIDParam []byte
RuneID []byte
RuneIDBlockHeight int32
RuneIDTxIndex int32
FromBlock int32
ToBlock int32
}
type GetRuneTransactionsRow struct {
Hash string
BlockHeight int32
Index int32
Timestamp pgtype.Timestamp
Inputs []byte
Outputs []byte
Mints []byte
Burns []byte
RuneEtched bool
TxHash pgtype.Text
BlockHeight_2 pgtype.Int4
Etching pgtype.Bool
EtchingDivisibility pgtype.Int2
EtchingPremine pgtype.Numeric
EtchingRune pgtype.Text
EtchingSpacers pgtype.Int4
EtchingSymbol pgtype.Int4
EtchingTerms pgtype.Bool
EtchingTermsAmount pgtype.Numeric
EtchingTermsCap pgtype.Numeric
EtchingTermsHeightStart pgtype.Int4
EtchingTermsHeightEnd pgtype.Int4
EtchingTermsOffsetStart pgtype.Int4
EtchingTermsOffsetEnd pgtype.Int4
EtchingTurbo pgtype.Bool
Edicts []byte
Mint pgtype.Text
Pointer pgtype.Int4
Cenotaph pgtype.Bool
Flaws pgtype.Int4
}
func (q *Queries) GetRuneTransactions(ctx context.Context, arg GetRuneTransactionsParams) ([]GetRuneTransactionsRow, error) {
rows, err := q.db.Query(ctx, getRuneTransactions,
arg.FilterPkScript,
arg.PkScriptParam,
arg.FilterRuneID,
arg.RuneIDParam,
arg.RuneID,
arg.RuneIDBlockHeight,
arg.RuneIDTxIndex,
arg.FromBlock,
arg.ToBlock,
)
if err != nil {
return nil, err
}
defer rows.Close()
var items []GetRuneTransactionsRow
for rows.Next() {
var i GetRuneTransactionsRow
if err := rows.Scan(
&i.Hash,
&i.BlockHeight,
&i.Index,
&i.Timestamp,
&i.Inputs,
&i.Outputs,
&i.Mints,
&i.Burns,
&i.RuneEtched,
&i.TxHash,
&i.BlockHeight_2,
&i.Etching,
&i.EtchingDivisibility,
&i.EtchingPremine,
&i.EtchingRune,
&i.EtchingSpacers,
&i.EtchingSymbol,
&i.EtchingTerms,
&i.EtchingTermsAmount,
&i.EtchingTermsCap,
&i.EtchingTermsHeightStart,
&i.EtchingTermsHeightEnd,
&i.EtchingTermsOffsetStart,
&i.EtchingTermsOffsetEnd,
&i.EtchingTurbo,
&i.Edicts,
&i.Mint,
&i.Pointer,
&i.Cenotaph,
&i.Flaws,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getUnspentOutPointBalancesByPkScript = `-- name: GetUnspentOutPointBalancesByPkScript :many
SELECT rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height FROM runes_outpoint_balances WHERE pkscript = $1 AND block_height <= $2 AND (spent_height IS NULL OR spent_height > $2)
`
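// Illustrative note (not generated code): the predicate above yields a point-in-time
// view at block $2. An outpoint counts as unspent at height H when it was created at
// or before H (block_height <= H) and was either never spent or spent only after H
// (spent_height IS NULL OR spent_height > H). This also keeps reorg handling simple:
// UnspendOutPointBalancesSinceHeight below just nulls spent_height back out.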
type GetUnspentOutPointBalancesByPkScriptParams struct {
Pkscript string
BlockHeight int32
}
func (q *Queries) GetUnspentOutPointBalancesByPkScript(ctx context.Context, arg GetUnspentOutPointBalancesByPkScriptParams) ([]RunesOutpointBalance, error) {
rows, err := q.db.Query(ctx, getUnspentOutPointBalancesByPkScript, arg.Pkscript, arg.BlockHeight)
if err != nil {
return nil, err
}
defer rows.Close()
var items []RunesOutpointBalance
for rows.Next() {
var i RunesOutpointBalance
if err := rows.Scan(
&i.RuneID,
&i.Pkscript,
&i.TxHash,
&i.TxIdx,
&i.Amount,
&i.BlockHeight,
&i.SpentHeight,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const spendOutPointBalances = `-- name: SpendOutPointBalances :exec
UPDATE runes_outpoint_balances SET spent_height = $1 WHERE tx_hash = $2 AND tx_idx = $3
`
type SpendOutPointBalancesParams struct {
SpentHeight pgtype.Int4
TxHash string
TxIdx int32
}
func (q *Queries) SpendOutPointBalances(ctx context.Context, arg SpendOutPointBalancesParams) error {
_, err := q.db.Exec(ctx, spendOutPointBalances, arg.SpentHeight, arg.TxHash, arg.TxIdx)
return err
}
const unspendOutPointBalancesSinceHeight = `-- name: UnspendOutPointBalancesSinceHeight :exec
UPDATE runes_outpoint_balances SET spent_height = NULL WHERE spent_height >= $1
`
func (q *Queries) UnspendOutPointBalancesSinceHeight(ctx context.Context, spentHeight pgtype.Int4) error {
_, err := q.db.Exec(ctx, unspendOutPointBalancesSinceHeight, spentHeight)
return err
}

View File

@@ -0,0 +1,33 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
)
type DBTX interface {
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
QueryRow(context.Context, string, ...interface{}) pgx.Row
SendBatch(context.Context, *pgx.Batch) pgx.BatchResults
}
func New(db DBTX) *Queries {
return &Queries{db: db}
}
type Queries struct {
db DBTX
}
func (q *Queries) WithTx(tx pgx.Tx) *Queries {
return &Queries{
db: tx,
}
}
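Both *pgxpool.Pool and pgx.Tx satisfy the DBTX interface above, so the same generated Queries value can run against a connection pool or be re-bound to a transaction with WithTx. A minimal wiring sketch (illustrative only; the pgxpool import and connection string are assumptions, not code from this changeset):

package main

import (
    "context"

    "github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen"
    "github.com/jackc/pgx/v5/pgxpool"
)

func main() {
    ctx := context.Background()
    // *pgxpool.Pool provides Exec/Query/QueryRow/SendBatch, i.e. it implements gen.DBTX.
    pool, err := pgxpool.New(ctx, "postgres://localhost:5432/runes")
    if err != nil {
        panic(err)
    }
    defer pool.Close()

    queries := gen.New(pool)

    // Re-bind the same queries to a transaction when several writes must be atomic.
    tx, err := pool.Begin(ctx)
    if err != nil {
        panic(err)
    }
    defer tx.Rollback(ctx) // returns pgx.ErrTxClosed after a successful Commit; ignored here
    qtx := queries.WithTx(tx)
    _ = qtx // call generated query methods on qtx here
    if err := tx.Commit(ctx); err != nil {
        panic(err)
    }
}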

View File

@@ -0,0 +1,70 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: info.sql
package gen
import (
"context"
)
const getLatestIndexerState = `-- name: GetLatestIndexerState :one
SELECT id, db_version, event_hash_version, created_at FROM runes_indexer_state ORDER BY created_at DESC LIMIT 1
`
func (q *Queries) GetLatestIndexerState(ctx context.Context) (RunesIndexerState, error) {
row := q.db.QueryRow(ctx, getLatestIndexerState)
var i RunesIndexerState
err := row.Scan(
&i.Id,
&i.DbVersion,
&i.EventHashVersion,
&i.CreatedAt,
)
return i, err
}
const getLatestIndexerStats = `-- name: GetLatestIndexerStats :one
SELECT "client_version", "network" FROM runes_indexer_stats ORDER BY id DESC LIMIT 1
`
type GetLatestIndexerStatsRow struct {
ClientVersion string
Network string
}
func (q *Queries) GetLatestIndexerStats(ctx context.Context) (GetLatestIndexerStatsRow, error) {
row := q.db.QueryRow(ctx, getLatestIndexerStats)
var i GetLatestIndexerStatsRow
err := row.Scan(&i.ClientVersion, &i.Network)
return i, err
}
const setIndexerState = `-- name: SetIndexerState :exec
INSERT INTO runes_indexer_state (db_version, event_hash_version) VALUES ($1, $2)
`
type SetIndexerStateParams struct {
DbVersion int32
EventHashVersion int32
}
func (q *Queries) SetIndexerState(ctx context.Context, arg SetIndexerStateParams) error {
_, err := q.db.Exec(ctx, setIndexerState, arg.DbVersion, arg.EventHashVersion)
return err
}
const updateIndexerStats = `-- name: UpdateIndexerStats :exec
INSERT INTO runes_indexer_stats (client_version, network) VALUES ($1, $2)
`
type UpdateIndexerStatsParams struct {
ClientVersion string
Network string
}
func (q *Queries) UpdateIndexerStats(ctx context.Context, arg UpdateIndexerStatsParams) error {
_, err := q.db.Exec(ctx, updateIndexerStats, arg.ClientVersion, arg.Network)
return err
}

View File

@@ -0,0 +1,114 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
package gen
import (
"github.com/jackc/pgx/v5/pgtype"
)
type RunesBalance struct {
Pkscript string
BlockHeight int32
RuneID string
Amount pgtype.Numeric
}
type RunesEntry struct {
RuneID string
Number int64
Rune string
Spacers int32
Premine pgtype.Numeric
Symbol int32
Divisibility int16
Terms bool
TermsAmount pgtype.Numeric
TermsCap pgtype.Numeric
TermsHeightStart pgtype.Int4
TermsHeightEnd pgtype.Int4
TermsOffsetStart pgtype.Int4
TermsOffsetEnd pgtype.Int4
Turbo bool
EtchingBlock int32
EtchingTxHash string
EtchedAt pgtype.Timestamp
}
type RunesEntryState struct {
RuneID string
BlockHeight int32
Mints pgtype.Numeric
BurnedAmount pgtype.Numeric
CompletedAt pgtype.Timestamp
CompletedAtHeight pgtype.Int4
}
type RunesIndexedBlock struct {
Height int32
Hash string
PrevHash string
EventHash string
CumulativeEventHash string
}
type RunesIndexerStat struct {
Id int64
ClientVersion string
Network string
CreatedAt pgtype.Timestamptz
}
type RunesIndexerState struct {
Id int64
DbVersion int32
EventHashVersion int32
CreatedAt pgtype.Timestamptz
}
type RunesOutpointBalance struct {
RuneID string
Pkscript string
TxHash string
TxIdx int32
Amount pgtype.Numeric
BlockHeight int32
SpentHeight pgtype.Int4
}
type RunesRunestone struct {
TxHash string
BlockHeight int32
Etching bool
EtchingDivisibility pgtype.Int2
EtchingPremine pgtype.Numeric
EtchingRune pgtype.Text
EtchingSpacers pgtype.Int4
EtchingSymbol pgtype.Int4
EtchingTerms pgtype.Bool
EtchingTermsAmount pgtype.Numeric
EtchingTermsCap pgtype.Numeric
EtchingTermsHeightStart pgtype.Int4
EtchingTermsHeightEnd pgtype.Int4
EtchingTermsOffsetStart pgtype.Int4
EtchingTermsOffsetEnd pgtype.Int4
EtchingTurbo pgtype.Bool
Edicts []byte
Mint pgtype.Text
Pointer pgtype.Int4
Cenotaph bool
Flaws int32
}
type RunesTransaction struct {
Hash string
BlockHeight int32
Index int32
Timestamp pgtype.Timestamp
Inputs []byte
Outputs []byte
Mints []byte
Burns []byte
RuneEtched bool
}

View File

@@ -0,0 +1,56 @@
package postgres
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen"
"github.com/jackc/pgx/v5"
)
var _ datagateway.IndexerInfoDataGateway = (*Repository)(nil)
func (r *Repository) GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error) {
indexerStateModel, err := r.queries.GetLatestIndexerState(ctx)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return entity.IndexerState{}, errors.WithStack(errs.NotFound)
}
return entity.IndexerState{}, errors.Wrap(err, "error during query")
}
indexerState := mapIndexerStateModelToType(indexerStateModel)
return indexerState, nil
}
func (r *Repository) GetLatestIndexerStats(ctx context.Context) (string, common.Network, error) {
stats, err := r.queries.GetLatestIndexerStats(ctx)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return "", "", errors.WithStack(errs.NotFound)
}
return "", "", errors.Wrap(err, "error during query")
}
return stats.ClientVersion, common.Network(stats.Network), nil
}
func (r *Repository) SetIndexerState(ctx context.Context, state entity.IndexerState) error {
params := mapIndexerStateTypeToParams(state)
if err := r.queries.SetIndexerState(ctx, params); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error {
if err := r.queries.UpdateIndexerStats(ctx, gen.UpdateIndexerStatsParams{
ClientVersion: clientVersion,
Network: string(network),
}); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}

View File

@@ -0,0 +1,693 @@
package postgres
import (
"encoding/hex"
"encoding/json"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/gaze-network/uint128"
"github.com/jackc/pgx/v5/pgtype"
"github.com/samber/lo"
)
func uint128FromNumeric(src pgtype.Numeric) (*uint128.Uint128, error) {
if !src.Valid {
return nil, nil
}
bytes, err := src.MarshalJSON()
if err != nil {
return nil, errors.WithStack(err)
}
result, err := uint128.FromString(string(bytes))
if err != nil {
return nil, errors.WithStack(err)
}
return &result, nil
}
func numericFromUint128(src *uint128.Uint128) (pgtype.Numeric, error) {
if src == nil {
return pgtype.Numeric{}, nil
}
bytes := []byte(src.String())
var result pgtype.Numeric
err := result.UnmarshalJSON(bytes)
if err != nil {
return pgtype.Numeric{}, errors.WithStack(err)
}
return result, nil
}
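// Illustrative note (not part of this changeset): both helpers round-trip through the
// decimal text form. pgtype.Numeric.MarshalJSON renders a valid value as plain decimal
// text, which uint128.FromString parses, and the reverse direction feeds the uint128's
// decimal string to pgtype.Numeric.UnmarshalJSON. An invalid Numeric maps to a nil
// *uint128.Uint128 and a nil pointer maps back to an invalid Numeric, which is how NULL
// columns are represented; the tests later in this diff exercise both directions.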
func mapIndexerStateModelToType(src gen.RunesIndexerState) entity.IndexerState {
var createdAt time.Time
if src.CreatedAt.Valid {
createdAt = src.CreatedAt.Time
}
return entity.IndexerState{
DBVersion: src.DbVersion,
EventHashVersion: src.EventHashVersion,
CreatedAt: createdAt,
}
}
func mapIndexerStateTypeToParams(src entity.IndexerState) gen.SetIndexerStateParams {
return gen.SetIndexerStateParams{
DbVersion: src.DBVersion,
EventHashVersion: src.EventHashVersion,
}
}
func mapRuneEntryModelToType(src gen.GetRuneEntriesByRuneIdsRow) (runes.RuneEntry, error) {
runeId, err := runes.NewRuneIdFromString(src.RuneID)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse rune id")
}
burnedAmount, err := uint128FromNumeric(src.BurnedAmount)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse burned amount")
}
rune, err := runes.NewRuneFromString(src.Rune)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse rune")
}
mints, err := uint128FromNumeric(src.Mints)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse mints")
}
premine, err := uint128FromNumeric(src.Premine)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse premine")
}
var completedAt time.Time
if src.CompletedAt.Valid {
completedAt = src.CompletedAt.Time
}
var completedAtHeight *uint64
if src.CompletedAtHeight.Valid {
completedAtHeight = lo.ToPtr(uint64(src.CompletedAtHeight.Int32))
}
var terms *runes.Terms
if src.Terms {
terms = &runes.Terms{}
if src.TermsAmount.Valid {
amount, err := uint128FromNumeric(src.TermsAmount)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse terms amount")
}
terms.Amount = amount
}
if src.TermsCap.Valid {
cap, err := uint128FromNumeric(src.TermsCap)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse terms cap")
}
terms.Cap = cap
}
if src.TermsHeightStart.Valid {
heightStart := uint64(src.TermsHeightStart.Int32)
terms.HeightStart = &heightStart
}
if src.TermsHeightEnd.Valid {
heightEnd := uint64(src.TermsHeightEnd.Int32)
terms.HeightEnd = &heightEnd
}
if src.TermsOffsetStart.Valid {
offsetStart := uint64(src.TermsOffsetStart.Int32)
terms.OffsetStart = &offsetStart
}
if src.TermsOffsetEnd.Valid {
offsetEnd := uint64(src.TermsOffsetEnd.Int32)
terms.OffsetEnd = &offsetEnd
}
}
etchingTxHash, err := chainhash.NewHashFromStr(src.EtchingTxHash)
if err != nil {
return runes.RuneEntry{}, errors.Wrap(err, "failed to parse etching tx hash")
}
var etchedAt time.Time
if src.EtchedAt.Valid {
etchedAt = src.EtchedAt.Time
}
return runes.RuneEntry{
RuneId: runeId,
Number: uint64(src.Number),
Divisibility: uint8(src.Divisibility),
Premine: lo.FromPtr(premine),
SpacedRune: runes.NewSpacedRune(rune, uint32(src.Spacers)),
Symbol: src.Symbol,
Terms: terms,
Turbo: src.Turbo,
Mints: lo.FromPtr(mints),
BurnedAmount: lo.FromPtr(burnedAmount),
CompletedAt: completedAt,
CompletedAtHeight: completedAtHeight,
EtchingBlock: uint64(src.EtchingBlock),
EtchingTxHash: *etchingTxHash,
EtchedAt: etchedAt,
}, nil
}
func mapRuneEntryTypeToParams(src runes.RuneEntry, blockHeight uint64) (gen.CreateRuneEntryParams, gen.CreateRuneEntryStateParams, error) {
runeId := src.RuneId.String()
rune := src.SpacedRune.Rune.String()
spacers := int32(src.SpacedRune.Spacers)
mints, err := numericFromUint128(&src.Mints)
if err != nil {
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse mints")
}
burnedAmount, err := numericFromUint128(&src.BurnedAmount)
if err != nil {
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse burned amount")
}
premine, err := numericFromUint128(&src.Premine)
if err != nil {
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse premine")
}
var completedAt pgtype.Timestamp
if !src.CompletedAt.IsZero() {
completedAt.Time = src.CompletedAt
completedAt.Valid = true
}
var completedAtHeight pgtype.Int4
if src.CompletedAtHeight != nil {
completedAtHeight.Int32 = int32(*src.CompletedAtHeight)
completedAtHeight.Valid = true
}
var terms bool
var termsAmount, termsCap pgtype.Numeric
var termsHeightStart, termsHeightEnd, termsOffsetStart, termsOffsetEnd pgtype.Int4
if src.Terms != nil {
terms = true
if src.Terms.Amount != nil {
termsAmount, err = numericFromUint128(src.Terms.Amount)
if err != nil {
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse terms amount")
}
}
if src.Terms.Cap != nil {
termsCap, err = numericFromUint128(src.Terms.Cap)
if err != nil {
return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse terms cap")
}
}
if src.Terms.HeightStart != nil {
termsHeightStart = pgtype.Int4{
Int32: int32(*src.Terms.HeightStart),
Valid: true,
}
}
if src.Terms.HeightEnd != nil {
termsHeightEnd = pgtype.Int4{
Int32: int32(*src.Terms.HeightEnd),
Valid: true,
}
}
if src.Terms.OffsetStart != nil {
termsOffsetStart = pgtype.Int4{
Int32: int32(*src.Terms.OffsetStart),
Valid: true,
}
}
if src.Terms.OffsetEnd != nil {
termsOffsetEnd = pgtype.Int4{
Int32: int32(*src.Terms.OffsetEnd),
Valid: true,
}
}
}
etchedAt := pgtype.Timestamp{Time: src.EtchedAt, Valid: true}
return gen.CreateRuneEntryParams{
RuneID: runeId,
Rune: rune,
Number: int64(src.Number),
Spacers: spacers,
Premine: premine,
Symbol: src.Symbol,
Divisibility: int16(src.Divisibility),
Terms: terms,
TermsAmount: termsAmount,
TermsCap: termsCap,
TermsHeightStart: termsHeightStart,
TermsHeightEnd: termsHeightEnd,
TermsOffsetStart: termsOffsetStart,
TermsOffsetEnd: termsOffsetEnd,
Turbo: src.Turbo,
EtchingBlock: int32(src.EtchingBlock),
EtchingTxHash: src.EtchingTxHash.String(),
EtchedAt: etchedAt,
}, gen.CreateRuneEntryStateParams{
BlockHeight: int32(blockHeight),
RuneID: runeId,
Mints: mints,
BurnedAmount: burnedAmount,
CompletedAt: completedAt,
CompletedAtHeight: completedAtHeight,
}, nil
}
// mapRuneTransactionTypeToParams returns params for creating a new rune transaction and (optionally) params for its runestone.
func mapRuneTransactionTypeToParams(src entity.RuneTransaction) (gen.CreateRuneTransactionParams, *gen.CreateRunestoneParams, error) {
var timestamp pgtype.Timestamp
if !src.Timestamp.IsZero() {
timestamp.Time = src.Timestamp
timestamp.Valid = true
}
inputsBytes, err := json.Marshal(src.Inputs)
if err != nil {
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal inputs")
}
outputsBytes, err := json.Marshal(src.Outputs)
if err != nil {
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal outputs")
}
mints := make(map[string]uint128.Uint128)
for key, value := range src.Mints {
mints[key.String()] = value
}
mintsBytes, err := json.Marshal(mints)
if err != nil {
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal mints")
}
burns := make(map[string]uint128.Uint128)
for key, value := range src.Burns {
burns[key.String()] = value
}
burnsBytes, err := json.Marshal(burns)
if err != nil {
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal burns")
}
var runestoneParams *gen.CreateRunestoneParams
if src.Runestone != nil {
params, err := mapRunestoneTypeToParams(*src.Runestone, src.Hash, src.BlockHeight)
if err != nil {
return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to map runestone to params")
}
runestoneParams = &params
}
return gen.CreateRuneTransactionParams{
Hash: src.Hash.String(),
BlockHeight: int32(src.BlockHeight),
Index: int32(src.Index),
Timestamp: timestamp,
Inputs: inputsBytes,
Outputs: outputsBytes,
Mints: mintsBytes,
Burns: burnsBytes,
RuneEtched: src.RuneEtched,
}, runestoneParams, nil
}
func extractModelRuneTxAndRunestone(src gen.GetRuneTransactionsRow) (gen.RunesTransaction, *gen.RunesRunestone, error) {
var runestone *gen.RunesRunestone
if src.TxHash.Valid {
// these fields should never be null
if !src.Cenotaph.Valid {
return gen.RunesTransaction{}, nil, errors.New("runestone cenotaph is null")
}
if !src.Flaws.Valid {
return gen.RunesTransaction{}, nil, errors.New("runestone flaws is null")
}
runestone = &gen.RunesRunestone{
TxHash: src.TxHash.String,
BlockHeight: src.BlockHeight,
Etching: src.Etching.Bool,
EtchingDivisibility: src.EtchingDivisibility,
EtchingPremine: src.EtchingPremine,
EtchingRune: src.EtchingRune,
EtchingSpacers: src.EtchingSpacers,
EtchingSymbol: src.EtchingSymbol,
EtchingTerms: src.EtchingTerms,
EtchingTermsAmount: src.EtchingTermsAmount,
EtchingTermsCap: src.EtchingTermsCap,
EtchingTermsHeightStart: src.EtchingTermsHeightStart,
EtchingTermsHeightEnd: src.EtchingTermsHeightEnd,
EtchingTermsOffsetStart: src.EtchingTermsOffsetStart,
EtchingTermsOffsetEnd: src.EtchingTermsOffsetEnd,
Edicts: src.Edicts,
Mint: src.Mint,
Pointer: src.Pointer,
Cenotaph: src.Cenotaph.Bool,
Flaws: src.Flaws.Int32,
}
}
return gen.RunesTransaction{
Hash: src.Hash,
BlockHeight: src.BlockHeight,
Index: src.Index,
Timestamp: src.Timestamp,
Inputs: src.Inputs,
Outputs: src.Outputs,
Mints: src.Mints,
Burns: src.Burns,
RuneEtched: src.RuneEtched,
}, runestone, nil
}
func mapRuneTransactionModelToType(src gen.RunesTransaction) (entity.RuneTransaction, error) {
hash, err := chainhash.NewHashFromStr(src.Hash)
if err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to parse transaction hash")
}
var timestamp time.Time
if src.Timestamp.Valid {
timestamp = src.Timestamp.Time
}
inputs := make([]*entity.TxInputOutput, 0)
if err := json.Unmarshal(src.Inputs, &inputs); err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal inputs")
}
outputs := make([]*entity.TxInputOutput, 0)
if err := json.Unmarshal(src.Outputs, &outputs); err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal outputs")
}
mintsRaw := make(map[string]uint128.Uint128)
if err := json.Unmarshal(src.Mints, &mintsRaw); err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal mints")
}
mints := make(map[runes.RuneId]uint128.Uint128)
for key, value := range mintsRaw {
runeId, err := runes.NewRuneIdFromString(key)
if err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to parse rune id")
}
mints[runeId] = value
}
burnsRaw := make(map[string]uint128.Uint128)
if err := json.Unmarshal(src.Burns, &burnsRaw); err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal burns")
}
burns := make(map[runes.RuneId]uint128.Uint128)
for key, value := range burnsRaw {
runeId, err := runes.NewRuneIdFromString(key)
if err != nil {
return entity.RuneTransaction{}, errors.Wrap(err, "failed to parse rune id")
}
burns[runeId] = value
}
return entity.RuneTransaction{
Hash: *hash,
BlockHeight: uint64(src.BlockHeight),
Index: uint32(src.Index),
Timestamp: timestamp,
Inputs: inputs,
Outputs: outputs,
Mints: mints,
Burns: burns,
RuneEtched: src.RuneEtched,
}, nil
}
func mapRunestoneTypeToParams(src runes.Runestone, txHash chainhash.Hash, blockHeight uint64) (gen.CreateRunestoneParams, error) {
var runestoneParams gen.CreateRunestoneParams
// TODO: optimize serialized edicts
edictsBytes, err := json.Marshal(src.Edicts)
if err != nil {
return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to marshal runestone edicts")
}
runestoneParams = gen.CreateRunestoneParams{
TxHash: txHash.String(),
BlockHeight: int32(blockHeight),
Edicts: edictsBytes,
Cenotaph: src.Cenotaph,
Flaws: int32(src.Flaws),
}
if src.Etching != nil {
runestoneParams.Etching = true
etching := *src.Etching
if etching.Divisibility != nil {
runestoneParams.EtchingDivisibility = pgtype.Int2{Int16: int16(*etching.Divisibility), Valid: true}
}
if etching.Premine != nil {
premine, err := numericFromUint128(etching.Premine)
if err != nil {
return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to parse etching premine")
}
runestoneParams.EtchingPremine = premine
}
if etching.Rune != nil {
runestoneParams.EtchingRune = pgtype.Text{String: etching.Rune.String(), Valid: true}
}
if etching.Spacers != nil {
runestoneParams.EtchingSpacers = pgtype.Int4{Int32: int32(*etching.Spacers), Valid: true}
}
if etching.Symbol != nil {
runestoneParams.EtchingSymbol = pgtype.Int4{Int32: *etching.Symbol, Valid: true}
}
if etching.Terms != nil {
runestoneParams.EtchingTerms = pgtype.Bool{Bool: true, Valid: true}
terms := *etching.Terms
if terms.Amount != nil {
amount, err := numericFromUint128(terms.Amount)
if err != nil {
return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to parse etching terms amount")
}
runestoneParams.EtchingTermsAmount = amount
}
if terms.Cap != nil {
cap, err := numericFromUint128(terms.Cap)
if err != nil {
return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to parse etching terms cap")
}
runestoneParams.EtchingTermsCap = cap
}
if terms.HeightStart != nil {
runestoneParams.EtchingTermsHeightStart = pgtype.Int4{Int32: int32(*terms.HeightStart), Valid: true}
}
if terms.HeightEnd != nil {
runestoneParams.EtchingTermsHeightEnd = pgtype.Int4{Int32: int32(*terms.HeightEnd), Valid: true}
}
if terms.OffsetStart != nil {
runestoneParams.EtchingTermsOffsetStart = pgtype.Int4{Int32: int32(*terms.OffsetStart), Valid: true}
}
if terms.OffsetEnd != nil {
runestoneParams.EtchingTermsOffsetEnd = pgtype.Int4{Int32: int32(*terms.OffsetEnd), Valid: true}
}
}
runestoneParams.EtchingTurbo = pgtype.Bool{Bool: etching.Turbo, Valid: true}
}
if src.Mint != nil {
runestoneParams.Mint = pgtype.Text{String: src.Mint.String(), Valid: true}
}
if src.Pointer != nil {
runestoneParams.Pointer = pgtype.Int4{Int32: int32(*src.Pointer), Valid: true}
}
return runestoneParams, nil
}
func mapRunestoneModelToType(src gen.RunesRunestone) (runes.Runestone, error) {
runestone := runes.Runestone{
Cenotaph: src.Cenotaph,
Flaws: runes.Flaws(src.Flaws),
}
if src.Etching {
etching := runes.Etching{}
if src.EtchingDivisibility.Valid {
divisibility := uint8(src.EtchingDivisibility.Int16)
etching.Divisibility = &divisibility
}
if src.EtchingPremine.Valid {
premine, err := uint128FromNumeric(src.EtchingPremine)
if err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to parse etching premine")
}
etching.Premine = premine
}
if src.EtchingRune.Valid {
rune, err := runes.NewRuneFromString(src.EtchingRune.String)
if err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to parse etching rune")
}
etching.Rune = &rune
}
if src.EtchingSpacers.Valid {
spacers := uint32(src.EtchingSpacers.Int32)
etching.Spacers = &spacers
}
if src.EtchingSymbol.Valid {
var symbol rune = src.EtchingSymbol.Int32
etching.Symbol = &symbol
}
if src.EtchingTerms.Valid && src.EtchingTerms.Bool {
terms := runes.Terms{}
if src.EtchingTermsAmount.Valid {
amount, err := uint128FromNumeric(src.EtchingTermsAmount)
if err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to parse etching terms amount")
}
terms.Amount = amount
}
if src.EtchingTermsCap.Valid {
cap, err := uint128FromNumeric(src.EtchingTermsCap)
if err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to parse etching terms cap")
}
terms.Cap = cap
}
if src.EtchingTermsHeightStart.Valid {
heightStart := uint64(src.EtchingTermsHeightStart.Int32)
terms.HeightStart = &heightStart
}
if src.EtchingTermsHeightEnd.Valid {
heightEnd := uint64(src.EtchingTermsHeightEnd.Int32)
terms.HeightEnd = &heightEnd
}
if src.EtchingTermsOffsetStart.Valid {
offsetStart := uint64(src.EtchingTermsOffsetStart.Int32)
terms.OffsetStart = &offsetStart
}
if src.EtchingTermsOffsetEnd.Valid {
offsetEnd := uint64(src.EtchingTermsOffsetEnd.Int32)
terms.OffsetEnd = &offsetEnd
}
etching.Terms = &terms
}
etching.Turbo = src.EtchingTurbo.Valid && src.EtchingTurbo.Bool
runestone.Etching = &etching
}
if src.Mint.Valid {
mint, err := runes.NewRuneIdFromString(src.Mint.String)
if err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to parse mint")
}
runestone.Mint = &mint
}
if src.Pointer.Valid {
pointer := uint64(src.Pointer.Int32)
runestone.Pointer = &pointer
}
// Edicts
{
if err := json.Unmarshal(src.Edicts, &runestone.Edicts); err != nil {
return runes.Runestone{}, errors.Wrap(err, "failed to unmarshal edicts")
}
if len(runestone.Edicts) == 0 {
runestone.Edicts = nil
}
}
return runestone, nil
}
func mapBalanceModelToType(src gen.RunesBalance) (*entity.Balance, error) {
runeId, err := runes.NewRuneIdFromString(src.RuneID)
if err != nil {
return nil, errors.Wrap(err, "failed to parse rune id")
}
amount, err := uint128FromNumeric(src.Amount)
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance")
}
pkScript, err := hex.DecodeString(src.Pkscript)
if err != nil {
return nil, errors.Wrap(err, "failed to parse pkscript")
}
return &entity.Balance{
PkScript: pkScript,
RuneId: runeId,
Amount: lo.FromPtr(amount),
BlockHeight: uint64(src.BlockHeight),
}, nil
}
func mapIndexedBlockModelToType(src gen.RunesIndexedBlock) (*entity.IndexedBlock, error) {
hash, err := chainhash.NewHashFromStr(src.Hash)
if err != nil {
return nil, errors.Wrap(err, "failed to parse block hash")
}
prevBlockHash, err := chainhash.NewHashFromStr(src.PrevHash)
if err != nil {
return nil, errors.Wrap(err, "failed to parse prev block hash")
}
eventHash, err := chainhash.NewHashFromStr(src.EventHash)
if err != nil {
return nil, errors.Wrap(err, "failed to parse event hash")
}
cumulativeEventHash, err := chainhash.NewHashFromStr(src.CumulativeEventHash)
if err != nil {
return nil, errors.Wrap(err, "failed to parse cumulative event hash")
}
return &entity.IndexedBlock{
Height: int64(src.Height),
Hash: *hash,
PrevHash: *prevBlockHash,
EventHash: *eventHash,
CumulativeEventHash: *cumulativeEventHash,
}, nil
}
func mapIndexedBlockTypeToParams(src entity.IndexedBlock) (gen.CreateIndexedBlockParams, error) {
return gen.CreateIndexedBlockParams{
Height: int32(src.Height),
Hash: src.Hash.String(),
PrevHash: src.PrevHash.String(),
EventHash: src.EventHash.String(),
CumulativeEventHash: src.CumulativeEventHash.String(),
}, nil
}
func mapOutPointBalanceModelToType(src gen.RunesOutpointBalance) (entity.OutPointBalance, error) {
runeId, err := runes.NewRuneIdFromString(src.RuneID)
if err != nil {
return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse rune id")
}
amount, err := uint128FromNumeric(src.Amount)
if err != nil {
return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse balance")
}
pkScript, err := hex.DecodeString(src.Pkscript)
if err != nil {
return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse pkscript")
}
txHash, err := chainhash.NewHashFromStr(src.TxHash)
if err != nil {
return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse tx hash")
}
var spentHeight *uint64
if src.SpentHeight.Valid {
spentHeight = lo.ToPtr(uint64(src.SpentHeight.Int32))
}
return entity.OutPointBalance{
PkScript: pkScript,
RuneId: runeId,
Amount: lo.FromPtr(amount),
OutPoint: wire.OutPoint{
Hash: *txHash,
Index: uint32(src.TxIdx),
},
BlockHeight: uint64(src.BlockHeight),
SpentHeight: spentHeight,
}, nil
}
func mapOutPointBalanceTypeToParams(src entity.OutPointBalance) (gen.CreateOutPointBalancesParams, error) {
amount, err := numericFromUint128(&src.Amount)
if err != nil {
return gen.CreateOutPointBalancesParams{}, errors.Wrap(err, "failed to parse amount")
}
var spentHeight pgtype.Int4
if src.SpentHeight != nil {
spentHeight = pgtype.Int4{Int32: int32(*src.SpentHeight), Valid: true}
}
return gen.CreateOutPointBalancesParams{
TxHash: src.OutPoint.Hash.String(),
TxIdx: int32(src.OutPoint.Index),
Pkscript: hex.EncodeToString(src.PkScript),
RuneID: src.RuneId.String(),
Amount: amount,
BlockHeight: int32(src.BlockHeight),
SpentHeight: spentHeight,
}, nil
}

View File

@@ -0,0 +1,61 @@
package postgres
import (
"testing"
"github.com/gaze-network/uint128"
"github.com/jackc/pgx/v5/pgtype"
"github.com/stretchr/testify/assert"
)
func TestUint128FromNumeric(t *testing.T) {
t.Run("normal", func(t *testing.T) {
numeric := pgtype.Numeric{}
numeric.ScanInt64(pgtype.Int8{
Int64: 1000,
Valid: true,
})
expected := uint128.From64(1000)
result, err := uint128FromNumeric(numeric)
assert.NoError(t, err)
assert.Equal(t, &expected, result)
})
t.Run("nil", func(t *testing.T) {
numeric := pgtype.Numeric{}
numeric.ScanInt64(pgtype.Int8{
Valid: false,
})
result, err := uint128FromNumeric(numeric)
assert.NoError(t, err)
assert.Nil(t, result)
})
}
func TestNumericFromUint128(t *testing.T) {
t.Run("normal", func(t *testing.T) {
u128 := uint128.From64(1)
expected := pgtype.Numeric{}
expected.ScanInt64(pgtype.Int8{
Int64: 1,
Valid: true,
})
result, err := numericFromUint128(&u128)
assert.NoError(t, err)
assert.Equal(t, expected, result)
})
t.Run("nil", func(t *testing.T) {
expected := pgtype.Numeric{}
expected.ScanInt64(pgtype.Int8{
Valid: false,
})
result, err := numericFromUint128(nil)
assert.NoError(t, err)
assert.Equal(t, expected, result)
})
}

View File

@@ -0,0 +1,20 @@
package postgres
import (
"github.com/gaze-network/indexer-network/internal/postgres"
"github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen"
"github.com/jackc/pgx/v5"
)
type Repository struct {
db postgres.DB
queries *gen.Queries
tx pgx.Tx
}
func NewRepository(db postgres.DB) *Repository {
return &Repository{
db: db,
queries: gen.New(db),
}
}

View File

@@ -0,0 +1,484 @@
package postgres
import (
"context"
"encoding/hex"
"fmt"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/modules/runes/internal/entity"
"github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen"
"github.com/gaze-network/indexer-network/modules/runes/runes"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgtype"
"github.com/samber/lo"
)
var _ datagateway.RunesDataGateway = (*Repository)(nil)
// Warning: GetLatestBlock currently returns a types.BlockHeader with only the Height, Hash, and PrevBlock fields populated,
// because all current callers need only these fields. In the future, we may want to populate all fields.
func (r *Repository) GetLatestBlock(ctx context.Context) (types.BlockHeader, error) {
block, err := r.queries.GetLatestIndexedBlock(ctx)
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return types.BlockHeader{}, errors.WithStack(errs.NotFound)
}
return types.BlockHeader{}, errors.Wrap(err, "error during query")
}
hash, err := chainhash.NewHashFromStr(block.Hash)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to parse block hash")
}
prevHash, err := chainhash.NewHashFromStr(block.PrevHash)
if err != nil {
return types.BlockHeader{}, errors.Wrap(err, "failed to parse prev block hash")
}
return types.BlockHeader{
Height: int64(block.Height),
Hash: *hash,
PrevBlock: *prevHash,
}, nil
}
func (r *Repository) GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error) {
indexedBlockModel, err := r.queries.GetIndexedBlockByHeight(ctx, int32(height))
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return nil, errors.WithStack(errs.NotFound)
}
return nil, errors.Wrap(err, "error during query")
}
indexedBlock, err := mapIndexedBlockModelToType(indexedBlockModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse indexed block model")
}
return indexedBlock, nil
}
func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, fromBlock, toBlock uint64) ([]*entity.RuneTransaction, error) {
pkScriptParam := []byte(fmt.Sprintf(`[{"pkScript":"%s"}]`, hex.EncodeToString(pkScript)))
runeIdParam := []byte(fmt.Sprintf(`[{"runeId":"%s"}]`, runeId.String()))
rows, err := r.queries.GetRuneTransactions(ctx, gen.GetRuneTransactionsParams{
FilterPkScript: pkScript != nil,
PkScriptParam: pkScriptParam,
FilterRuneID: runeId != runes.RuneId{},
RuneIDParam: runeIdParam,
RuneID: []byte(runeId.String()),
RuneIDBlockHeight: int32(runeId.BlockHeight),
RuneIDTxIndex: int32(runeId.TxIndex),
FromBlock: int32(fromBlock),
ToBlock: int32(toBlock),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
runeTxs := make([]*entity.RuneTransaction, 0, len(rows))
for _, row := range rows {
runeTxModel, runestoneModel, err := extractModelRuneTxAndRunestone(row)
if err != nil {
return nil, errors.Wrap(err, "failed to extract rune transaction and runestone from row")
}
runeTx, err := mapRuneTransactionModelToType(runeTxModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse rune transaction model")
}
if runestoneModel != nil {
runestone, err := mapRunestoneModelToType(*runestoneModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse runestone model")
}
runeTx.Runestone = &runestone
}
runeTxs = append(runeTxs, &runeTx)
}
return runeTxs, nil
}
func (r *Repository) GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error) {
balances, err := r.queries.GetOutPointBalancesAtOutPoint(ctx, gen.GetOutPointBalancesAtOutPointParams{
TxHash: outPoint.Hash.String(),
TxIdx: int32(outPoint.Index),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
result := make(map[runes.RuneId]*entity.OutPointBalance, len(balances))
for _, balanceModel := range balances {
balance, err := mapOutPointBalanceModelToType(balanceModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
}
result[balance.RuneId] = &balance
}
return result, nil
}
func (r *Repository) GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error) {
balances, err := r.queries.GetUnspentOutPointBalancesByPkScript(ctx, gen.GetUnspentOutPointBalancesByPkScriptParams{
Pkscript: hex.EncodeToString(pkScript),
BlockHeight: int32(blockHeight),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
result := make([]*entity.OutPointBalance, 0, len(balances))
for _, balanceModel := range balances {
balance, err := mapOutPointBalanceModelToType(balanceModel)
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
}
result = append(result, &balance)
}
return result, nil
}
func (r *Repository) GetRuneIdFromRune(ctx context.Context, rune runes.Rune) (runes.RuneId, error) {
runeIdStr, err := r.queries.GetRuneIdFromRune(ctx, rune.String())
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return runes.RuneId{}, errors.WithStack(errs.NotFound)
}
return runes.RuneId{}, errors.Wrap(err, "error during query")
}
runeId, err := runes.NewRuneIdFromString(runeIdStr)
if err != nil {
return runes.RuneId{}, errors.Wrap(err, "failed to parse RuneId")
}
return runeId, nil
}
func (r *Repository) GetRuneEntryByRuneId(ctx context.Context, runeId runes.RuneId) (*runes.RuneEntry, error) {
runeEntries, err := r.GetRuneEntryByRuneIdBatch(ctx, []runes.RuneId{runeId})
if err != nil {
return nil, errors.Wrap(err, "failed to get rune entries by rune id")
}
runeEntry, ok := runeEntries[runeId]
if !ok {
return nil, errors.WithStack(errs.NotFound)
}
return runeEntry, nil
}
func (r *Repository) GetRuneEntryByRuneIdBatch(ctx context.Context, runeIds []runes.RuneId) (map[runes.RuneId]*runes.RuneEntry, error) {
rows, err := r.queries.GetRuneEntriesByRuneIds(ctx, lo.Map(runeIds, func(runeId runes.RuneId, _ int) string {
return runeId.String()
}))
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
runeEntries := make(map[runes.RuneId]*runes.RuneEntry, len(rows))
var mapErrs []error
for i, runeEntryModel := range rows {
runeEntry, err := mapRuneEntryModelToType(runeEntryModel)
if err != nil {
mapErrs = append(mapErrs, errors.Wrapf(err, "failed to parse rune entry model index %d", i))
continue
}
runeEntries[runeEntry.RuneId] = &runeEntry
}
if len(mapErrs) > 0 {
return nil, errors.Join(mapErrs...)
}
return runeEntries, nil
}
func (r *Repository) GetRuneEntryByRuneIdAndHeight(ctx context.Context, runeId runes.RuneId, blockHeight uint64) (*runes.RuneEntry, error) {
runeEntries, err := r.GetRuneEntryByRuneIdAndHeightBatch(ctx, []runes.RuneId{runeId}, blockHeight)
if err != nil {
return nil, errors.Wrap(err, "failed to get rune entries by rune id and height")
}
runeEntry, ok := runeEntries[runeId]
if !ok {
return nil, errors.WithStack(errs.NotFound)
}
return runeEntry, nil
}
func (r *Repository) GetRuneEntryByRuneIdAndHeightBatch(ctx context.Context, runeIds []runes.RuneId, blockHeight uint64) (map[runes.RuneId]*runes.RuneEntry, error) {
rows, err := r.queries.GetRuneEntriesByRuneIdsAndHeight(ctx, gen.GetRuneEntriesByRuneIdsAndHeightParams{
RuneIds: lo.Map(runeIds, func(runeId runes.RuneId, _ int) string {
return runeId.String()
}),
Height: int32(blockHeight),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
runeEntries := make(map[runes.RuneId]*runes.RuneEntry, len(rows))
var mapErrs []error
for i, runeEntryModel := range rows {
runeEntry, err := mapRuneEntryModelToType(gen.GetRuneEntriesByRuneIdsRow(runeEntryModel))
if err != nil {
mapErrs = append(mapErrs, errors.Wrapf(err, "failed to parse rune entry model index %d", i))
continue
}
runeEntries[runeEntry.RuneId] = &runeEntry
}
if len(mapErrs) > 0 {
return nil, errors.Join(mapErrs...)
}
return runeEntries, nil
}
func (r *Repository) CountRuneEntries(ctx context.Context) (uint64, error) {
count, err := r.queries.CountRuneEntries(ctx)
if err != nil {
return 0, errors.Wrap(err, "error during query")
}
return uint64(count), nil
}
func (r *Repository) GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error) {
balances, err := r.queries.GetBalancesByPkScript(ctx, gen.GetBalancesByPkScriptParams{
Pkscript: hex.EncodeToString(pkScript),
BlockHeight: int32(blockHeight),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
result := make(map[runes.RuneId]*entity.Balance, len(balances))
for _, balanceModel := range balances {
balance, err := mapBalanceModelToType(gen.RunesBalance(balanceModel))
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
}
result[balance.RuneId] = balance
}
return result, nil
}
func (r *Repository) GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error) {
balances, err := r.queries.GetBalancesByRuneId(ctx, gen.GetBalancesByRuneIdParams{
RuneID: runeId.String(),
BlockHeight: int32(blockHeight),
})
if err != nil {
return nil, errors.Wrap(err, "error during query")
}
result := make([]*entity.Balance, 0, len(balances))
for _, balanceModel := range balances {
balance, err := mapBalanceModelToType(gen.RunesBalance(balanceModel))
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
}
result = append(result, balance)
}
return result, nil
}
func (r *Repository) GetBalanceByPkScriptAndRuneId(ctx context.Context, pkScript []byte, runeId runes.RuneId, blockHeight uint64) (*entity.Balance, error) {
balance, err := r.queries.GetBalanceByPkScriptAndRuneId(ctx, gen.GetBalanceByPkScriptAndRuneIdParams{
Pkscript: hex.EncodeToString(pkScript),
RuneID: runeId.String(),
BlockHeight: int32(blockHeight),
})
if err != nil {
if errors.Is(err, pgx.ErrNoRows) {
return nil, errors.WithStack(errs.NotFound)
}
return nil, errors.Wrap(err, "error during query")
}
result, err := mapBalanceModelToType(balance)
if err != nil {
return nil, errors.Wrap(err, "failed to parse balance model")
}
return result, nil
}
func (r *Repository) CreateRuneTransaction(ctx context.Context, tx *entity.RuneTransaction) error {
if tx == nil {
return nil
}
txParams, runestoneParams, err := mapRuneTransactionTypeToParams(*tx)
if err != nil {
return errors.Wrap(err, "failed to map rune transaction to params")
}
if err = r.queries.CreateRuneTransaction(ctx, txParams); err != nil {
return errors.Wrap(err, "error during exec CreateRuneTransaction")
}
if runestoneParams != nil {
if err = r.queries.CreateRunestone(ctx, *runestoneParams); err != nil {
return errors.Wrap(err, "error during exec CreateRunestone")
}
}
return nil
}
func (r *Repository) CreateRuneEntry(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error {
if entry == nil {
return nil
}
createParams, _, err := mapRuneEntryTypeToParams(*entry, blockHeight)
if err != nil {
return errors.Wrap(err, "failed to map rune entry to params")
}
if err = r.queries.CreateRuneEntry(ctx, createParams); err != nil {
return errors.Wrap(err, "error during exec CreateRuneEntry")
}
return nil
}
func (r *Repository) CreateRuneEntryState(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error {
if entry == nil {
return nil
}
_, createStateParams, err := mapRuneEntryTypeToParams(*entry, blockHeight)
if err != nil {
return errors.Wrap(err, "failed to map rune entry to params")
}
if err = r.queries.CreateRuneEntryState(ctx, createStateParams); err != nil {
return errors.Wrap(err, "error during exec CreateRuneEntryState")
}
return nil
}
func (r *Repository) CreateOutPointBalances(ctx context.Context, outPointBalances []*entity.OutPointBalance) error {
params := make([]gen.CreateOutPointBalancesParams, 0, len(outPointBalances))
for _, balance := range outPointBalances {
param, err := mapOutPointBalanceTypeToParams(*balance)
if err != nil {
return errors.Wrap(err, "failed to map outpoint balance to params")
}
params = append(params, param)
}
result := r.queries.CreateOutPointBalances(ctx, params)
var execErrors []error
result.Exec(func(i int, err error) {
if err != nil {
execErrors = append(execErrors, err)
}
})
if len(execErrors) > 0 {
return errors.Wrap(errors.Join(execErrors...), "error during exec")
}
return nil
}
func (r *Repository) SpendOutPointBalances(ctx context.Context, outPoint wire.OutPoint, blockHeight uint64) error {
if err := r.queries.SpendOutPointBalances(ctx, gen.SpendOutPointBalancesParams{
TxHash: outPoint.Hash.String(),
TxIdx: int32(outPoint.Index),
SpentHeight: pgtype.Int4{Int32: int32(blockHeight), Valid: true},
}); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) CreateRuneBalances(ctx context.Context, params []datagateway.CreateRuneBalancesParams) error {
insertParams := make([]gen.CreateRuneBalanceAtBlockParams, 0, len(params))
for _, param := range params {
param := param
amount, err := numericFromUint128(&param.Balance)
if err != nil {
return errors.Wrap(err, "failed to convert balance to numeric")
}
insertParams = append(insertParams, gen.CreateRuneBalanceAtBlockParams{
Pkscript: hex.EncodeToString(param.PkScript),
BlockHeight: int32(param.BlockHeight),
RuneID: param.RuneId.String(),
Amount: amount,
})
}
result := r.queries.CreateRuneBalanceAtBlock(ctx, insertParams)
var execErrors []error
result.Exec(func(i int, err error) {
if err != nil {
execErrors = append(execErrors, err)
}
})
if len(execErrors) > 0 {
return errors.Wrap(errors.Join(execErrors...), "error during exec")
}
return nil
}
func (r *Repository) CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error {
if block == nil {
return nil
}
params, err := mapIndexedBlockTypeToParams(*block)
if err != nil {
return errors.Wrap(err, "failed to map indexed block to params")
}
if err = r.queries.CreateIndexedBlock(ctx, params); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteIndexedBlockSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteIndexedBlockSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteRuneEntriesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteRuneEntriesSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteRuneEntryStatesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteRuneEntryStatesSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteRuneTransactionsSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteRuneTransactionsSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteRunestonesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteRunestonesSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteOutPointBalancesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteOutPointBalancesSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) UnspendOutPointBalancesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.UnspendOutPointBalancesSinceHeight(ctx, pgtype.Int4{Int32: int32(height), Valid: true}); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}
func (r *Repository) DeleteRuneBalancesSinceHeight(ctx context.Context, height uint64) error {
if err := r.queries.DeleteRuneBalancesSinceHeight(ctx, int32(height)); err != nil {
return errors.Wrap(err, "error during exec")
}
return nil
}

View File

@@ -0,0 +1,62 @@
package postgres
import (
"context"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/modules/runes/datagateway"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/jackc/pgx/v5"
)
var ErrTxAlreadyExists = errors.New("Transaction already exists. Call Commit() or Rollback() first.")
func (r *Repository) begin(ctx context.Context) (*Repository, error) {
if r.tx != nil {
return nil, errors.WithStack(ErrTxAlreadyExists)
}
tx, err := r.db.Begin(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to begin transaction")
}
return &Repository{
db: r.db,
queries: r.queries.WithTx(tx),
tx: tx,
}, nil
}
func (r *Repository) BeginRunesTx(ctx context.Context) (datagateway.RunesDataGatewayWithTx, error) {
repo, err := r.begin(ctx)
if err != nil {
return nil, errors.WithStack(err)
}
return repo, nil
}
func (r *Repository) Commit(ctx context.Context) error {
if r.tx == nil {
return nil
}
err := r.tx.Commit(ctx)
if err != nil {
return errors.Wrap(err, "failed to commit transaction")
}
r.tx = nil
return nil
}
func (r *Repository) Rollback(ctx context.Context) error {
if r.tx == nil {
return nil
}
err := r.tx.Rollback(ctx)
if err != nil && !errors.Is(err, pgx.ErrTxClosed) {
return errors.Wrap(err, "failed to rollback transaction")
}
if err == nil {
logger.DebugContext(ctx, "rolled back transaction")
}
r.tx = nil
return nil
}
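The helpers above implement the usual begin, defer-Rollback, Commit pattern: Rollback swallows pgx.ErrTxClosed, so it is safe to defer it unconditionally even after a successful Commit. A caller sketch, assuming the RunesDataGatewayWithTx interface exposes the repository's write methods plus Commit and Rollback (the helper name saveIndexedBlock is made up for the example):

func saveIndexedBlock(ctx context.Context, repo *Repository, block *entity.IndexedBlock) error {
    dgTx, err := repo.BeginRunesTx(ctx)
    if err != nil {
        return errors.Wrap(err, "failed to begin runes transaction")
    }
    // Safe to defer: after a successful Commit, pgx reports ErrTxClosed and the
    // Rollback helper above treats that as already closed.
    defer func() { _ = dgTx.Rollback(ctx) }()

    if err := dgTx.CreateIndexedBlock(ctx, block); err != nil {
        return errors.Wrap(err, "failed to create indexed block")
    }
    return dgTx.Commit(ctx)
}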

modules/runes/runes.go Normal file
View File

@@ -0,0 +1,93 @@
package runes
import (
"context"
"strings"
"github.com/btcsuite/btcd/rpcclient"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/datasources"
"github.com/gaze-network/indexer-network/core/indexer"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/internal/config"
"github.com/gaze-network/indexer-network/internal/postgres"
runesapi "github.com/gaze-network/indexer-network/modules/runes/api"
runesdatagateway "github.com/gaze-network/indexer-network/modules/runes/datagateway"
runespostgres "github.com/gaze-network/indexer-network/modules/runes/repository/postgres"
runesusecase "github.com/gaze-network/indexer-network/modules/runes/usecase"
"github.com/gaze-network/indexer-network/pkg/btcclient"
"github.com/gaze-network/indexer-network/pkg/logger"
"github.com/gaze-network/indexer-network/pkg/reportingclient"
"github.com/gofiber/fiber/v2"
"github.com/samber/do/v2"
"github.com/samber/lo"
)
func New(injector do.Injector) (indexer.IndexerWorker, error) {
ctx := do.MustInvoke[context.Context](injector)
conf := do.MustInvoke[config.Config](injector)
reportingClient := do.MustInvoke[*reportingclient.ReportingClient](injector)
var (
runesDg runesdatagateway.RunesDataGateway
indexerInfoDg runesdatagateway.IndexerInfoDataGateway
)
var cleanupFuncs []func(context.Context) error
switch strings.ToLower(conf.Modules.Runes.Database) {
case "postgresql", "postgres", "pg":
pg, err := postgres.NewPool(ctx, conf.Modules.Runes.Postgres)
if err != nil {
if errors.Is(err, errs.InvalidArgument) {
return nil, errors.Wrap(err, "Invalid Postgres configuration for indexer")
}
return nil, errors.Wrap(err, "can't create Postgres connection pool")
}
cleanupFuncs = append(cleanupFuncs, func(ctx context.Context) error {
pg.Close()
return nil
})
runesRepo := runespostgres.NewRepository(pg)
runesDg = runesRepo
indexerInfoDg = runesRepo
default:
return nil, errors.Wrapf(errs.Unsupported, "%q database for indexer is not supported", conf.Modules.Runes.Database)
}
var bitcoinDatasource datasources.Datasource[*types.Block]
var bitcoinClient btcclient.Contract
switch strings.ToLower(conf.Modules.Runes.Datasource) {
case "bitcoin-node":
btcClient := do.MustInvoke[*rpcclient.Client](injector)
bitcoinNodeDatasource := datasources.NewBitcoinNode(btcClient)
bitcoinDatasource = bitcoinNodeDatasource
bitcoinClient = bitcoinNodeDatasource
default:
return nil, errors.Wrapf(errs.Unsupported, "%q datasource is not supported", conf.Modules.Runes.Datasource)
}
processor := NewProcessor(runesDg, indexerInfoDg, bitcoinClient, conf.Network, reportingClient, cleanupFuncs)
if err := processor.VerifyStates(ctx); err != nil {
return nil, errors.WithStack(err)
}
// Mount API
apiHandlers := lo.Uniq(conf.Modules.Runes.APIHandlers)
for _, handler := range apiHandlers {
switch handler { // TODO: support more handlers (e.g. gRPC)
case "http":
httpServer := do.MustInvoke[*fiber.App](injector)
runesUsecase := runesusecase.New(runesDg, bitcoinClient)
runesHTTPHandler := runesapi.NewHTTPHandler(conf.Network, runesUsecase)
if err := runesHTTPHandler.Mount(httpServer); err != nil {
return nil, errors.Wrap(err, "can't mount Runes API")
}
logger.InfoContext(ctx, "Mounted HTTP handler")
default:
return nil, errors.Wrapf(errs.Unsupported, "%q API handler is not supported", handler)
}
}
indexer := indexer.New(processor, bitcoinDatasource)
return indexer, nil
}

View File

@@ -0,0 +1,11 @@
package runes
import (
"github.com/gaze-network/uint128"
)
type Edict struct {
Amount uint128.Uint128
Id RuneId
Output int
}

View File

@@ -0,0 +1,64 @@
package runes
import (
"github.com/Cleverse/go-utilities/utils"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
type Terms struct {
// Amount of the rune to be minted per transaction
Amount *uint128.Uint128
// Number of allowed mints
Cap *uint128.Uint128
// Block height at which the rune can start being minted. If both HeightStart and OffsetStart are set, use the higher value.
HeightStart *uint64
// Block height at which the rune can no longer be minted. If both HeightEnd and OffsetEnd are set, use the lower value.
HeightEnd *uint64
// Offset from etched block at which the rune can start being minted. If both HeightStart and OffsetStart are set, use the higher value.
OffsetStart *uint64
// Offset from etched block at which the rune can no longer be minted. If both HeightEnd and OffsetEnd are set, use the lower value.
OffsetEnd *uint64
}
type Etching struct {
// Number of decimals when displaying the rune
Divisibility *uint8
// Number of runes to be minted during etching
Premine *uint128.Uint128
// Rune name
Rune *Rune
// Bitmap of spacers to be displayed between each letter of the rune name
Spacers *uint32
// Single Unicode codepoint to represent the rune
Symbol *rune
// Minting terms. If not provided, the rune is not mintable.
Terms *Terms
// Whether to opt-in to future protocol changes, whatever they may be
Turbo bool
}
const (
maxDivisibility uint8 = 38
maxSpacers uint32 = 0b00000111_11111111_11111111_11111111
)
func (e Etching) Supply() (uint128.Uint128, error) {
terms := utils.Default(e.Terms, &Terms{})
amount := lo.FromPtr(terms.Amount)
cap := lo.FromPtr(terms.Cap)
premine := lo.FromPtr(e.Premine)
result, overflow := amount.MulOverflow(cap)
if overflow {
return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128)
}
result, overflow = result.AddOverflow(premine)
if overflow {
return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128)
}
return result, nil
}
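Supply computes the maximum issuance as terms amount times cap plus premine, returning errs.OverflowUint128 if either step overflows. A short worked sketch, reusing the lo and uint128 imports already present in this file; the values match one of the test cases that follow:

func exampleSupply() {
    e := Etching{
        Premine: lo.ToPtr(uint128.From64(1000)),
        Terms: &Terms{
            Amount: lo.ToPtr(uint128.From64(100)),
            Cap:    lo.ToPtr(uint128.From64(10)),
        },
    }
    supply, err := e.Supply()
    // supply == uint128.From64(2000), err == nil: 100 per mint * 10 mints + 1000 premined
    _, _ = supply, err
}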

View File

@@ -0,0 +1,123 @@
package runes
import (
"fmt"
"strings"
"testing"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
)
func TestMaxSpacers(t *testing.T) {
maxRune := Rune(uint128.Max)
var sb strings.Builder
for i, c := range maxRune.String() {
if i > 0 {
sb.WriteRune('•')
}
sb.WriteRune(c)
}
spacedRune, err := NewSpacedRuneFromString(sb.String())
assert.NoError(t, err)
assert.Equal(t, maxSpacers, spacedRune.Spacers)
}
func TestSupply(t *testing.T) {
testNumber := 0
test := func(e Etching, expectedSupply uint128.Uint128) {
t.Run(fmt.Sprintf("case_%d", testNumber), func(t *testing.T) {
t.Parallel()
actualSupply, err := e.Supply()
assert.NoError(t, err)
assert.Equal(t, expectedSupply, actualSupply)
})
testNumber++
}
testError := func(e Etching, expectedError error) {
t.Run(fmt.Sprintf("case_%d", testNumber), func(t *testing.T) {
t.Parallel()
_, err := e.Supply()
assert.ErrorIs(t, err, expectedError)
})
testNumber++
}
test(Etching{}, uint128.From64(0))
test(Etching{
Premine: lo.ToPtr(uint128.From64(0)),
Terms: nil,
}, uint128.From64(0))
test(Etching{
Premine: lo.ToPtr(uint128.From64(1)),
Terms: nil,
}, uint128.From64(1))
test(Etching{
Premine: lo.ToPtr(uint128.From64(1)),
Terms: &Terms{
Amount: lo.ToPtr(uint128.From64(0)),
Cap: lo.ToPtr(uint128.From64(0)),
},
}, uint128.From64(1))
test(Etching{
Premine: lo.ToPtr(uint128.From64(1000)),
Terms: &Terms{
Amount: lo.ToPtr(uint128.From64(100)),
Cap: lo.ToPtr(uint128.From64(10)),
},
}, uint128.From64(2000))
test(Etching{
Premine: lo.ToPtr(uint128.From64(0)),
Terms: &Terms{
Amount: lo.ToPtr(uint128.From64(100)),
Cap: lo.ToPtr(uint128.From64(10)),
},
}, uint128.From64(1000))
test(Etching{
Premine: lo.ToPtr(uint128.From64(1000)),
Terms: &Terms{
Amount: lo.ToPtr(uint128.From64(100)),
Cap: lo.ToPtr(uint128.From64(0)),
},
}, uint128.From64(1000))
test(Etching{
Premine: lo.ToPtr(uint128.From64(1000)),
Terms: &Terms{
Amount: lo.ToPtr(uint128.From64(0)),
Cap: lo.ToPtr(uint128.From64(10)),
},
}, uint128.From64(1000))
test(Etching{
Premine: lo.ToPtr(uint128.Max.Div64(2).Add64(1)),
Terms: &Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: lo.ToPtr(uint128.Max.Div64(2)),
},
}, uint128.Max)
test(Etching{
Premine: lo.ToPtr(uint128.From64(0)),
Terms: &Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: lo.ToPtr(uint128.Max),
},
}, uint128.Max)
testError(Etching{
Premine: lo.ToPtr(uint128.Max),
Terms: &Terms{
Amount: lo.ToPtr(uint128.From64(1)),
Cap: lo.ToPtr(uint128.From64(1)),
},
}, errs.OverflowUint128)
}

View File

@@ -0,0 +1,77 @@
package runes
import (
"math/big"
"reflect"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/uint128"
)
// Flag represents a single flag that can be set on a runestone.
type Flag uint8
const (
FlagEtching = Flag(0)
FlagTerms = Flag(1)
FlagTurbo = Flag(2)
FlagCenotaph = Flag(127)
)
func (f Flag) Mask() Flags {
return Flags(uint128.From64(1).Lsh(uint(f)))
}
// Flags is a bitmask of flags that can be set on a runestone.
type Flags uint128.Uint128
func (f Flags) Uint128() uint128.Uint128 {
return uint128.Uint128(f)
}
func (f Flags) And(other Flags) Flags {
return Flags(f.Uint128().And(other.Uint128()))
}
func (f Flags) Or(other Flags) Flags {
return Flags(f.Uint128().Or(other.Uint128()))
}
func ParseFlags(input interface{}) (Flags, error) {
switch input := input.(type) {
case Flags:
return input, nil
case uint128.Uint128:
return Flags(input), nil
case int, int8, int16, int32, int64:
// note: a direct uint64 type assertion would panic for these types, so read the value via reflect
v := reflect.ValueOf(input).Int()
if v < 0 {
return Flags{}, errors.New("flags value must not be negative")
}
return Flags(uint128.From64(uint64(v))), nil
case uint, uint8, uint16, uint32, uint64:
return Flags(uint128.From64(reflect.ValueOf(input).Uint())), nil
case big.Int:
u128, err := uint128.FromBig(&input)
if err != nil {
return Flags{}, errors.Join(err, errs.OverflowUint128)
}
return Flags(u128), nil
case *big.Int:
u128, err := uint128.FromBig(input)
if err != nil {
return Flags{}, errors.Join(err, errs.OverflowUint128)
}
return Flags(u128), nil
default:
panic("invalid flags input type")
}
}
func (f *Flags) Take(flag Flag) bool {
found := !f.And(flag.Mask()).Uint128().Equals64(0)
if found {
// f = f - (1 << flag)
*f = Flags(f.Uint128().Sub(flag.Mask().Uint128()))
}
return found
}
func (f *Flags) Set(flag Flag) {
// f = f | (1 << flag)
*f = Flags(f.Uint128().Or(flag.Mask().Uint128()))
}
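
A short illustrative sketch of how the Flags bitmask behaves (Set marks a flag, Take reports and then clears it), written as a Go example test assumed to live in the runes package.

package runes

import "fmt"

// ExampleFlags shows Set marking bits and Take reporting-then-clearing them.
func ExampleFlags() {
	var flags Flags
	flags.Set(FlagEtching)
	flags.Set(FlagTerms)

	fmt.Println(flags.Take(FlagEtching))  // set, so true; the bit is cleared
	fmt.Println(flags.Take(FlagEtching))  // already taken
	fmt.Println(flags.Take(FlagTurbo))    // never set
	fmt.Println(flags.Uint128().IsZero()) // FlagTerms is still set
	// Output:
	// true
	// false
	// false
	// false
}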


@@ -0,0 +1,60 @@
package runes
type FlawFlag int
const (
FlawFlagEdictOutput FlawFlag = iota
FlawFlagEdictRuneId
FlawFlagInvalidScript
FlawFlagOpCode
FlawFlagSupplyOverflow
FlawFlagTrailingIntegers
FlawFlagTruncatedField
FlawFlagUnrecognizedEvenTag
FlawFlagUnrecognizedFlag
FlawFlagVarInt
)
func (f FlawFlag) Mask() Flaws {
return 1 << f
}
var flawMessages = map[FlawFlag]string{
FlawFlagEdictOutput: "edict output greater than transaction output count",
FlawFlagEdictRuneId: "invalid runeId in edict",
FlawFlagInvalidScript: "invalid script in OP_RETURN",
FlawFlagOpCode: "non-pushdata opcode in OP_RETURN",
FlawFlagSupplyOverflow: "supply overflows uint128",
FlawFlagTrailingIntegers: "trailing integers in body",
FlawFlagTruncatedField: "field with missing value",
FlawFlagUnrecognizedEvenTag: "unrecognized even tag",
FlawFlagUnrecognizedFlag: "unrecognized field",
FlawFlagVarInt: "invalid varint",
}
func (f FlawFlag) String() string {
return flawMessages[f]
}
// Flaws is a bitmask of flaws that caused a runestone to be a cenotaph.
type Flaws uint32
func (f Flaws) Collect() []FlawFlag {
var flags []FlawFlag
// collect set flags by checking each known flaw flag
for flag := range flawMessages {
if f&flag.Mask() != 0 {
flags = append(flags, flag)
}
}
return flags
}
func (f Flaws) CollectAsString() []string {
flawFlags := f.Collect()
flawMsgs := make([]string, 0, len(flawFlags))
for _, flag := range flawFlags {
flawMsgs = append(flawMsgs, flag.String())
}
return flawMsgs
}
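
A small sketch of turning a Flaws bitmask back into human-readable messages; the sort exists only because Collect iterates a map, whose order is unspecified. Placement in the runes package is assumed.

package runes

import (
	"fmt"
	"sort"
)

// ExampleFlaws_CollectAsString expands a flaw bitmask into its messages.
func ExampleFlaws_CollectAsString() {
	flaws := FlawFlagVarInt.Mask() | FlawFlagOpCode.Mask()
	msgs := flaws.CollectAsString()
	sort.Strings(msgs) // map iteration order is not deterministic
	fmt.Println(msgs)
	// Output: [invalid varint non-pushdata opcode in OP_RETURN]
}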


@@ -0,0 +1,94 @@
package runes
import (
"math"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
type Message struct {
Fields Fields
Edicts []Edict
Flaws Flaws
}
type Fields map[Tag][]uint128.Uint128
func (fields Fields) Take(tag Tag) *uint128.Uint128 {
values, ok := fields[tag]
if !ok {
return nil
}
first := values[0]
values = values[1:]
if len(values) == 0 {
delete(fields, tag)
} else {
fields[tag] = values
}
return &first
}
func MessageFromIntegers(tx *types.Transaction, payload []uint128.Uint128) Message {
flaws := Flaws(0)
var edicts []Edict
fields := make(map[Tag][]uint128.Uint128)
for i := 0; i < len(payload); i += 2 {
tag, err := ParseTag(payload[i])
if err != nil {
continue
}
// If the tag is Body, treat all remaining integers as edicts
if tag == TagBody {
runeId := RuneId{}
for _, chunk := range lo.Chunk(payload[i+1:], 4) {
if len(chunk) != 4 {
flaws |= FlawFlagTrailingIntegers.Mask()
break
}
blockDelta, txIndexDelta, amount, output := chunk[0], chunk[1], chunk[2], chunk[3]
if blockDelta.Cmp64(math.MaxUint64) > 0 || txIndexDelta.Cmp64(math.MaxUint32) > 0 {
flaws |= FlawFlagEdictRuneId.Mask()
break
}
if output.Cmp64(uint64(len(tx.TxOut))) > 0 {
flaws |= FlawFlagEdictOutput.Mask()
break
}
runeId, err = runeId.Next(blockDelta.Uint64(), txIndexDelta.Uint32()) // safe to cast as uint32 because we checked
if err != nil {
flaws |= FlawFlagEdictRuneId.Mask()
break
}
edict := Edict{
Id: runeId,
Amount: amount,
Output: int(output.Uint64()),
}
edicts = append(edicts, edict)
}
break
}
// append tag value to fields
if i+1 >= len(payload) {
flaws |= FlawFlagTruncatedField.Mask()
break
}
value := payload[i+1]
if _, ok := fields[tag]; !ok {
fields[tag] = make([]uint128.Uint128, 0)
}
fields[tag] = append(fields[tag], value)
}
return Message{
Flaws: flaws,
Edicts: edicts,
Fields: fields,
}
}
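
An illustrative sketch of Fields.Take consuming a tag's values one at a time (the same way DecipherRunestone reads the two TagMint values); placement in the runes package is assumed.

package runes

import (
	"fmt"

	"github.com/gaze-network/uint128"
)

// ExampleFields_Take pops values for a tag in order until the tag is exhausted.
func ExampleFields_Take() {
	fields := Fields{
		TagMint: []uint128.Uint128{uint128.From64(840000), uint128.From64(3)},
	}
	fmt.Println(fields.Take(TagMint).Uint64())
	fmt.Println(fields.Take(TagMint).Uint64())
	fmt.Println(fields.Take(TagMint) == nil) // no values left
	// Output:
	// 840000
	// 3
	// true
}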

modules/runes/runes/rune.go

@@ -0,0 +1,175 @@
package runes
import (
"slices"
"github.com/Cleverse/go-utilities/utils"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/uint128"
)
type Rune uint128.Uint128
func (r Rune) Uint128() uint128.Uint128 {
return uint128.Uint128(r)
}
func NewRune(value uint64) Rune {
return Rune(uint128.From64(value))
}
func NewRuneFromUint128(value uint128.Uint128) Rune {
return Rune(value)
}
var ErrInvalidBase26 = errors.New("invalid base-26 character: must be in the range [A-Z]")
// NewRuneFromString creates a new Rune from its modified base-26 string representation
func NewRuneFromString(value string) (Rune, error) {
n := uint128.From64(0)
for i, char := range value {
if i > 0 {
n = n.Add(uint128.From64(1))
}
n = n.Mul(uint128.From64(26))
if char < 'A' || char > 'Z' {
return Rune{}, ErrInvalidBase26
}
n = n.Add(uint128.From64(uint64(char - 'A')))
}
return Rune(n), nil
}
var firstReservedRune = NewRuneFromUint128(utils.Must(uint128.FromString("6402364363415443603228541259936211926")))
var unlockSteps = []uint128.Uint128{
utils.Must(uint128.FromString("0")), // A
utils.Must(uint128.FromString("26")), // AA
utils.Must(uint128.FromString("702")), // AAA
utils.Must(uint128.FromString("18278")), // AAAA
utils.Must(uint128.FromString("475254")), // AAAAA
utils.Must(uint128.FromString("12356630")), // AAAAAA
utils.Must(uint128.FromString("321272406")), // AAAAAAA
utils.Must(uint128.FromString("8353082582")), // AAAAAAAA
utils.Must(uint128.FromString("217180147158")), // AAAAAAAAA
utils.Must(uint128.FromString("5646683826134")), // AAAAAAAAAA
utils.Must(uint128.FromString("146813779479510")), // AAAAAAAAAAA
utils.Must(uint128.FromString("3817158266467286")), // AAAAAAAAAAAA
utils.Must(uint128.FromString("99246114928149462")), // AAAAAAAAAAAAA
utils.Must(uint128.FromString("2580398988131886038")), // AAAAAAAAAAAAAA
utils.Must(uint128.FromString("67090373691429037014")), // AAAAAAAAAAAAAAA
utils.Must(uint128.FromString("1744349715977154962390")), // AAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("45353092615406029022166")), // AAAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("1179180408000556754576342")), // AAAAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("30658690608014475618984918")), // AAAAAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("797125955808376366093607894")), // AAAAAAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("20725274851017785518433805270")), // AAAAAAAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("538857146126462423479278937046")), // AAAAAAAAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("14010285799288023010461252363222")), // AAAAAAAAAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("364267430781488598271992561443798")), // AAAAAAAAAAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("9470953200318703555071806597538774")), // AAAAAAAAAAAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("246244783208286292431866971536008150")), // AAAAAAAAAAAAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("6402364363415443603228541259936211926")), // AAAAAAAAAAAAAAAAAAAAAAAAAAA
utils.Must(uint128.FromString("166461473448801533683942072758341510102")), // AAAAAAAAAAAAAAAAAAAAAAAAAAAA
}
func (r Rune) IsReserved() bool {
return r.Uint128().Cmp(firstReservedRune.Uint128()) >= 0
}
// Commitment returns the commitment of the rune: the little-endian encoding of the rune value with trailing zero bytes trimmed.
func (r Rune) Commitment() []byte {
bytes := make([]byte, 16)
r.Uint128().PutBytes(bytes)
end := len(bytes)
for end > 0 && bytes[end-1] == 0 {
end--
}
return bytes[:end]
}
// String returns the modified base-26 string representation of the rune
func (r Rune) String() string {
if r.Uint128() == uint128.Max {
return "BCGDENLQRQWDSLRUGSNLBTMFIJAV"
}
chars := "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
value := r.Uint128().Add64(1)
var encoded []byte
for !value.IsZero() {
idx := value.Sub64(1).Mod64(26)
encoded = append(encoded, chars[idx])
value = value.Sub64(1).Div64(26)
}
slices.Reverse(encoded)
return string(encoded)
}
func (r Rune) Cmp(other Rune) int {
return r.Uint128().Cmp(other.Uint128())
}
func FirstRuneHeight(network common.Network) uint64 {
switch network {
case common.NetworkMainnet:
return common.HalvingInterval * 4
case common.NetworkTestnet:
return common.HalvingInterval * 12
}
panic("invalid network")
}
func MinimumRuneAtHeight(network common.Network, height uint64) Rune {
offset := height + 1
interval := common.HalvingInterval / 12
// runes are gradually unlocked from rune activation height until the next halving
start := FirstRuneHeight(network)
end := start + common.HalvingInterval
if offset < start {
return (Rune)(unlockSteps[12])
}
if offset >= end {
return (Rune)(unlockSteps[0])
}
progress := offset - start
length := 12 - progress/uint64(interval)
startRune := unlockSteps[length]
endRune := unlockSteps[length-1] // length cannot be 0 because we checked that offset < end
remainder := progress % uint64(interval)
// result = startRune - ((startRune - endRune) * remainder / interval)
result := startRune.Sub(startRune.Sub(endRune).Mul64(remainder).Div64(uint64(interval)))
return Rune(result)
}
func GetReservedRune(blockHeight uint64, txIndex uint32) Rune {
// firstReservedRune + ((blockHeight << 32) | txIndex)
delta := uint128.From64(blockHeight).Lsh(32).Or64(uint64(txIndex))
return Rune(firstReservedRune.Uint128().Add(delta))
}
// MarshalJSON implements json.Marshaler
func (r Rune) MarshalJSON() ([]byte, error) {
return []byte(`"` + r.String() + `"`), nil
}
// UnmarshalJSON implements json.Unmarshaler
func (r *Rune) UnmarshalJSON(data []byte) error {
// data must be quoted
if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
return errors.New("must be string")
}
data = data[1 : len(data)-1]
parsed, err := NewRuneFromString(string(data))
if err != nil {
return errors.WithStack(err)
}
*r = parsed
return nil
}
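
A brief sketch of the modified base-26 encoding and the trimmed little-endian commitment, using values that match the tests further down; it is assumed to run inside the runes package.

package runes

import "fmt"

// ExampleRune round-trips the modified base-26 encoding and prints a commitment.
func ExampleRune() {
	r, err := NewRuneFromString("AB")
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Uint128().Uint64())      // "AB" decodes to 27
	fmt.Println(NewRune(256).String())     // 256 encodes back to two letters
	fmt.Println(NewRune(256).Commitment()) // little-endian bytes, trailing zeros trimmed
	// Output:
	// 27
	// IW
	// [0 1]
}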


@@ -0,0 +1,130 @@
package runes
import (
"math"
"time"
"github.com/Cleverse/go-utilities/utils"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
type RuneEntry struct {
RuneId RuneId
Number uint64
Divisibility uint8
// Premine is the amount of the rune that was premined.
Premine uint128.Uint128
SpacedRune SpacedRune
Symbol rune
// Terms is the minting terms of the rune.
Terms *Terms
Turbo bool
// Mints is the number of times that this rune has been minted.
Mints uint128.Uint128
BurnedAmount uint128.Uint128
// CompletedAt is the time when the rune was fully minted.
CompletedAt time.Time
// CompletedAtHeight is the block height when the rune was fully minted.
CompletedAtHeight *uint64
EtchingBlock uint64
EtchingTxHash chainhash.Hash
EtchedAt time.Time
}
var (
ErrUnmintable = errors.New("rune is not mintable")
ErrMintCapReached = errors.New("rune mint cap reached")
ErrMintBeforeStart = errors.New("rune minting has not started")
ErrMintAfterEnd = errors.New("rune minting has ended")
)
func (e *RuneEntry) GetMintableAmount(height uint64) (uint128.Uint128, error) {
if e.Terms == nil {
return uint128.Uint128{}, ErrUnmintable
}
if !e.IsMintStarted(height) {
return uint128.Uint128{}, ErrMintBeforeStart
}
if e.IsMintEnded(height) {
return uint128.Uint128{}, ErrMintAfterEnd
}
var cap uint128.Uint128
if e.Terms.Cap != nil {
cap = *e.Terms.Cap
}
if e.Mints.Cmp(cap) >= 0 {
return uint128.Uint128{}, ErrMintCapReached
}
var amount uint128.Uint128
if e.Terms.Amount != nil {
amount = *e.Terms.Amount
}
return amount, nil
}
func (e *RuneEntry) IsMintStarted(height uint64) bool {
if e.Terms == nil {
return false
}
var relative, absolute uint64
if e.Terms.OffsetStart != nil {
relative = e.RuneId.BlockHeight + *e.Terms.OffsetStart
}
if e.Terms.HeightStart != nil {
absolute = *e.Terms.HeightStart
}
return height >= max(relative, absolute)
}
func (e *RuneEntry) IsMintEnded(height uint64) bool {
if e.Terms == nil {
return false
}
var relative, absolute uint64 = math.MaxUint64, math.MaxUint64
if e.Terms.OffsetEnd != nil {
relative = e.RuneId.BlockHeight + *e.Terms.OffsetEnd
}
if e.Terms.HeightEnd != nil {
absolute = *e.Terms.HeightEnd
}
return height >= min(relative, absolute)
}
func (e RuneEntry) Supply() (uint128.Uint128, error) {
terms := utils.Default(e.Terms, &Terms{})
amount := lo.FromPtr(terms.Amount)
cap := lo.FromPtr(terms.Cap)
premine := e.Premine
result, overflow := amount.MulOverflow(cap)
if overflow {
return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128)
}
result, overflow = result.AddOverflow(premine)
if overflow {
return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128)
}
return result, nil
}
func (e RuneEntry) MintedAmount() (uint128.Uint128, error) {
terms := lo.FromPtr(e.Terms)
amount, overflow := e.Mints.MulOverflow(lo.FromPtr(terms.Amount))
if overflow {
return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128)
}
amount, overflow = amount.AddOverflow(e.Premine)
if overflow {
return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128)
}
return amount, nil
}
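
A hedged sketch of the mint window checks in GetMintableAmount: before RuneId.BlockHeight + OffsetStart minting is rejected, afterwards the per-mint amount is returned. The concrete numbers are illustrative only, and the file is assumed to sit in the runes package.

package runes

import (
	"fmt"

	"github.com/gaze-network/uint128"
	"github.com/samber/lo"
)

// ExampleRuneEntry_GetMintableAmount shows the relative mint start offset in action.
func ExampleRuneEntry_GetMintableAmount() {
	entry := RuneEntry{
		RuneId: RuneId{BlockHeight: 840000, TxIndex: 1},
		Terms: &Terms{
			Amount:      lo.ToPtr(uint128.From64(100)),
			Cap:         lo.ToPtr(uint128.From64(2)),
			OffsetStart: lo.ToPtr(uint64(10)),
		},
	}
	if _, err := entry.GetMintableAmount(840005); err != nil {
		fmt.Println(err) // before block 840010, minting has not started
	}
	amount, err := entry.GetMintableAmount(840010)
	if err != nil {
		panic(err)
	}
	fmt.Println(amount.Uint64())
	// Output:
	// rune minting has not started
	// 100
}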


@@ -0,0 +1,119 @@
package runes
import (
"cmp"
"fmt"
"math"
"strconv"
"strings"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
)
type RuneId struct {
BlockHeight uint64
TxIndex uint32
}
var ErrRuneIdZeroBlockNonZeroTxIndex = errors.New("rune id cannot be zero block height and non-zero tx index")
func NewRuneId(blockHeight uint64, txIndex uint32) (RuneId, error) {
if blockHeight == 0 && txIndex != 0 {
return RuneId{}, errors.WithStack(ErrRuneIdZeroBlockNonZeroTxIndex)
}
return RuneId{
BlockHeight: blockHeight,
TxIndex: txIndex,
}, nil
}
var (
ErrInvalidSeparator = errors.New("invalid rune id: must contain exactly one separator")
ErrCannotParseBlockHeight = errors.New("invalid rune id: cannot parse block height")
ErrCannotParseTxIndex = errors.New("invalid rune id: cannot parse tx index")
)
func NewRuneIdFromString(str string) (RuneId, error) {
strs := strings.Split(str, ":")
if len(strs) != 2 {
return RuneId{}, ErrInvalidSeparator
}
blockHeightStr, txIndexStr := strs[0], strs[1]
blockHeight, err := strconv.ParseUint(blockHeightStr, 10, 64)
if err != nil {
return RuneId{}, errors.WithStack(errors.Join(err, ErrCannotParseBlockHeight))
}
txIndex, err := strconv.ParseUint(txIndexStr, 10, 32)
if err != nil {
return RuneId{}, errors.WithStack(errors.Join(err, ErrCannotParseTxIndex))
}
return RuneId{
BlockHeight: blockHeight,
TxIndex: uint32(txIndex),
}, nil
}
func (r RuneId) String() string {
return fmt.Sprintf("%d:%d", r.BlockHeight, r.TxIndex)
}
// Cmp compares two RuneIds. It returns -1 if r is less than other, 0 if r is equal to other, and 1 if r is greater than other.
// RuneIds are compared first by block height and then by tx index in ascending order.
func (r RuneId) Cmp(other RuneId) int {
// compare explicitly: unsigned subtraction would wrap and flip the sign
if r.BlockHeight != other.BlockHeight {
return cmp.Compare(r.BlockHeight, other.BlockHeight)
}
return cmp.Compare(r.TxIndex, other.TxIndex)
}
// Delta calculates the delta encoding between two RuneIds. If both RuneIds are in the same block, the block delta is 0 and the tx index delta is the difference between the two tx indices.
// If they are in different blocks, the block delta is the difference between the two block heights and the tx index delta is the absolute tx index of the next RuneId.
func (r RuneId) Delta(next RuneId) (uint64, uint32) {
blockDelta := next.BlockHeight - r.BlockHeight
// if the block is the same, then tx index is the difference between the two
if blockDelta == 0 {
return 0, next.TxIndex - r.TxIndex
}
// otherwise, tx index is the tx index in the next block
return blockDelta, next.TxIndex
}
// Next calculates the next RuneId given a block delta and tx index delta.
func (r RuneId) Next(blockDelta uint64, txIndexDelta uint32) (RuneId, error) {
if blockDelta == 0 {
if math.MaxUint32-r.TxIndex < txIndexDelta {
return RuneId{}, errs.OverflowUint32
}
return NewRuneId(
r.BlockHeight,
r.TxIndex+txIndexDelta,
)
}
if math.MaxUint64-r.BlockHeight < blockDelta {
return RuneId{}, errs.OverflowUint64
}
return NewRuneId(
r.BlockHeight+blockDelta,
txIndexDelta,
)
}
// MarshalJSON implements json.Marshaler
func (r RuneId) MarshalJSON() ([]byte, error) {
return []byte(`"` + r.String() + `"`), nil
}
// UnmarshalJSON implements json.Unmarshaler
func (r *RuneId) UnmarshalJSON(data []byte) error {
// data must be quoted
if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
return errors.New("must be string")
}
data = data[1 : len(data)-1]
parsed, err := NewRuneIdFromString(string(data))
if err != nil {
return errors.WithStack(err)
}
*r = parsed
return nil
}
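
A compact sketch of the delta encoding round trip used when enciphering edicts: Delta produces (block delta, tx index delta) relative to the previous RuneId, and Next reverses it. Placement in the runes package is assumed.

package runes

import "fmt"

// ExampleRuneId_Delta round-trips Delta and Next.
func ExampleRuneId_Delta() {
	first, _ := NewRuneId(840000, 10)
	second, _ := NewRuneId(840000, 25)
	third, _ := NewRuneId(840005, 7)

	blockDelta, txDelta := first.Delta(second)
	fmt.Println(blockDelta, txDelta) // same block: tx delta is the difference

	blockDelta, txDelta = second.Delta(third)
	fmt.Println(blockDelta, txDelta) // new block: tx delta is the absolute tx index

	next, _ := second.Next(blockDelta, txDelta)
	fmt.Println(next.String())
	// Output:
	// 0 15
	// 5 7
	// 840005:7
}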


@@ -0,0 +1,108 @@
package runes
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewRuneIdFromString(t *testing.T) {
type testcase struct {
name string
input string
expectedOutput RuneId
shouldError bool
}
// TODO: assert that the returned errors match the expected error instances
testcases := []testcase{
{
name: "valid rune id",
input: "1:2",
expectedOutput: RuneId{
BlockHeight: 1,
TxIndex: 2,
},
shouldError: false,
},
{
name: "too many separators",
input: "1:2:3",
expectedOutput: RuneId{},
shouldError: true,
},
{
name: "too few separators",
input: "1",
expectedOutput: RuneId{},
shouldError: true,
},
{
name: "invalid tx index",
input: "1:a",
expectedOutput: RuneId{},
shouldError: true,
},
{
name: "invalid block",
input: "a:1",
expectedOutput: RuneId{},
shouldError: true,
},
{
name: "empty tx index",
input: "1:",
expectedOutput: RuneId{},
shouldError: true,
},
{
name: "empty block",
input: ":1",
expectedOutput: RuneId{},
shouldError: true,
},
{
name: "empty block and tx index",
input: ":",
expectedOutput: RuneId{},
shouldError: true,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
runeId, err := NewRuneIdFromString(tc.input)
if tc.shouldError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, tc.expectedOutput, runeId)
}
})
}
}
func TestRuneIdMarshal(t *testing.T) {
runeId := RuneId{
BlockHeight: 1,
TxIndex: 2,
}
bytes, err := runeId.MarshalJSON()
assert.NoError(t, err)
assert.Equal(t, []byte(`"1:2"`), bytes)
}
func TestRuneIdUnmarshal(t *testing.T) {
str := `"1:2"`
var runeId RuneId
err := runeId.UnmarshalJSON([]byte(str))
assert.NoError(t, err)
assert.Equal(t, RuneId{
BlockHeight: 1,
TxIndex: 2,
}, runeId)
str = `1`
err = runeId.UnmarshalJSON([]byte(str))
assert.Error(t, err)
}


@@ -0,0 +1,272 @@
package runes
import (
"fmt"
"math"
"strings"
"testing"
"github.com/Cleverse/go-utilities/utils"
"github.com/gaze-network/indexer-network/common"
"github.com/gaze-network/uint128"
"github.com/stretchr/testify/assert"
)
func TestRuneString(t *testing.T) {
test := func(rune Rune, encoded string) {
t.Run(encoded, func(t *testing.T) {
t.Parallel()
actualEncoded := rune.String()
assert.Equal(t, encoded, actualEncoded)
actualRune, err := NewRuneFromString(encoded)
assert.NoError(t, err)
assert.Equal(t, rune, actualRune)
})
}
test(NewRune(0), "A")
test(NewRune(1), "B")
test(NewRune(2), "C")
test(NewRune(3), "D")
test(NewRune(4), "E")
test(NewRune(5), "F")
test(NewRune(6), "G")
test(NewRune(7), "H")
test(NewRune(8), "I")
test(NewRune(9), "J")
test(NewRune(10), "K")
test(NewRune(11), "L")
test(NewRune(12), "M")
test(NewRune(13), "N")
test(NewRune(14), "O")
test(NewRune(15), "P")
test(NewRune(16), "Q")
test(NewRune(17), "R")
test(NewRune(18), "S")
test(NewRune(19), "T")
test(NewRune(20), "U")
test(NewRune(21), "V")
test(NewRune(22), "W")
test(NewRune(23), "X")
test(NewRune(24), "Y")
test(NewRune(25), "Z")
test(NewRune(26), "AA")
test(NewRune(27), "AB")
test(NewRune(51), "AZ")
test(NewRune(52), "BA")
test(NewRune(53), "BB")
test(NewRuneFromUint128(utils.Must(uint128.FromString("2055900680524219742"))), "UNCOMMONGOODS")
test(NewRuneFromUint128(uint128.Max.Sub64(2)), "BCGDENLQRQWDSLRUGSNLBTMFIJAT")
test(NewRuneFromUint128(uint128.Max.Sub64(1)), "BCGDENLQRQWDSLRUGSNLBTMFIJAU")
test(NewRuneFromUint128(uint128.Max), "BCGDENLQRQWDSLRUGSNLBTMFIJAV")
}
func TestNewRuneFromBase26Error(t *testing.T) {
_, err := NewRuneFromString("?")
assert.ErrorIs(t, err, ErrInvalidBase26)
}
func TestFirstRuneHeight(t *testing.T) {
test := func(network common.Network, expected uint64) {
t.Run(network.String(), func(t *testing.T) {
t.Parallel()
actual := FirstRuneHeight(network)
assert.Equal(t, expected, actual)
})
}
test(common.NetworkMainnet, 840_000)
test(common.NetworkTestnet, 2_520_000)
}
func TestMinimumRuneAtHeightMainnet(t *testing.T) {
test := func(height uint64, encoded string) {
t.Run(fmt.Sprintf("%d", height), func(t *testing.T) {
t.Parallel()
rune, err := NewRuneFromString(encoded)
assert.NoError(t, err)
actual := MinimumRuneAtHeight(common.NetworkMainnet, height)
assert.Equal(t, rune, actual)
})
}
start := FirstRuneHeight(common.NetworkMainnet)
end := start + common.HalvingInterval
interval := uint64(common.HalvingInterval / 12)
test(0, "AAAAAAAAAAAAA")
test(start/2, "AAAAAAAAAAAAA")
test(start, "ZZYZXBRKWXVA")
test(start+1, "ZZXZUDIVTVQA")
test(end-1, "A")
test(end, "A")
test(end+1, "A")
test(math.MaxUint32, "A")
test(start+interval*0-1, "AAAAAAAAAAAAA")
test(start+interval*0, "ZZYZXBRKWXVA")
test(start+interval*0+1, "ZZXZUDIVTVQA")
test(start+interval*1-1, "AAAAAAAAAAAA")
test(start+interval*1, "ZZYZXBRKWXV")
test(start+interval*1+1, "ZZXZUDIVTVQ")
test(start+interval*2-1, "AAAAAAAAAAA")
test(start+interval*2, "ZZYZXBRKWY")
test(start+interval*2+1, "ZZXZUDIVTW")
test(start+interval*3-1, "AAAAAAAAAA")
test(start+interval*3, "ZZYZXBRKX")
test(start+interval*3+1, "ZZXZUDIVU")
test(start+interval*4-1, "AAAAAAAAA")
test(start+interval*4, "ZZYZXBRL")
test(start+interval*4+1, "ZZXZUDIW")
test(start+interval*5-1, "AAAAAAAA")
test(start+interval*5, "ZZYZXBS")
test(start+interval*5+1, "ZZXZUDJ")
test(start+interval*6-1, "AAAAAAA")
test(start+interval*6, "ZZYZXC")
test(start+interval*6+1, "ZZXZUE")
test(start+interval*7-1, "AAAAAA")
test(start+interval*7, "ZZYZY")
test(start+interval*7+1, "ZZXZV")
test(start+interval*8-1, "AAAAA")
test(start+interval*8, "ZZZA")
test(start+interval*8+1, "ZZYA")
test(start+interval*9-1, "AAAA")
test(start+interval*9, "ZZZ")
test(start+interval*9+1, "ZZY")
test(start+interval*10-2, "AAC")
test(start+interval*10-1, "AAA")
test(start+interval*10, "AAA")
test(start+interval*10+1, "AAA")
test(start+interval*10+interval/2, "NA")
test(start+interval*11-2, "AB")
test(start+interval*11-1, "AA")
test(start+interval*11, "AA")
test(start+interval*11+1, "AA")
test(start+interval*11+interval/2, "N")
test(start+interval*12-2, "B")
test(start+interval*12-1, "A")
test(start+interval*12, "A")
test(start+interval*12+1, "A")
}
func TestMinimumRuneAtHeightTestnet(t *testing.T) {
test := func(height uint64, runeStr string) {
t.Run(fmt.Sprintf("%d", height), func(t *testing.T) {
t.Parallel()
rune, err := NewRuneFromString(runeStr)
assert.NoError(t, err)
actual := MinimumRuneAtHeight(common.NetworkTestnet, height)
assert.Equal(t, rune, actual)
})
}
start := FirstRuneHeight(common.NetworkTestnet)
test(start-1, "AAAAAAAAAAAAA")
test(start, "ZZYZXBRKWXVA")
test(start+1, "ZZXZUDIVTVQA")
}
func TestIsReserved(t *testing.T) {
test := func(runeStr string, expected bool) {
t.Run(runeStr, func(t *testing.T) {
t.Parallel()
rune, err := NewRuneFromString(runeStr)
assert.NoError(t, err)
actual := rune.IsReserved()
assert.Equal(t, expected, actual)
})
}
test("A", false)
test("B", false)
test("ZZZZZZZZZZZZZZZZZZZZZZZZZZ", false)
test("AAAAAAAAAAAAAAAAAAAAAAAAAAA", true)
test("AAAAAAAAAAAAAAAAAAAAAAAAAAB", true)
test("BCGDENLQRQWDSLRUGSNLBTMFIJAV", true)
}
func TestGetReservedRune(t *testing.T) {
test := func(blockHeight uint64, txIndex uint32, expected Rune) {
t.Run(fmt.Sprintf("blockHeight_%d_txIndex_%d", blockHeight, txIndex), func(t *testing.T) {
t.Parallel()
rune := GetReservedRune(blockHeight, txIndex)
assert.Equal(t, expected.String(), rune.String())
})
}
test(0, 0, firstReservedRune)
test(0, 1, Rune(firstReservedRune.Uint128().Add(uint128.From64(1))))
test(0, 2, Rune(firstReservedRune.Uint128().Add(uint128.From64(2))))
test(1, 0, Rune(firstReservedRune.Uint128().Add(uint128.From64(1).Lsh(32))))
test(1, 1, Rune(firstReservedRune.Uint128().Add(uint128.From64(1).Lsh(32).Add(uint128.From64(1)))))
test(1, 2, Rune(firstReservedRune.Uint128().Add(uint128.From64(1).Lsh(32).Add(uint128.From64(2)))))
test(2, 0, Rune(firstReservedRune.Uint128().Add(uint128.From64(2).Lsh(32))))
test(2, 1, Rune(firstReservedRune.Uint128().Add(uint128.From64(2).Lsh(32).Add(uint128.From64(1)))))
test(2, 2, Rune(firstReservedRune.Uint128().Add(uint128.From64(2).Lsh(32).Add(uint128.From64(2)))))
test(math.MaxUint64, math.MaxUint32, Rune(firstReservedRune.Uint128().Add(uint128.From64(math.MaxUint64).Lsh(32).Add(uint128.From64(math.MaxUint32)))))
}
func TestUnlockSteps(t *testing.T) {
for i := 0; i < len(unlockSteps); i++ {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Parallel()
encoded := Rune(unlockSteps[i]).String()
expected := strings.Repeat("A", i+1)
assert.Equal(t, expected, encoded)
})
}
}
func TestCommitment(t *testing.T) {
test := func(rune Rune, expected []byte) {
t.Run(rune.String(), func(t *testing.T) {
t.Parallel()
actual := rune.Commitment()
assert.Equal(t, expected, actual)
})
}
test(NewRune(0), []byte{})
test(NewRune(1), []byte{1})
test(NewRune(2), []byte{2})
test(NewRune(255), []byte{255})
test(NewRune(256), []byte{0, 1})
test(NewRune(257), []byte{1, 1})
test(NewRune(65535), []byte{255, 255})
test(NewRune(65536), []byte{0, 0, 1})
}
func TestRuneMarshal(t *testing.T) {
rune := NewRune(5)
bytes, err := rune.MarshalJSON()
assert.NoError(t, err)
assert.Equal(t, []byte(`"F"`), bytes)
}
func TestRuneUnmarshal(t *testing.T) {
str := `"F"`
var rune Rune
err := rune.UnmarshalJSON([]byte(str))
assert.NoError(t, err)
assert.Equal(t, NewRune(5), rune)
str = `1`
err = rune.UnmarshalJSON([]byte(str))
assert.Error(t, err)
}


@@ -0,0 +1,389 @@
package runes
import (
"fmt"
"log"
"slices"
"unicode/utf8"
"github.com/btcsuite/btcd/txscript"
"github.com/cockroachdb/errors"
"github.com/gaze-network/indexer-network/common/errs"
"github.com/gaze-network/indexer-network/core/types"
"github.com/gaze-network/indexer-network/pkg/leb128"
"github.com/gaze-network/uint128"
"github.com/samber/lo"
)
const (
RUNESTONE_PAYLOAD_MAGIC_NUMBER = txscript.OP_13
RUNE_COMMIT_BLOCKS = 6
)
type Runestone struct {
// Rune to etch in this transaction
Etching *Etching
// The rune ID of the runestone to mint in this transaction
Mint *RuneId
// Denotes the transaction output to allocate leftover runes to. If nil, use the first non-OP_RETURN output. If target output is OP_RETURN, those runes are burned.
Pointer *uint64
// List of edicts to execute in this transaction
Edicts []Edict
// If true, the runestone is a cenotaph. All minted runes in a cenotaph are burned. Runes etched in a cenotaph are not mintable.
Cenotaph bool
// Bitmask of flaws that caused the runestone to be a cenotaph
Flaws Flaws
}
// Encipher encodes a runestone into a scriptPubKey, ready to be put into a transaction output.
func (r Runestone) Encipher() ([]byte, error) {
var payload []byte
encodeUint128 := func(value uint128.Uint128) {
payload = append(payload, leb128.EncodeUint128(value)...)
}
encodeTagValues := func(tag Tag, values ...uint128.Uint128) {
for _, value := range values {
// encode tag key
encodeUint128(tag.Uint128())
// encode tag value
encodeUint128(value)
}
}
encodeEdict := func(previousRuneId RuneId, edict Edict) {
blockHeight, txIndex := previousRuneId.Delta(edict.Id)
encodeUint128(uint128.From64(blockHeight))
encodeUint128(uint128.From64(uint64(txIndex)))
encodeUint128(edict.Amount)
encodeUint128(uint128.From64(uint64(edict.Output)))
}
if r.Etching != nil {
etching := r.Etching
flags := Flags(uint128.Zero)
flags.Set(FlagEtching)
if etching.Terms != nil {
flags.Set(FlagTerms)
}
if etching.Turbo {
flags.Set(FlagTurbo)
}
encodeTagValues(TagFlags, flags.Uint128())
if etching.Rune != nil {
encodeTagValues(TagRune, etching.Rune.Uint128())
}
if etching.Divisibility != nil {
encodeTagValues(TagDivisibility, uint128.From64(uint64(*etching.Divisibility)))
}
if etching.Spacers != nil {
encodeTagValues(TagSpacers, uint128.From64(uint64(*etching.Spacers)))
}
if etching.Symbol != nil {
encodeTagValues(TagSymbol, uint128.From64(uint64(*etching.Symbol)))
}
if etching.Premine != nil {
encodeTagValues(TagPremine, *etching.Premine)
}
if etching.Terms != nil {
terms := etching.Terms
if terms.Amount != nil {
encodeTagValues(TagAmount, *terms.Amount)
}
if terms.Cap != nil {
encodeTagValues(TagCap, *terms.Cap)
}
if terms.HeightStart != nil {
encodeTagValues(TagHeightStart, uint128.From64(*terms.HeightStart))
}
if terms.HeightEnd != nil {
encodeTagValues(TagHeightEnd, uint128.From64(*terms.HeightEnd))
}
if terms.OffsetStart != nil {
encodeTagValues(TagOffsetStart, uint128.From64(*terms.OffsetStart))
}
if terms.OffsetEnd != nil {
encodeTagValues(TagOffsetEnd, uint128.From64(*terms.OffsetEnd))
}
}
}
if r.Mint != nil {
encodeTagValues(TagMint, uint128.From64(r.Mint.BlockHeight), uint128.From64(uint64(r.Mint.TxIndex)))
}
if r.Pointer != nil {
encodeTagValues(TagPointer, uint128.From64(*r.Pointer))
}
if len(r.Edicts) > 0 {
encodeUint128(TagBody.Uint128())
edicts := make([]Edict, len(r.Edicts))
copy(edicts, r.Edicts)
// sort by block height first, then by tx index
slices.SortFunc(edicts, func(i, j Edict) int {
if i.Id.BlockHeight != j.Id.BlockHeight {
return int(i.Id.BlockHeight) - int(j.Id.BlockHeight)
}
return int(i.Id.TxIndex) - int(j.Id.TxIndex)
})
var previousRuneId RuneId
for _, edict := range edicts {
encodeEdict(previousRuneId, edict)
previousRuneId = edict.Id
}
}
sb := txscript.NewScriptBuilder().
AddOp(txscript.OP_RETURN).
AddOp(RUNESTONE_PAYLOAD_MAGIC_NUMBER)
// chunk payload to MaxScriptElementSize
for _, chunk := range lo.Chunk(payload, txscript.MaxScriptElementSize) {
sb.AddData(chunk)
}
scriptPubKey, err := sb.Script()
if err != nil {
return nil, errors.Wrap(err, "cannot build scriptPubKey")
}
return scriptPubKey, nil
}
// DecipherRunestone deciphers a runestone from a transaction. If the runestone is a cenotaph, the runestone is returned with Cenotaph set to true and Flaws set to the bitmask of flaws that caused the runestone to be a cenotaph.
// If no runestone is found, nil is returned.
func DecipherRunestone(tx *types.Transaction) (*Runestone, error) {
payload, flaws := runestonePayloadFromTx(tx)
if flaws != 0 {
return &Runestone{
Cenotaph: true,
Flaws: flaws,
}, nil
}
if payload == nil {
return nil, nil
}
integers, err := decodeLEB128VarIntsFromPayload(payload)
if err != nil {
log.Printf("warning: %v\n", err)
flaws |= FlawFlagVarInt.Mask()
return &Runestone{
Cenotaph: true,
Flaws: flaws,
}, nil
}
message := MessageFromIntegers(tx, integers)
edicts, fields := message.Edicts, message.Fields
flaws |= message.Flaws
flags, err := ParseFlags(lo.FromPtr(fields.Take(TagFlags)))
if err != nil {
return nil, errors.Wrap(err, "cannot parse flags")
}
var etching *Etching
if flags.Take(FlagEtching) {
divisibilityU128 := fields.Take(TagDivisibility)
if divisibilityU128 != nil && divisibilityU128.Cmp64(uint64(maxDivisibility)) > 0 {
divisibilityU128 = nil
}
spacersU128 := fields.Take(TagSpacers)
if spacersU128 != nil && spacersU128.Cmp64(uint64(maxSpacers)) > 0 {
spacersU128 = nil
}
symbolU128 := fields.Take(TagSymbol)
if symbolU128 != nil && symbolU128.Cmp64(utf8.MaxRune) > 0 {
symbolU128 = nil
}
var terms *Terms
if flags.Take(FlagTerms) {
var heightStart, heightEnd, offsetStart, offsetEnd *uint64
if value := fields.Take(TagHeightStart); value != nil && value.IsUint64() {
heightStart = lo.ToPtr(value.Uint64())
}
if value := fields.Take(TagHeightEnd); value != nil && value.IsUint64() {
heightEnd = lo.ToPtr(value.Uint64())
}
if value := fields.Take(TagOffsetStart); value != nil && value.IsUint64() {
offsetStart = lo.ToPtr(value.Uint64())
}
if value := fields.Take(TagOffsetEnd); value != nil && value.IsUint64() {
offsetEnd = lo.ToPtr(value.Uint64())
}
terms = &Terms{
Amount: fields.Take(TagAmount),
Cap: fields.Take(TagCap),
HeightStart: heightStart,
HeightEnd: heightEnd,
OffsetStart: offsetStart,
OffsetEnd: offsetEnd,
}
}
var divisibility *uint8
if divisibilityU128 != nil {
divisibility = lo.ToPtr(divisibilityU128.Uint8())
}
var spacers *uint32
if spacersU128 != nil {
spacers = lo.ToPtr(spacersU128.Uint32())
}
var symbol *rune
if symbolU128 != nil {
symbol = lo.ToPtr(rune(symbolU128.Uint32()))
}
etching = &Etching{
Divisibility: divisibility,
Premine: fields.Take(TagPremine),
Rune: (*Rune)(fields.Take(TagRune)),
Spacers: spacers,
Symbol: symbol,
Terms: terms,
Turbo: flags.Take(FlagTurbo),
}
}
var mint *RuneId
mintValues := fields[TagMint]
if len(mintValues) >= 2 {
mintRuneIdBlock := lo.FromPtr(fields.Take(TagMint))
mintRuneIdTx := lo.FromPtr(fields.Take(TagMint))
if mintRuneIdBlock.IsUint64() && mintRuneIdTx.IsUint32() {
runeId, err := NewRuneId(mintRuneIdBlock.Uint64(), mintRuneIdTx.Uint32())
if err != nil {
// invalid mint
flaws |= FlawFlagUnrecognizedEvenTag.Mask()
} else {
mint = &runeId
}
}
}
var pointer *uint64
pointerU128 := fields.Take(TagPointer)
if pointerU128 != nil {
if pointerU128.Cmp64(uint64(len(tx.TxOut))) < 0 {
pointer = lo.ToPtr(pointerU128.Uint64())
} else {
// invalid pointer
flaws |= FlawFlagUnrecognizedEvenTag.Mask()
}
}
if etching != nil {
_, err = etching.Supply()
if err != nil {
if errors.Is(err, errs.OverflowUint128) {
flaws |= FlawFlagSupplyOverflow.Mask()
} else {
return nil, errors.Wrap(err, "cannot calculate supply")
}
}
}
if !flags.Uint128().IsZero() {
flaws |= FlawFlagUnrecognizedFlag.Mask()
}
leftoverEvenTags := lo.Filter(lo.Keys(fields), func(tag Tag, _ int) bool {
return tag.Uint128().Mod64(2) == 0
})
if len(leftoverEvenTags) != 0 {
flaws |= FlawFlagUnrecognizedEvenTag.Mask()
}
if flaws != 0 {
var cenotaphEtching *Etching
if etching != nil && etching.Rune != nil {
cenotaphEtching = &Etching{
Rune: etching.Rune,
}
}
return &Runestone{
Cenotaph: true,
Flaws: flaws,
Mint: mint,
Etching: cenotaphEtching, // return etching with only Rune field if runestone is cenotaph
}, nil
}
return &Runestone{
Etching: etching,
Mint: mint,
Edicts: edicts,
Pointer: pointer,
}, nil
}
func runestonePayloadFromTx(tx *types.Transaction) ([]byte, Flaws) {
for _, output := range tx.TxOut {
tokenizer := txscript.MakeScriptTokenizer(0, output.PkScript)
// payload must start with OP_RETURN
if ok := tokenizer.Next(); !ok {
// script ended
continue
}
if err := tokenizer.Err(); err != nil {
continue
}
if opCode := tokenizer.Opcode(); opCode != txscript.OP_RETURN {
continue
}
// next opcode must be the magic number
if ok := tokenizer.Next(); !ok {
// script ended
continue
}
if err := tokenizer.Err(); err != nil {
fmt.Println(err.Error())
continue
}
if opCode := tokenizer.Opcode(); opCode != RUNESTONE_PAYLOAD_MAGIC_NUMBER {
continue
}
// this output is now selected to be the runestone output. Any errors from now on will be considered a flaw.
// construct the payload by concatenating the remaining data pushes
payload := make([]byte, 0)
for tokenizer.Next() {
if tokenizer.Err() != nil {
return nil, FlawFlagInvalidScript.Mask()
}
if !IsDataPushOpCode(tokenizer.Opcode()) {
return nil, FlawFlagOpCode.Mask()
}
payload = append(payload, tokenizer.Data()...)
}
if tokenizer.Err() != nil {
return nil, FlawFlagInvalidScript.Mask()
}
return payload, Flaws(0)
}
// if not found, return nil
return nil, 0
}
func decodeLEB128VarIntsFromPayload(payload []byte) ([]uint128.Uint128, error) {
integers := make([]uint128.Uint128, 0)
i := 0
for i < len(payload) {
n, length, err := leb128.DecodeUint128(payload[i:])
if err != nil {
return nil, errors.Wrap(err, "cannot decode LEB128 varint")
}
integers = append(integers, n)
i += length
}
return integers, nil
}
func IsDataPushOpCode(opCode byte) bool {
// includes OP_0, OP_DATA_1 to OP_DATA_75, OP_PUSHDATA1, OP_PUSHDATA2, OP_PUSHDATA4
return opCode <= txscript.OP_PUSHDATA4
}
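
Finally, a hedged sketch of Encipher: build a Runestone with an etching and check that the resulting scriptPubKey starts with OP_RETURN followed by the OP_13 magic number. The concrete rune and terms are illustrative, and placement in the runes package is assumed.

package runes

import (
	"fmt"

	"github.com/btcsuite/btcd/txscript"
	"github.com/gaze-network/uint128"
	"github.com/samber/lo"
)

// ExampleRunestone_Encipher builds a runestone script and inspects its prefix.
func ExampleRunestone_Encipher() {
	r := NewRune(2055900680524219742) // "UNCOMMONGOODS"
	stone := Runestone{
		Etching: &Etching{
			Rune:    &r,
			Premine: lo.ToPtr(uint128.From64(1000)),
			Terms: &Terms{
				Amount: lo.ToPtr(uint128.From64(100)),
				Cap:    lo.ToPtr(uint128.From64(10)),
			},
		},
		Pointer: lo.ToPtr(uint64(1)),
	}
	script, err := stone.Encipher()
	if err != nil {
		panic(err)
	}
	fmt.Println(script[0] == txscript.OP_RETURN, script[1] == RUNESTONE_PAYLOAD_MAGIC_NUMBER)
	// Output: true true
}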

Some files were not shown because too many files have changed in this diff.