From fcdecd4046b0846ec08bcd279d2a6e23fcc395b1 Mon Sep 17 00:00:00 2001 From: gazenw <163862510+gazenw@users.noreply.github.com> Date: Mon, 29 Apr 2024 15:16:10 +0700 Subject: [PATCH] feat: v0.1.0 release (#13) * fix: don't remove first block * fix: make etching_terms nullable * fix: fix panic if empty pkscript * chore: change testnet starting block * feat: more logs * fix: extract tapscript bug * feat: more logs * fix: switch pk to block height * chore: remove redundant log * fix: repo * fix: not found error * fix: golangci-lint * feat: add etching tx hash to rune entries * feat: stop main if indexer failed * fix: check balance after populating current balance * fix: sql ambiguous column * feat: add tx hash and out index in tx output * fix: actually use transactions to write db * fix: create rune entry states only during flushes * fix: mint cap reached off by one * fix: debug log unsafe * feat: prevent processing of txs before activation height * feat: add rune number to rune entry * feat: include new rune entries in event hash and flushing * refactor(config): separate init and get config func Co-authored-by: Gaze * feat: remove annoying log Co-authored-by: Gaze * feat: mod tidy Co-authored-by: Gaze * refactor: move main to root Co-authored-by: Gaze * feat(cli): create cli commands Co-authored-by: Gaze * refactor: move main logic to command Co-authored-by: Gaze * doc: remove unused desc Co-authored-by: Gaze * refactor: test structure in runestone_test.go * fix: edict flaws were ignored * feat: more tests * refactor(cli): add local flag Co-authored-by: Gaze * feat: set symbol limit to utf8.MaxRune * refactor(cli): flags for each module Co-authored-by: Gaze * feat(cli): support db selection Co-authored-by: Gaze * fix: remove temp code Co-authored-by: Gaze * fix: get data from cache in processor first, then dg * feat(cli): add version command Co-authored-by: Gaze * doc(cli): add refactor plan Co-authored-by: Gaze * refactor(cli): rename files Co-authored-by: Gaze * 
feat: add main.go Co-authored-by: Gaze * feat: more tests * feat: add overflow err * feat: finish runestone tests * refactor(cli): separate protocol config and cli flag Co-authored-by: Gaze * chore(btc): update example config Co-authored-by: Gaze * feat(btc): add get block header to datasource interface Co-authored-by: Gaze * feat(btc): reorg handling Co-authored-by: Gaze * fix: interface Co-authored-by: Gaze * fix: rename postgres config key * fix: migrated runes indexer integration to new cli * fix: commit every block * feat(btc): add revert data query Co-authored-by: Gaze * feat(btc): add revert data to processor Co-authored-by: Gaze * feat: implement public errors * fix: use errs in api * refactor: move api and usecase outside of internal * feat: add custom opcode check for datapush * fix: break if input utxo is not P2TR * fix: zero len destination case * fix: get the rest of transaction data in GetTransaction * refactor: create subscription utils tools Co-authored-by: Gaze * feat(btc): add btc_database from datasource Co-authored-by: Gaze * doc(btc): add note Co-authored-by: Gaze * wip(btc): implement prepare range func Co-authored-by: Gaze * feat(btc): add pg queries for datasource Co-authored-by: Gaze * feat(btc): update queries Co-authored-by: Gaze * feat(btc): implement repo for get blocks Co-authored-by: Gaze * feat(btc): update dg Co-authored-by: Gaze * fix(btc): return nil if errors Co-authored-by: Gaze * feat(btc): update fetch async for db datasource Co-authored-by: Gaze * feat(btc): add get block header from db for reorg handling Co-authored-by: Gaze * feat(btc): add todo notes Co-authored-by: Gaze * feat: implement get tx by hash * fix: rename func * fix: rename func * fix: rename func * fix: fix get transaction by hash * feat: integrate bitcoin client db to main * fix: reduce chunk size * fix: stop main if bitcoin indexer failed * fix: stop main if runes indexer failed * fix: move stop() inside goroutine * chore: add log * fix: duplicate rune entry 
number * feat(btc): add witness utils Co-authored-by: Gaze * feat(btc): witness datamodel parsing Co-authored-by: Gaze * fix(btc): invalid table name Co-authored-by: Gaze * fix(btc): remove unique index for hash Co-authored-by: Gaze * doc: add todo Co-authored-by: Gaze * feat(logger): remove error verbose Co-authored-by: Gaze * feat: support postgresql db Co-authored-by: Gaze * feat(btc): add err notfound Co-authored-by: Gaze * fix: invalid pgx version Co-authored-by: Gaze * fix: invalid indexer flow Co-authored-by: Gaze * feat: refactor runes api * feat: implement http server * fix: mount runes api * fix: error handler * fix: first empty state error Co-authored-by: Gaze * fix: off by one confirmation * ci: ignore RollBack error * fix: change WithPublicMessage to be prefix * feat: bump cstream version Co-authored-by: Gaze * feat(btc): nullable pkscript Co-authored-by: Gaze * feat(btc): change rollback style Co-authored-by: Gaze * refactor: move runes out of internal * feat: rename id field to runeId in rune transaction * feat(btc): update index Co-authored-by: Gaze * feat(btc): add default current block Co-authored-by: Gaze * doc: add note Co-authored-by: Gaze * fix(btc): use int64 to store sequence Co-authored-by: Gaze * fix(btc): upgrade data type for numbers Co-authored-by: Gaze * feat(btc): upgrade data type for idx Co-authored-by: Gaze * feat(btc): get indexed block impl Co-authored-by: Gaze * feat(btc): add common.ZeroHash Co-authored-by: Gaze * feat: add chainparam * feat: implement get transactions * fix: wrong condition for non-OP_RETURN output * feat(btc): add verify indexer states Co-authored-by: Gaze * refactor: sorting code Co-authored-by: Gaze * feat: fix interface * feat(btc): update chunk size Co-authored-by: Gaze * feat: add rune_etched column in rune transaction * fix: missing field in create * feat: add runeEtched in get transactions * feat: implement get token info * feat: add holders count in token info * feat: implement get holders * fix: 
return a new repository when beginning a new tx * fix: rename type * feat: add pkscript to outpoint balance * feat: implement get utxos by address api * fix: spend outpoint bug * feat: implement get balances by address batch * feat: sort balances result by amount * ci: create Dockerfile Co-authored-by: Gaze * ci: add arg run Co-authored-by: Gaze * perf: add automaxprocs Co-authored-by: Gaze * chore: add performance logging Co-authored-by: Gaze * chore: add performance logger for debug Co-authored-by: Gaze * fix: empty etched at * fix: revert data sequentially * fix: remove unused funcs * fix: main.go * feat: add flag --api-only to run cmd * fix: create index * fix: don't add zero mint to unallocated * fix: ignore zero burn amount * feat(reorg): add reorg detail Co-authored-by: Gaze * fix: wrong index type * feat: implement reporting client to report runes blocks * feat: implement report node * feat(runes): add latest block api Co-authored-by: Gaze * feat(btc): use logger warn Co-authored-by: Gaze * fix(btc): txout isn't reverted if it has to revert spent Co-authored-by: Gaze * fix: annoying error when unsubscribe fetcher Co-authored-by: Gaze * refactor(btc): readable code Co-authored-by: Gaze * fix(indexer): fix subscription closed before process when success fetch Co-authored-by: Gaze * fix: remove module enum * fix: increase max reorg limit * feat: add starting height for runes mainnet * fix(btc): fix `with` modified same row twice Co-authored-by: Gaze * fix(runes): handling latest block not found Co-authored-by: Gaze * feat: add decimals in get transactions * fix: wrong condition * feat: add more index * feat: implement get transactions by pkscript * feat: allow query by rune id too * feat: more comments * perf(btc): bitcoin indexer performance optimization (#4) * feat(btc): not null to witness Co-authored-by: Gaze * perf(btc): add batch insert txin Co-authored-by: Gaze * perf(btc): batch insert txout Co-authored-by: Gaze * perf(btc): batch insert transaction 
Co-authored-by: Gaze * feat(btc): remove old queries Co-authored-by: Gaze * fix(btc): typo Co-authored-by: Gaze * perf(btc): batch insert blocks (#5) Co-authored-by: Gaze --------- Co-authored-by: Gaze * feat(btc): Duplicate coinbase transaction handling (#7) * feat(btc): tx_hash can be duplicated in block v1 Co-authored-by: Gaze * feat(btc): duplicate tx will use same txin/txout from previous tx Co-authored-by: Gaze * feat(btc): prevent revert block v1 data if you really want to revert the data before the block version 2, you should reset the database and reindex the data instead. Co-authored-by: Gaze * doc(btc): update list duplicate tx hash Co-authored-by: Gaze * doc(btc): update docs Co-authored-by: Gaze * fix(btc): use last v1 block instead Co-authored-by: Gaze --------- Co-authored-by: Gaze * feat: add ping handler * fix: type Co-authored-by: Gaze * doc: add refactor note Co-authored-by: Gaze * ci: add golang linter and test runner gh action * ci: use go-test-action@v0 * ci: annotate test result * ci: update running flag * fix: try to fix malformed import path * feat: add mock test * ci: remove annotation ci * ci: add annotate test result * chore: remove unused * feat: try testify * feat: remove test * ci: add go test on macos, windows and go latest version * ci: test building * feat: remove mock code * ci: add sqlc diff checker action (#10) * feat: Graceful shutdown (#8) * feat: add shutdown function for indexer Co-authored-by: Gaze * feat: add force shutdown Co-authored-by: Gaze * revert Co-authored-by: Gaze * feat(btc): remove unused Co-authored-by: Gaze * style: go fmt Co-authored-by: Gaze * feat: separate context for worker and application * feat: increase force shutdown timeout Co-authored-by: Gaze * feat(btc): update logging Co-authored-by: Gaze * feat(btc): update shutdown function Co-authored-by: Gaze * feat: remove wg for shutdown Co-authored-by: Gaze * feat: refactor shutdown flow Co-authored-by: Gaze * feat: update shutdown flow Co-authored-by: Gaze 
* feat: update naming Co-authored-by: Gaze * feat: update force shutdown logic Co-authored-by: Gaze --------- Co-authored-by: Gaze * feat: check reporting config name * fix: use db config in bitcoin module for runes datasource * Add migrate commands (#2) * feat: add migrate up * feat: add down migration * fix: example * feat: change description * fix: hardcode migration source directory * Update README.md for public release. (#11) * feat: initial draft for README.md * fix: remove some sections * feat: add block reporting to first description * fix: reduce redundancy * feat: update README.md * Update README.md * feat: update README.md * fix: update config.yaml in README * fix: remove redundant words * fix: change default datasource * fix: config.yaml comments * feat: update README.md * refactor(logger): format logging (#12) * feat(logger): format main logger * feat(logger): use duration ms for gcp output * refactor(logger): bitcoin node logger * refactor(logger): indexer logger * refactor(logger): fix cmd logger * refactor(logger): logger in config package * refactor(logger): set pgx error log level debug * refactor(logger): btcclient datasource * refactor: processor name * refactor(logger): runes logger * refactor(logger): update logger * fix(runes): wrong btc db datasource * refactor(logger): remove unnecessary debug log * refactor: update logger in indexer * fix(logger): deadlock in load() * fix: remove unused --------- Co-authored-by: Gaze * feat(btc): remove unused func * fix: fix golangci-lint error * fix(pg): update logger level * doc: update config example * feat: go mod tidy * doc: update readme * fix: panic cause didn't handle error * doc: update example config * doc: update example config in readme * feat(logger): only log error stacktrace when debug mode is on * feat(reporting): handling invalid config error * feat(pg): handling invalid config error * fix: panic in get_token_info --------- Co-authored-by: Gaze Co-authored-by: Planxnx Co-authored-by: 
Thanee Charattrakool <37617738+Planxnx@users.noreply.github.com> --- .github/workflows/sqlc-verify.yml | 28 + .golangci.yaml | 22 +- .vscode/extensions.json | 3 + .vscode/settings.json | 82 + Dockerfile | 27 + README.md | 177 +- cmd/cmd.go | 59 + cmd/cmd_migrate.go | 20 + cmd/cmd_run.go | 370 ++++ cmd/cmd_version.go | 49 + cmd/migrate/cmd_down.go | 132 ++ cmd/migrate/cmd_up.go | 117 ++ cmd/migrate/logger.go | 22 + cmd/migrate/migrate.go | 25 + common/.gitkeep | 0 common/bitcoin.go | 4 + common/errs/errs.go | 99 + common/errs/public_errs.go | 43 + common/hash.go | 12 + common/network.go | 33 + config.example.yaml | 52 + core/constants/constants.go | 5 + core/datasources/bitcoin_node.go | 294 +++ core/datasources/datasources.go | 16 + core/indexers/bitcoin_indexer.go | 257 +++ core/indexers/indexers.go | 41 + core/types/bitcoin_block.go | 47 + core/types/bitcoin_transaction.go | 73 + core/worker.go | 1 - docs/database_migration.md | 34 + go.mod | 78 + go.sum | 311 +++ internal/.gitkeep | 0 internal/config/config.go | 120 ++ internal/postgres/interface.go | 37 + internal/postgres/postgres.go | 127 ++ internal/subscription/client_subscription.go | 31 + internal/subscription/subscription.go | 132 ++ main.go | 18 + modules/bitcoin/btcclient/client_db.go | 244 +++ modules/bitcoin/btcclient/contract.go | 12 + modules/bitcoin/config/config.go | 8 + modules/bitcoin/constants.go | 26 + .../000001_initialize_table.down.sql | 18 + .../migrations/000001_initialize_table.up.sql | 72 + .../database/postgresql/queries/data.sql | 99 + .../database/postgresql/queries/info.sql | 8 + modules/bitcoin/datagateway/bitcoin_data.go | 25 + modules/bitcoin/datagateway/indexer_info.go | 13 + modules/bitcoin/processor.go | 122 ++ modules/bitcoin/processor_process.go | 91 + modules/bitcoin/processor_process_test.go | 144 ++ modules/bitcoin/repository/postgres/block.go | 169 ++ .../repository/postgres/gen/data.sql.go | 408 ++++ modules/bitcoin/repository/postgres/gen/db.go | 32 + 
.../repository/postgres/gen/info.sql.go | 51 + .../bitcoin/repository/postgres/gen/models.go | 61 + modules/bitcoin/repository/postgres/info.go | 44 + .../bitcoin/repository/postgres/mappers.go | 197 ++ .../bitcoin/repository/postgres/postgres.go | 22 + .../repository/postgres/transaction.go | 35 + modules/runes/.gitkeep | 0 modules/runes/api/api.go | 11 + .../httphandler/get_balances_by_address.go | 116 ++ .../get_balances_by_address_batch.go | 139 ++ .../api/httphandler/get_current_block.go | 50 + modules/runes/api/httphandler/get_holders.go | 114 ++ .../runes/api/httphandler/get_token_info.go | 165 ++ .../runes/api/httphandler/get_transactions.go | 274 +++ .../api/httphandler/get_utxos_by_address.go | 146 ++ modules/runes/api/httphandler/httphandler.go | 114 ++ modules/runes/api/httphandler/routes.go | 18 + modules/runes/config/config.go | 10 + modules/runes/constants.go | 27 + .../000001_initialize_tables.down.sql | 14 + .../000001_initialize_tables.up.sql | 122 ++ .../database/postgresql/queries/data.sql | 118 ++ .../database/postgresql/queries/info.sql | 11 + modules/runes/datagateway/indexer_info.go | 15 + modules/runes/datagateway/runes.go | 81 + modules/runes/datagateway/tx.go | 12 + modules/runes/event_hash.go | 372 ++++ modules/runes/internal/entity/balance.go | 14 + .../runes/internal/entity/indexed_block.go | 11 + .../runes/internal/entity/indexer_state.go | 9 + .../runes/internal/entity/outpoint_balance.go | 16 + .../runes/internal/entity/rune_transaction.go | 76 + .../internal/entity/rune_transaction_test.go | 32 + modules/runes/processor.go | 231 +++ modules/runes/processor_process.go | 807 ++++++++ .../runes/repository/postgres/gen/batch.go | 130 ++ .../runes/repository/postgres/gen/data.sql.go | 816 ++++++++ modules/runes/repository/postgres/gen/db.go | 33 + .../runes/repository/postgres/gen/info.sql.go | 70 + .../runes/repository/postgres/gen/models.go | 114 ++ .../runes/repository/postgres/indexer_info.go | 56 + 
modules/runes/repository/postgres/mapper.go | 693 +++++++ .../runes/repository/postgres/mapper_test.go | 61 + modules/runes/repository/postgres/postgres.go | 20 + modules/runes/repository/postgres/runes.go | 483 +++++ modules/runes/repository/postgres/tx.go | 62 + modules/runes/runes/edict.go | 11 + modules/runes/runes/etching.go | 64 + modules/runes/runes/etching_test.go | 123 ++ modules/runes/runes/flag.go | 77 + modules/runes/runes/flaw.go | 60 + modules/runes/runes/message.go | 94 + modules/runes/runes/rune.go | 175 ++ modules/runes/runes/rune_entry.go | 130 ++ modules/runes/runes/rune_id.go | 119 ++ modules/runes/runes/rune_id_test.go | 108 ++ modules/runes/runes/rune_test.go | 272 +++ modules/runes/runes/runestone.go | 389 ++++ modules/runes/runes/runestone_test.go | 1671 +++++++++++++++++ modules/runes/runes/spaced_rune.go | 92 + modules/runes/runes/spaced_rune_test.go | 58 + modules/runes/runes/tag.go | 89 + modules/runes/usecase/get_balances.go | 25 + modules/runes/usecase/get_latest_block.go | 16 + .../runes/usecase/get_outpoint_balances.go | 16 + modules/runes/usecase/get_rune_entry.go | 48 + modules/runes/usecase/get_transactions.go | 17 + modules/runes/usecase/usecase.go | 18 + pkg/btcutils/witness.go | 59 + pkg/bufferpool/bufferpool.go | 56 + pkg/errorhandler/http.go | 33 + pkg/httpclient/httpclient.go | 171 ++ pkg/leb128/leb128.go | 48 + pkg/leb128/leb128_test.go | 83 + pkg/logger/context.go | 78 + pkg/logger/duration.go | 15 + pkg/logger/error.go | 57 + pkg/logger/level.go | 39 + pkg/logger/logger.go | 225 +++ pkg/logger/logger_gcp.go | 62 + pkg/logger/multi_handlers.go | 49 + pkg/logger/slogx/attr.go | 185 ++ pkg/logger/slogx/attr_keys.go | 14 + pkg/logger/slogx/slogx.go | 5 + pkg/reportingclient/reportingclient.go | 115 ++ pkg/stacktrace/errors.go | 89 + pkg/stacktrace/frame.go | 24 + pkg/stacktrace/stacktrace.go | 92 + sqlc.yaml | 29 + 144 files changed, 15758 insertions(+), 4 deletions(-) create mode 100644 .github/workflows/sqlc-verify.yml 
create mode 100644 .vscode/extensions.json create mode 100644 .vscode/settings.json create mode 100644 Dockerfile create mode 100644 cmd/cmd.go create mode 100644 cmd/cmd_migrate.go create mode 100644 cmd/cmd_run.go create mode 100644 cmd/cmd_version.go create mode 100644 cmd/migrate/cmd_down.go create mode 100644 cmd/migrate/cmd_up.go create mode 100644 cmd/migrate/logger.go create mode 100644 cmd/migrate/migrate.go delete mode 100644 common/.gitkeep create mode 100644 common/bitcoin.go create mode 100644 common/errs/errs.go create mode 100644 common/errs/public_errs.go create mode 100644 common/hash.go create mode 100644 common/network.go create mode 100644 config.example.yaml create mode 100644 core/constants/constants.go create mode 100644 core/datasources/bitcoin_node.go create mode 100644 core/datasources/datasources.go create mode 100644 core/indexers/bitcoin_indexer.go create mode 100644 core/indexers/indexers.go create mode 100644 core/types/bitcoin_block.go create mode 100644 core/types/bitcoin_transaction.go delete mode 100644 core/worker.go create mode 100644 docs/database_migration.md delete mode 100644 internal/.gitkeep create mode 100644 internal/config/config.go create mode 100644 internal/postgres/interface.go create mode 100644 internal/postgres/postgres.go create mode 100644 internal/subscription/client_subscription.go create mode 100644 internal/subscription/subscription.go create mode 100644 main.go create mode 100644 modules/bitcoin/btcclient/client_db.go create mode 100644 modules/bitcoin/btcclient/contract.go create mode 100644 modules/bitcoin/config/config.go create mode 100644 modules/bitcoin/constants.go create mode 100644 modules/bitcoin/database/postgresql/migrations/000001_initialize_table.down.sql create mode 100644 modules/bitcoin/database/postgresql/migrations/000001_initialize_table.up.sql create mode 100644 modules/bitcoin/database/postgresql/queries/data.sql create mode 100644 modules/bitcoin/database/postgresql/queries/info.sql 
create mode 100644 modules/bitcoin/datagateway/bitcoin_data.go create mode 100644 modules/bitcoin/datagateway/indexer_info.go create mode 100644 modules/bitcoin/processor.go create mode 100644 modules/bitcoin/processor_process.go create mode 100644 modules/bitcoin/processor_process_test.go create mode 100644 modules/bitcoin/repository/postgres/block.go create mode 100644 modules/bitcoin/repository/postgres/gen/data.sql.go create mode 100644 modules/bitcoin/repository/postgres/gen/db.go create mode 100644 modules/bitcoin/repository/postgres/gen/info.sql.go create mode 100644 modules/bitcoin/repository/postgres/gen/models.go create mode 100644 modules/bitcoin/repository/postgres/info.go create mode 100644 modules/bitcoin/repository/postgres/mappers.go create mode 100644 modules/bitcoin/repository/postgres/postgres.go create mode 100644 modules/bitcoin/repository/postgres/transaction.go delete mode 100644 modules/runes/.gitkeep create mode 100644 modules/runes/api/api.go create mode 100644 modules/runes/api/httphandler/get_balances_by_address.go create mode 100644 modules/runes/api/httphandler/get_balances_by_address_batch.go create mode 100644 modules/runes/api/httphandler/get_current_block.go create mode 100644 modules/runes/api/httphandler/get_holders.go create mode 100644 modules/runes/api/httphandler/get_token_info.go create mode 100644 modules/runes/api/httphandler/get_transactions.go create mode 100644 modules/runes/api/httphandler/get_utxos_by_address.go create mode 100644 modules/runes/api/httphandler/httphandler.go create mode 100644 modules/runes/api/httphandler/routes.go create mode 100644 modules/runes/config/config.go create mode 100644 modules/runes/constants.go create mode 100644 modules/runes/database/postgresql/migrations/000001_initialize_tables.down.sql create mode 100644 modules/runes/database/postgresql/migrations/000001_initialize_tables.up.sql create mode 100644 modules/runes/database/postgresql/queries/data.sql create mode 100644 
modules/runes/database/postgresql/queries/info.sql create mode 100644 modules/runes/datagateway/indexer_info.go create mode 100644 modules/runes/datagateway/runes.go create mode 100644 modules/runes/datagateway/tx.go create mode 100644 modules/runes/event_hash.go create mode 100644 modules/runes/internal/entity/balance.go create mode 100644 modules/runes/internal/entity/indexed_block.go create mode 100644 modules/runes/internal/entity/indexer_state.go create mode 100644 modules/runes/internal/entity/outpoint_balance.go create mode 100644 modules/runes/internal/entity/rune_transaction.go create mode 100644 modules/runes/internal/entity/rune_transaction_test.go create mode 100644 modules/runes/processor.go create mode 100644 modules/runes/processor_process.go create mode 100644 modules/runes/repository/postgres/gen/batch.go create mode 100644 modules/runes/repository/postgres/gen/data.sql.go create mode 100644 modules/runes/repository/postgres/gen/db.go create mode 100644 modules/runes/repository/postgres/gen/info.sql.go create mode 100644 modules/runes/repository/postgres/gen/models.go create mode 100644 modules/runes/repository/postgres/indexer_info.go create mode 100644 modules/runes/repository/postgres/mapper.go create mode 100644 modules/runes/repository/postgres/mapper_test.go create mode 100644 modules/runes/repository/postgres/postgres.go create mode 100644 modules/runes/repository/postgres/runes.go create mode 100644 modules/runes/repository/postgres/tx.go create mode 100644 modules/runes/runes/edict.go create mode 100644 modules/runes/runes/etching.go create mode 100644 modules/runes/runes/etching_test.go create mode 100644 modules/runes/runes/flag.go create mode 100644 modules/runes/runes/flaw.go create mode 100644 modules/runes/runes/message.go create mode 100644 modules/runes/runes/rune.go create mode 100644 modules/runes/runes/rune_entry.go create mode 100644 modules/runes/runes/rune_id.go create mode 100644 modules/runes/runes/rune_id_test.go create 
mode 100644 modules/runes/runes/rune_test.go create mode 100644 modules/runes/runes/runestone.go create mode 100644 modules/runes/runes/runestone_test.go create mode 100644 modules/runes/runes/spaced_rune.go create mode 100644 modules/runes/runes/spaced_rune_test.go create mode 100644 modules/runes/runes/tag.go create mode 100644 modules/runes/usecase/get_balances.go create mode 100644 modules/runes/usecase/get_latest_block.go create mode 100644 modules/runes/usecase/get_outpoint_balances.go create mode 100644 modules/runes/usecase/get_rune_entry.go create mode 100644 modules/runes/usecase/get_transactions.go create mode 100644 modules/runes/usecase/usecase.go create mode 100644 pkg/btcutils/witness.go create mode 100644 pkg/bufferpool/bufferpool.go create mode 100644 pkg/errorhandler/http.go create mode 100644 pkg/httpclient/httpclient.go create mode 100644 pkg/leb128/leb128.go create mode 100644 pkg/leb128/leb128_test.go create mode 100644 pkg/logger/context.go create mode 100644 pkg/logger/duration.go create mode 100644 pkg/logger/error.go create mode 100644 pkg/logger/level.go create mode 100644 pkg/logger/logger.go create mode 100644 pkg/logger/logger_gcp.go create mode 100644 pkg/logger/multi_handlers.go create mode 100644 pkg/logger/slogx/attr.go create mode 100644 pkg/logger/slogx/attr_keys.go create mode 100644 pkg/logger/slogx/slogx.go create mode 100644 pkg/reportingclient/reportingclient.go create mode 100644 pkg/stacktrace/errors.go create mode 100644 pkg/stacktrace/frame.go create mode 100644 pkg/stacktrace/stacktrace.go create mode 100644 sqlc.yaml diff --git a/.github/workflows/sqlc-verify.yml b/.github/workflows/sqlc-verify.yml new file mode 100644 index 0000000..ef0a03f --- /dev/null +++ b/.github/workflows/sqlc-verify.yml @@ -0,0 +1,28 @@ +name: Sqlc ORM Framework Verify +on: + workflow_dispatch: + pull_request: + branches: + - develop + - main + paths: + - "sqlc.yaml" + - "**.sql" + - ".github/workflows/sqlc-verify.yml" + +jobs: + sqlc-diff: + 
name: Sqlc Diff Checker + runs-on: "ubuntu-latest" # "self-hosted", "ubuntu-latest", "macos-latest", "windows-latest" + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: "0" + + - name: Setup Sqlc + uses: sqlc-dev/setup-sqlc@v4 + with: + sqlc-version: "1.26.0" + + - name: Check Diff + run: sqlc diff diff --git a/.golangci.yaml b/.golangci.yaml index 44cd175..4864087 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -51,6 +51,8 @@ linters: - prealloc # performance - Find slice declarations that could potentially be pre-allocated, https://github.com/alexkohler/prealloc - gosec # bugs - Inspects source code for security problems - wrapcheck # style, error - Checks that errors returned from external packages are wrapped, we should wrap the error from external library + - depguard # import - Go linter that checks if package imports are in a list of acceptable packages. + - sloglint # style, format Ensure consistent code style when using log/slog. ### Annoying Linters # - dupl # style - code clone detection @@ -66,20 +68,36 @@ linters-settings: misspell: locale: US ignore-words: [] + errcheck: + exclude-functions: + - (github.com/jackc/pgx/v5.Tx).Rollback wrapcheck: ignoreSigs: - .Errorf( - errors.New( - errors.Unwrap( + - errors.Join( - .Wrap( - .Wrapf( - .WithMessage( - .WithMessagef( - .WithStack( + - errs.NewPublicError( + - errs.WithPublicMessage( + - withstack.WithStackDepth( ignoreSigRegexps: - \.New.*Error\( - ignorePackageGlobs: - - "github.com/gofiber/fiber/*" goconst: ignore-tests: true min-occurrences: 5 + depguard: + rules: + main: + # Packages that are not allowed. 
+ deny: + - pkg: "github.com/pkg/errors" + desc: Should be replaced by "cockroachdb/errors" or "cleverse/go-utilities" package + sloglint: + attr-only: true + key-naming-case: snake + args-on-sep-lines: true diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 0000000..ba0c522 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,3 @@ +{ + "recommendations": ["dotenv.dotenv-vscode", "golang.go"] +} diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..164d963 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,82 @@ +{ + "editor.formatOnSave": true, + "files.exclude": { + "**/.git": true, + "**/.svn": true, + "**/.hg": true, + "**/CVS": true, + "**/.DS_Store": true + }, + "search.exclude": { + "**/node_modules": true, + "**/build": true, + "**/dist": true + }, + "[json]": { + "editor.formatOnSave": true, + "editor.defaultFormatter": "esbenp.prettier-vscode" + }, + // Golang + "[go]": { + "editor.formatOnSave": true, + "editor.codeActionsOnSave": { + "source.organizeImports": "explicit" + }, + "editor.codeLens": true + }, + "go.useLanguageServer": true, + "go.lintTool": "golangci-lint", + "go.lintFlags": ["--fix"], + "go.lintOnSave": "package", + "go.toolsManagement.autoUpdate": true, + "gopls": { + "formatting.gofumpt": true, // https://github.com/mvdan/gofumpt + "ui.codelenses": { + "gc_details": true + }, + "build.directoryFilters": ["-**/node_modules"], + "ui.semanticTokens": true, + "ui.completion.usePlaceholders": false, + "ui.diagnostic.analyses": { + // https://github.com/golang/tools/blob/master/gopls/doc/analyzers.md + // "fieldalignment": false, + "nilness": true, + "shadow": false, + "unusedparams": true, + "unusedvariable": true, + "unusedwrite": true, // ineffective assignment + "useany": true + }, + "ui.diagnostic.staticcheck": false, // use golangci-lint instead + "ui.diagnostic.annotations": { + // CMD+P and run command `Go: Toggle gc details` + "bounds": true, + 
"escape": true, + "inline": true, + "nil": true + }, + "ui.documentation.hoverKind": "FullDocumentation" + }, + "go.editorContextMenuCommands": { + // Right click on code to use this command + "toggleTestFile": false, + "addTags": false, + "removeTags": false, + "fillStruct": true, + "testAtCursor": false, + "testFile": false, + "testPackage": false, + "generateTestForFunction": true, + "generateTestForFile": false, + "generateTestForPackage": false, + "addImport": false, + "testCoverage": false, + "playground": false, + "debugTestAtCursor": false, + "benchmarkAtCursor": false + }, + "dotenv.enableAutocloaking": false, + "protoc": { + "options": ["--proto_path=pb"] + } +} diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..4fcf91b --- /dev/null +++ b/Dockerfile @@ -0,0 +1,27 @@ +FROM golang:1.22 as builder + +WORKDIR /app + +COPY go.mod go.sum ./ +RUN go mod download + +COPY ./ ./ + +ENV GOOS=linux +ENV CGO_ENABLED=0 + +RUN go build \ + -o main ./main.go + +FROM alpine:latest + +WORKDIR /app + +RUN apk --no-cache add ca-certificates tzdata + + +COPY --from=builder /app/main . + +# You can set `TZ` environment variable to change the timezone + +CMD ["/app/main", "run"] diff --git a/README.md b/README.md index 4f2b8e7..1a74efd 100644 --- a/README.md +++ b/README.md @@ -1 +1,176 @@ -# Gaze Indexer Network + + +# Gaze Indexer + +Gaze Indexer is an open-source and modular indexing client for Bitcoin meta-protocols. It has support for Bitcoin and Runes out of the box, with **Unified Consistent APIs** across fungible token protocols. + +Gaze Indexer is built with **modularity** in mind, allowing users to run all modules in one monolithic instance with a single command, or as a distributed cluster of micro-services. + +Gaze Indexer serves as a foundation for building ANY meta-protocol indexers, with efficient data fetching, reorg detection, and database migration tool. 
+This allows developers to focus on what **truly** matters: Meta-protocol indexing logic. New meta-protocols can be easily added by implementing new modules. + +Gaze Indexer also comes with a block reporting system for verifying data integrity of indexers. Visit the [Gaze Network dashboard](https://dash.gaze.network) to see the status of other indexers. + +- [Modules](#modules) + - [1. Bitcoin](#1-bitcoin) + - [2. Runes](#2-runes) +- [Installation](#installation) + - [Prerequisites](#prerequisites) + - [1. Hardware Requirements](#1-hardware-requirements) + - [2. Prepare Bitcoin Core RPC server.](#2-prepare-bitcoin-core-rpc-server) + - [3. Prepare database.](#3-prepare-database) + - [4. Prepare `config.yaml` file.](#4-prepare-configyaml-file) + - [Install with Docker (recommended)](#install-with-docker-recommended) + - [Install from source](#install-from-source) + +## Modules + +### 1. Bitcoin + +The Bitcoin Indexer, the heart of every meta-protocol, is responsible for indexing **Bitcoin transactions, blocks, and UTXOs**. It requires a Bitcoin Core RPC as source of Bitcoin transactions, +and stores the indexed data in database to be used by other modules. + +### 2. Runes + +The Runes Indexer is our first meta-protocol indexer. It indexes Runes states, transactions, runestones, and balances using Bitcoin transactions. +It comes with a set of APIs for querying historical Runes data. See our [API Reference](https://documenter.getpostman.com/view/28396285/2sA3Bn7Cxr) for full details. + +## Installation + +### Prerequisites + +#### 1. Hardware Requirements + +Each module requires different hardware requirements. +| Module | CPU | RAM | +| ------- | ---------- | ------ | +| Bitcoin | 0.25 cores | 256 MB | +| Runes | 0.5 cores | 1 GB | + +#### 2. Prepare Bitcoin Core RPC server. + +Gaze Indexer needs to fetch transaction data from a Bitcoin Core RPC, either self-hosted or using managed providers like QuickNode. 
+To self host a Bitcoin Core, see https://bitcoin.org/en/full-node. + +#### 3. Prepare database. + +Gaze Indexer has first-class support for PostgreSQL. If you wish to use other databases, you can implement your own database repository that satisfies each module's Data Gateway interface. +Here is our minimum database disk space requirement for each module. +| Module | Database Storage | +| ------- | ---------------- | +| Bitcoin | 240 GB | +| Runes | 150 GB | + +#### 4. Prepare `config.yaml` file. + +```yaml +# config.yaml +logger: + output: TEXT # Output format for logs. current supported formats: "TEXT" | "JSON" | "GCP" + debug: false + +# Network to run the indexer on. Current supported networks: "mainnet" | "testnet" +network: mainnet + +# Bitcoin Core RPC configuration options. +bitcoin_node: + host: "" # [Required] Host of Bitcoin Core RPC (without https://) + user: "" # Username to authenticate with Bitcoin Core RPC + pass: "" # Password to authenticate with Bitcoin Core RPC + disable_tls: false # Set to true to disable tls + +# Block reporting configuration options. See Block Reporting section for more details. +reporting: + disabled: false # Set to true to disable block reporting to Gaze Network. Default is false. + base_url: "https://indexer.api.gaze.network" # Defaults to "https://indexer.api.gaze.network" if left empty + name: "" # [Required if not disabled] Name of this indexer to show on the Gaze Network dashboard + website_url: "" # Public website URL to show on the dashboard. Can be left empty. + indexer_api_url: "" # Public url to access this indexer's API. Can be left empty if you want to keep your indexer private. + +# HTTP server configuration options. +http_server: + port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers. + +# Meta-protocol modules configuration options. +modules: + # Configuration options for Bitcoin module. Can be removed if not used. + bitcoin: + database: "postgres" # Database to store bitcoin data. 
current supported databases: "postgres" + postgres: + host: "localhost" + port: 5432 + user: "postgres" + password: "password" + db_name: "postgres" + # url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above. + + # Configuration options for Runes module. Can be removed if not used. + runes: + database: "postgres" # Database to store Runes data. current supported databases: "postgres" + datasource: "database" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node" | "database". If "database" is used, it will use the database config in bitcoin module as datasource. + api_handlers: # API handlers to enable. current supported handlers: "http" + - http + postgres: + host: "localhost" + port: 5432 + user: "postgres" + password: "password" + db_name: "postgres" + # url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above. +``` + +### Install with Docker (recommended) + +We will be using `docker-compose` for our installation guide. Make sure the `docker-compose.yaml` file is in the same directory as the `config.yaml` file. + +```yaml +# docker-compose.yaml +services: + gaze-indexer: + image: ghcr.io/gaze-network/gaze-indexer:v1.0.0 + container_name: gaze-indexer + restart: unless-stopped + ports: + - 8080:8080 # Expose HTTP server port to host + volumes: + - "./config.yaml:/app/config.yaml" # mount config.yaml file to the container as "/app/config.yaml" + command: ["/app/main", "run", "--bitcoin", "--runes"] # Put module flags after "run" commands to select which modules to run. +``` + +### Install from source + +1. Install `go` version 1.22 or higher. See Go installation guide [here](https://go.dev/doc/install). +2. Clone this repository. + +```bash +git clone https://github.com/gaze-network/gaze-indexer.git +cd gaze-indexer +``` + +3. Build the main binary. 
+ +```bash +# Get dependencies +go mod download + +# Build the main binary +go build -o gaze main.go +``` + +4. Run database migrations with the `migrate` command and module flags. + +```bash +./gaze migrate up --bitcoin --runes --database postgres://postgres:password@localhost:5432/postgres +``` + +5. Start the indexer with the `run` command and module flags. + +```bash +./gaze run --bitcoin --runes +``` + +If `config.yaml` is not located at `./app/config.yaml`, use the `--config` flag to specify the path to the `config.yaml` file. + +```bash +./gaze run --bitcoin --runes --config /path/to/config.yaml +``` diff --git a/cmd/cmd.go b/cmd/cmd.go new file mode 100644 index 0000000..da56f8a --- /dev/null +++ b/cmd/cmd.go @@ -0,0 +1,59 @@ +package cmd + +import ( + "context" + "log/slog" + + "github.com/gaze-network/indexer-network/internal/config" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/gaze-network/indexer-network/pkg/logger/slogx" + "github.com/spf13/cobra" +) + +var ( + // root command + cmd = &cobra.Command{ + Use: "gaze", + Long: `Description of gaze indexer`, + } + + // sub-commands + cmds = []*cobra.Command{ + NewVersionCommand(), + NewRunCommand(), + NewMigrateCommand(), + } +) + +// Execute runs the root command +func Execute(ctx context.Context) { + var configFile string + + // Add global flags + flags := cmd.PersistentFlags() + flags.StringVar(&configFile, "config", "", "config file, E.g. `./config.yaml`") + flags.String("network", "mainnet", "network to connect to, E.g. 
`mainnet` or `testnet`") + + // Bind flags to configuration + config.BindPFlag("network", flags.Lookup("network")) + + // Initialize configuration and logger on start command + cobra.OnInitialize(func() { + // Initialize configuration + config := config.Parse(configFile) + + // Initialize logger + if err := logger.Init(config.Logger); err != nil { + logger.PanicContext(ctx, "Something went wrong, can't init logger", slogx.Error(err), slog.Any("config", config.Logger)) + } + }) + + // Register sub-commands + cmd.AddCommand(cmds...) + + // Execute command + if err := cmd.ExecuteContext(ctx); err != nil { + // Cobra will print the error message by default + logger.DebugContext(ctx, "Error executing command", slogx.Error(err)) + } +} diff --git a/cmd/cmd_migrate.go b/cmd/cmd_migrate.go new file mode 100644 index 0000000..55ee09f --- /dev/null +++ b/cmd/cmd_migrate.go @@ -0,0 +1,20 @@ +package cmd + +import ( + "github.com/gaze-network/indexer-network/cmd/migrate" + _ "github.com/golang-migrate/migrate/v4/database/postgres" + _ "github.com/golang-migrate/migrate/v4/source/file" + "github.com/spf13/cobra" +) + +func NewMigrateCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "migrate", + Short: "Migrate database schema", + } + cmd.AddCommand( + migrate.NewMigrateUpCommand(), + migrate.NewMigrateDownCommand(), + ) + return cmd +} diff --git a/cmd/cmd_run.go b/cmd/cmd_run.go new file mode 100644 index 0000000..fada561 --- /dev/null +++ b/cmd/cmd_run.go @@ -0,0 +1,370 @@ +package cmd + +import ( + "context" + "fmt" + "log/slog" + "net/http" + "os" + "os/signal" + "runtime" + "strings" + "syscall" + "time" + + "github.com/btcsuite/btcd/rpcclient" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/datasources" + "github.com/gaze-network/indexer-network/core/indexers" + "github.com/gaze-network/indexer-network/internal/config" + 
"github.com/gaze-network/indexer-network/internal/postgres" + "github.com/gaze-network/indexer-network/modules/bitcoin" + "github.com/gaze-network/indexer-network/modules/bitcoin/btcclient" + btcdatagateway "github.com/gaze-network/indexer-network/modules/bitcoin/datagateway" + btcpostgres "github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres" + "github.com/gaze-network/indexer-network/modules/runes" + runesapi "github.com/gaze-network/indexer-network/modules/runes/api" + runesdatagateway "github.com/gaze-network/indexer-network/modules/runes/datagateway" + runespostgres "github.com/gaze-network/indexer-network/modules/runes/repository/postgres" + runesusecase "github.com/gaze-network/indexer-network/modules/runes/usecase" + "github.com/gaze-network/indexer-network/pkg/errorhandler" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/gaze-network/indexer-network/pkg/logger/slogx" + "github.com/gaze-network/indexer-network/pkg/reportingclient" + "github.com/gofiber/fiber/v2" + "github.com/gofiber/fiber/v2/middleware/compress" + fiberrecover "github.com/gofiber/fiber/v2/middleware/recover" + "github.com/samber/lo" + "github.com/spf13/cobra" +) + +const ( + shutdownTimeout = 60 * time.Second +) + +type runCmdOptions struct { + APIOnly bool + Bitcoin bool + Runes bool +} + +func NewRunCommand() *cobra.Command { + opts := &runCmdOptions{} + + // Create command + runCmd := &cobra.Command{ + Use: "run", + Short: "Start indexer-network service", + RunE: func(cmd *cobra.Command, args []string) error { + return runHandler(opts, cmd, args) + }, + } + + // TODO: separate flags and bind flags to each module cmd package. + + // Add local flags + flags := runCmd.Flags() + flags.BoolVar(&opts.APIOnly, "api-only", false, "Run only API server") + flags.BoolVar(&opts.Bitcoin, "bitcoin", false, "Enable Bitcoin indexer module") + flags.String("bitcoin-db", "postgres", `Database to store bitcoin data. 
current supported databases: "postgres"`) + flags.BoolVar(&opts.Runes, "runes", false, "Enable Runes indexer module") + flags.String("runes-db", "postgres", `Database to store runes data. current supported databases: "postgres"`) + flags.String("runes-datasource", "bitcoin-node", `Datasource to fetch bitcoin data for processing Meta-Protocol data. current supported datasources: "bitcoin-node" | "database"`) + + // Bind flags to configuration + config.BindPFlag("modules.bitcoin.database", flags.Lookup("bitcoin-db")) + config.BindPFlag("modules.runes.database", flags.Lookup("runes-db")) + config.BindPFlag("modules.runes.datasource", flags.Lookup("runes-datasource")) + + return runCmd +} + +type HttpHandler interface { + Mount(router fiber.Router) error +} + +func runHandler(opts *runCmdOptions, cmd *cobra.Command, _ []string) error { + conf := config.Load() + + // Validate inputs + { + if !conf.Network.IsSupported() { + return errors.Wrapf(errs.Unsupported, "%q network is not supported", conf.Network.String()) + } + } + + // Initialize application process context + ctx, stop := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + defer stop() + + // Initialize worker context to separate worker's lifecycle from main process + ctxWorker, stopWorker := context.WithCancel(context.Background()) + defer stopWorker() + + // Add logger context + ctxWorker = logger.WithContext(ctxWorker, slogx.Stringer("network", conf.Network)) + + // Initialize Bitcoin Core RPC Client + client, err := rpcclient.New(&rpcclient.ConnConfig{ + Host: conf.BitcoinNode.Host, + User: conf.BitcoinNode.User, + Pass: conf.BitcoinNode.Pass, + DisableTLS: conf.BitcoinNode.DisableTLS, + HTTPPostMode: true, + }, nil) + if err != nil { + logger.PanicContext(ctx, "Invalid Bitcoin node configuration", slogx.Error(err)) + } + defer client.Shutdown() + + // Check Bitcoin RPC connection + { + start := time.Now() + logger.InfoContext(ctx, "Connecting to Bitcoin Core RPC Server...", 
slogx.String("host", conf.BitcoinNode.Host)) + if err := client.Ping(); err != nil { + logger.PanicContext(ctx, "Can't connect to Bitcoin Core RPC Server", slogx.String("host", conf.BitcoinNode.Host), slogx.Error(err)) + } + logger.InfoContext(ctx, "Connected to Bitcoin Core RPC Server", slog.Duration("latency", time.Since(start))) + } + + // TODO: create module command package. + // each module should have its own command package and main package will routing the command to the module command package. + + // TODO: refactor module name to specific type instead of string? + httpHandlers := make(map[string]HttpHandler, 0) + + var reportingClient *reportingclient.ReportingClient + if !conf.Reporting.Disabled { + reportingClient, err = reportingclient.New(conf.Reporting) + if err != nil { + if errors.Is(err, errs.InvalidArgument) { + logger.PanicContext(ctx, "Invalid reporting configuration", slogx.Error(err)) + } + logger.PanicContext(ctx, "Something went wrong, can't create reporting client", slogx.Error(err)) + } + } + + // Initialize Bitcoin Indexer + if opts.Bitcoin { + ctx := logger.WithContext(ctx, slogx.String("module", "bitcoin")) + var ( + btcDB btcdatagateway.BitcoinDataGateway + indexerInfoDB btcdatagateway.IndexerInformationDataGateway + ) + switch strings.ToLower(conf.Modules.Bitcoin.Database) { + case "postgresql", "postgres", "pg": + pg, err := postgres.NewPool(ctx, conf.Modules.Bitcoin.Postgres) + if err != nil { + if errors.Is(err, errs.InvalidArgument) { + logger.PanicContext(ctx, "Invalid Postgres configuration for indexer", slogx.Error(err)) + } + logger.PanicContext(ctx, "Something went wrong, can't create Postgres connection pool", slogx.Error(err)) + } + defer pg.Close() + repo := btcpostgres.NewRepository(pg) + btcDB = repo + indexerInfoDB = repo + default: + return errors.Wrapf(errs.Unsupported, "%q database for indexer is not supported", conf.Modules.Bitcoin.Database) + } + if !opts.APIOnly { + processor := bitcoin.NewProcessor(conf, btcDB, 
indexerInfoDB) + datasource := datasources.NewBitcoinNode(client) + indexer := indexers.NewBitcoinIndexer(processor, datasource) + defer func() { + if err := indexer.ShutdownWithTimeout(shutdownTimeout); err != nil { + logger.ErrorContext(ctx, "Error during shutdown indexer", slogx.Error(err)) + return + } + logger.InfoContext(ctx, "Indexer stopped gracefully") + }() + + // Verify states before running Indexer + if err := processor.VerifyStates(ctx); err != nil { + return errors.WithStack(err) + } + + // Run Indexer + go func() { + // stop main process if indexer stopped + defer stop() + + logger.InfoContext(ctx, "Starting Gaze Indexer") + if err := indexer.Run(ctxWorker); err != nil { + logger.PanicContext(ctx, "Something went wrong, error during running indexer", slogx.Error(err)) + } + }() + } + } + + // Initialize Runes Indexer + if opts.Runes { + ctx := logger.WithContext(ctx, slogx.String("module", "runes")) + var ( + runesDg runesdatagateway.RunesDataGateway + indexerInfoDg runesdatagateway.IndexerInfoDataGateway + ) + switch strings.ToLower(conf.Modules.Runes.Database) { + case "postgresql", "postgres", "pg": + pg, err := postgres.NewPool(ctx, conf.Modules.Runes.Postgres) + if err != nil { + if errors.Is(err, errs.InvalidArgument) { + logger.PanicContext(ctx, "Invalid Postgres configuration for indexer", slogx.Error(err)) + } + logger.PanicContext(ctx, "Something went wrong, can't create Postgres connection pool", slogx.Error(err)) + } + defer pg.Close() + runesRepo := runespostgres.NewRepository(pg) + runesDg = runesRepo + indexerInfoDg = runesRepo + default: + return errors.Wrapf(errs.Unsupported, "%q database for indexer is not supported", conf.Modules.Runes.Database) + } + var bitcoinDatasource indexers.BitcoinDatasource + var bitcoinClient btcclient.Contract + switch strings.ToLower(conf.Modules.Runes.Datasource) { + case "bitcoin-node": + bitcoinNodeDatasource := datasources.NewBitcoinNode(client) + bitcoinDatasource = bitcoinNodeDatasource + 
bitcoinClient = bitcoinNodeDatasource + case "database": + pg, err := postgres.NewPool(ctx, conf.Modules.Bitcoin.Postgres) + if err != nil { + if errors.Is(err, errs.InvalidArgument) { + logger.PanicContext(ctx, "Invalid Postgres configuration for datasource", slogx.Error(err)) + } + logger.PanicContext(ctx, "Something went wrong, can't create Postgres connection pool", slogx.Error(err)) + } + defer pg.Close() + btcRepo := btcpostgres.NewRepository(pg) + btcClientDB := btcclient.NewClientDatabase(btcRepo) + bitcoinDatasource = btcClientDB + bitcoinClient = btcClientDB + default: + return errors.Wrapf(errs.Unsupported, "%q datasource is not supported", conf.Modules.Runes.Datasource) + } + + if !opts.APIOnly { + processor := runes.NewProcessor(runesDg, indexerInfoDg, bitcoinClient, bitcoinDatasource, conf.Network, reportingClient) + indexer := indexers.NewBitcoinIndexer(processor, bitcoinDatasource) + defer func() { + if err := indexer.ShutdownWithTimeout(shutdownTimeout); err != nil { + logger.ErrorContext(ctx, "Error during shutdown indexer", slogx.Error(err)) + return + } + logger.InfoContext(ctx, "Indexer stopped gracefully") + }() + + if err := processor.VerifyStates(ctx); err != nil { + return errors.WithStack(err) + } + + // Run Indexer + go func() { + // stop main process if indexer stopped + defer stop() + + logger.InfoContext(ctx, "Starting Gaze Indexer") + if err := indexer.Run(ctxWorker); err != nil { + logger.PanicContext(ctx, "Something went wrong, error during running indexer", slogx.Error(err)) + } + }() + } + + // Mount API + apiHandlers := lo.Uniq(conf.Modules.Runes.APIHandlers) + for _, handler := range apiHandlers { + switch handler { // TODO: support more handlers (e.g. 
gRPC) + case "http": + runesUsecase := runesusecase.New(runesDg, bitcoinClient) + runesHTTPHandler := runesapi.NewHTTPHandler(conf.Network, runesUsecase) + httpHandlers["runes"] = runesHTTPHandler + default: + logger.PanicContext(ctx, "Something went wrong, unsupported API handler", slogx.String("handler", handler)) + } + } + } + + // Wait for interrupt signal to gracefully stop the server with + // Setup HTTP server if there are any HTTP handlers + if len(httpHandlers) > 0 { + app := fiber.New(fiber.Config{ + AppName: "Gaze Indexer", + ErrorHandler: errorhandler.NewHTTPErrorHandler(), + }) + app. + Use(fiberrecover.New(fiberrecover.Config{ + EnableStackTrace: true, + StackTraceHandler: func(c *fiber.Ctx, e interface{}) { + buf := make([]byte, 1024) // bufLen = 1024 + buf = buf[:runtime.Stack(buf, false)] + logger.ErrorContext(c.UserContext(), "Something went wrong, panic in http handler", slogx.Any("panic", e), slog.String("stacktrace", string(buf))) + }, + })). + Use(compress.New(compress.Config{ + Level: compress.LevelDefault, + })) + + defer func() { + if err := app.ShutdownWithTimeout(shutdownTimeout); err != nil { + logger.ErrorContext(ctx, "Error during shutdown HTTP server", slogx.Error(err)) + return + } + logger.InfoContext(ctx, "HTTP server stopped gracefully") + }() + + // Health check + app.Get("/", func(c *fiber.Ctx) error { + return errors.WithStack(c.SendStatus(http.StatusOK)) + }) + + // mount http handlers from each http-enabled module + for module, handler := range httpHandlers { + if err := handler.Mount(app); err != nil { + logger.PanicContext(ctx, "Something went wrong, can't mount HTTP handler", slogx.Error(err), slogx.String("module", module)) + } + logger.InfoContext(ctx, "Mounted HTTP handler", slogx.String("module", module)) + } + + go func() { + // stop main process if API stopped + defer stop() + + logger.InfoContext(ctx, "Started HTTP server", slog.Int("port", conf.HTTPServer.Port)) + if err := app.Listen(fmt.Sprintf(":%d", 
conf.HTTPServer.Port)); err != nil { + logger.PanicContext(ctx, "Something went wrong, error during running HTTP server", slogx.Error(err)) + } + }() + } + + // Stop application if worker context is done + go func() { + <-ctxWorker.Done() + defer stop() + + logger.InfoContext(ctx, "Gaze Indexer Worker is stopped. Stopping application...") + }() + + logger.InfoContext(ctxWorker, "Gaze Indexer started") + + // Wait for interrupt signal to gracefully stop the server + <-ctx.Done() + + // Force shutdown if timeout exceeded or got signal again + go func() { + defer os.Exit(1) + + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + defer stop() + + select { + case <-ctx.Done(): + logger.FatalContext(ctx, "Received exit signal again. Force shutdown...") + case <-time.After(shutdownTimeout + 15*time.Second): + logger.FatalContext(ctx, "Shutdown timeout exceeded. Force shutdown...") + } + }() + + return nil +} diff --git a/cmd/cmd_version.go b/cmd/cmd_version.go new file mode 100644 index 0000000..77fe664 --- /dev/null +++ b/cmd/cmd_version.go @@ -0,0 +1,49 @@ +package cmd + +import ( + "fmt" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/constants" + "github.com/gaze-network/indexer-network/modules/bitcoin" + "github.com/gaze-network/indexer-network/modules/runes" + "github.com/spf13/cobra" +) + +var versions = map[string]string{ + "": constants.Version, + "bitcoin": bitcoin.Version, + "runes": runes.Version, +} + +type versionCmdOptions struct { + Modules string +} + +func NewVersionCommand() *cobra.Command { + opts := &versionCmdOptions{} + + cmd := &cobra.Command{ + Use: "version", + Short: "Show indexer-network version", + RunE: func(cmd *cobra.Command, args []string) error { + return versionHandler(opts, cmd, args) + }, + } + + flags := cmd.Flags() + flags.StringVar(&opts.Modules, "module", "", `Show version of a specific 
module. E.g. "bitcoin" | "runes"`) + + return cmd +} + +func versionHandler(opts *versionCmdOptions, _ *cobra.Command, _ []string) error { + version, ok := versions[opts.Modules] + if !ok { + // fmt.Fprintln(cmd.ErrOrStderr(), "Unknown module") + return errors.Wrap(errs.Unsupported, "Invalid module name") + } + fmt.Println(version) + return nil +} diff --git a/cmd/migrate/cmd_down.go b/cmd/migrate/cmd_down.go new file mode 100644 index 0000000..af4a503 --- /dev/null +++ b/cmd/migrate/cmd_down.go @@ -0,0 +1,132 @@ +package migrate + +import ( + "fmt" + "net/url" + "strconv" + "strings" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/golang-migrate/migrate/v4" + _ "github.com/golang-migrate/migrate/v4/database/postgres" + _ "github.com/golang-migrate/migrate/v4/source/file" + "github.com/samber/lo" + "github.com/spf13/cobra" +) + +type migrateDownCmdOptions struct { + DatabaseURL string + Bitcoin bool + Runes bool + All bool +} + +type migrateDownCmdArgs struct { + N int +} + +func (a *migrateDownCmdArgs) ParseArgs(args []string) error { + if len(args) > 0 { + // assume args already validated by cobra to be len(args) <= 1 + n, err := strconv.Atoi(args[0]) + if err != nil { + return errors.Wrap(err, "failed to parse N") + } + if n < 0 { + return errors.New("N must be a positive integer") + } + a.N = n + } + return nil +} + +func NewMigrateDownCommand() *cobra.Command { + opts := &migrateDownCmdOptions{} + + cmd := &cobra.Command{ + Use: "down [N]", + Short: "Apply all or N down migrations", + Args: cobra.MaximumNArgs(1), + Example: `gaze migrate down --database "postgres://postgres:postgres@localhost:5432/gaze-indexer?sslmode=disable"`, + RunE: func(cmd *cobra.Command, args []string) error { + // args already validated by cobra + var downArgs migrateDownCmdArgs + if err := downArgs.ParseArgs(args); err != nil { + return errors.Wrap(err, "failed to parse args") + } + return migrateDownHandler(opts, cmd, 
downArgs) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.Bitcoin, "bitcoin", false, "Apply Bitcoin down migrations") + flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes down migrations") + flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on") + flags.BoolVar(&opts.All, "all", false, "Confirm apply ALL down migrations without prompt") + + return cmd +} + +func migrateDownHandler(opts *migrateDownCmdOptions, _ *cobra.Command, args migrateDownCmdArgs) error { + if opts.DatabaseURL == "" { + return errors.New("--database is required") + } + databaseURL, err := url.Parse(opts.DatabaseURL) + if err != nil { + return errors.Wrap(err, "failed to parse database URL") + } + if _, ok := supportedDrivers[databaseURL.Scheme]; !ok { + return errors.Errorf("unsupported database driver: %s", databaseURL.Scheme) + } + // prevent accidental down all migrations + if args.N == 0 && !opts.All { + input := "" + fmt.Print("Are you sure you want to apply all down migrations? 
(y/N):") + fmt.Scanln(&input) + if !lo.Contains([]string{"y", "yes"}, strings.ToLower(input)) { + return nil + } + } + + applyDownMigrations := func(module string, sourcePath string, migrationTable string) error { + newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}}) + sourceURL := "file://" + sourcePath + m, err := migrate.New(sourceURL, newDatabaseURL.String()) + if err != nil { + if strings.Contains(err.Error(), "no such file or directory") { + return errors.Wrap(errs.InternalError, "migrations directory not found") + } + return errors.Wrap(err, "failed to open database") + } + m.Log = &consoleLogger{ + prefix: fmt.Sprintf("[%s] ", module), + } + if args.N == 0 { + m.Log.Printf("Applying down migrations...\n") + err = m.Down() + } else { + m.Log.Printf("Applying %d down migrations...\n", args.N) + err = m.Steps(-args.N) + } + if err != nil { + if !errors.Is(err, migrate.ErrNoChange) { + return errors.Wrapf(err, "failed to apply %s down migrations", module) + } + m.Log.Printf("No more down migrations to apply\n") + } + return nil + } + + if opts.Bitcoin { + if err := applyDownMigrations("Bitcoin", bitcoinMigrationSource, "bitcoin_schema_migrations"); err != nil { + return errors.WithStack(err) + } + } + if opts.Runes { + if err := applyDownMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil { + return errors.WithStack(err) + } + } + return nil +} diff --git a/cmd/migrate/cmd_up.go b/cmd/migrate/cmd_up.go new file mode 100644 index 0000000..ce2985c --- /dev/null +++ b/cmd/migrate/cmd_up.go @@ -0,0 +1,117 @@ +package migrate + +import ( + "fmt" + "net/url" + "strconv" + "strings" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/golang-migrate/migrate/v4" + _ "github.com/golang-migrate/migrate/v4/database/postgres" + _ "github.com/golang-migrate/migrate/v4/source/file" + "github.com/spf13/cobra" +) + +type migrateUpCmdOptions struct 
{ + DatabaseURL string + Bitcoin bool + Runes bool +} + +type migrateUpCmdArgs struct { + N int +} + +func (a *migrateUpCmdArgs) ParseArgs(args []string) error { + if len(args) > 0 { + // assume args already validated by cobra to be len(args) <= 1 + n, err := strconv.Atoi(args[0]) + if err != nil { + return errors.Wrap(err, "failed to parse N") + } + a.N = n + } + return nil +} + +func NewMigrateUpCommand() *cobra.Command { + opts := &migrateUpCmdOptions{} + + cmd := &cobra.Command{ + Use: "up [N]", + Short: "Apply all or N up migrations", + Args: cobra.MaximumNArgs(1), + Example: `gaze migrate up --database "postgres://postgres:postgres@localhost:5432/gaze-indexer?sslmode=disable"`, + RunE: func(cmd *cobra.Command, args []string) error { + // args already validated by cobra + var upArgs migrateUpCmdArgs + if err := upArgs.ParseArgs(args); err != nil { + return errors.Wrap(err, "failed to parse args") + } + return migrateUpHandler(opts, cmd, upArgs) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&opts.Bitcoin, "bitcoin", false, "Apply Bitcoin up migrations") + flags.BoolVar(&opts.Runes, "runes", false, "Apply Runes up migrations") + flags.StringVar(&opts.DatabaseURL, "database", "", "Database url to run migration on") + + return cmd +} + +func migrateUpHandler(opts *migrateUpCmdOptions, _ *cobra.Command, args migrateUpCmdArgs) error { + if opts.DatabaseURL == "" { + return errors.New("--database is required") + } + databaseURL, err := url.Parse(opts.DatabaseURL) + if err != nil { + return errors.Wrap(err, "failed to parse database URL") + } + if _, ok := supportedDrivers[databaseURL.Scheme]; !ok { + return errors.Errorf("unsupported database driver: %s", databaseURL.Scheme) + } + + applyUpMigrations := func(module string, sourcePath string, migrationTable string) error { + newDatabaseURL := cloneURLWithQuery(databaseURL, url.Values{"x-migrations-table": {migrationTable}}) + sourceURL := "file://" + sourcePath + m, err := migrate.New(sourceURL, 
newDatabaseURL.String()) + if err != nil { + if strings.Contains(err.Error(), "no such file or directory") { + return errors.Wrap(errs.InternalError, "migrations directory not found") + } + return errors.Wrap(err, "failed to open database") + } + m.Log = &consoleLogger{ + prefix: fmt.Sprintf("[%s] ", module), + } + if args.N == 0 { + m.Log.Printf("Applying up migrations...\n") + err = m.Up() + } else { + m.Log.Printf("Applying %d up migrations...\n", args.N) + err = m.Steps(args.N) + } + if err != nil { + if !errors.Is(err, migrate.ErrNoChange) { + return errors.Wrapf(err, "failed to apply %s up migrations", module) + } + m.Log.Printf("Migrations already up-to-date\n") + } + return nil + } + + if opts.Bitcoin { + if err := applyUpMigrations("Bitcoin", bitcoinMigrationSource, "bitcoin_schema_migrations"); err != nil { + return errors.WithStack(err) + } + } + if opts.Runes { + if err := applyUpMigrations("Runes", runesMigrationSource, "runes_schema_migrations"); err != nil { + return errors.WithStack(err) + } + } + return nil +} diff --git a/cmd/migrate/logger.go b/cmd/migrate/logger.go new file mode 100644 index 0000000..371c4cb --- /dev/null +++ b/cmd/migrate/logger.go @@ -0,0 +1,22 @@ +package migrate + +import ( + "fmt" + + "github.com/golang-migrate/migrate/v4" +) + +var _ migrate.Logger = (*consoleLogger)(nil) + +type consoleLogger struct { + prefix string + verbose bool +} + +func (l *consoleLogger) Printf(format string, v ...interface{}) { + fmt.Printf(l.prefix+format, v...) 
+} + +func (l *consoleLogger) Verbose() bool { + return l.verbose +} diff --git a/cmd/migrate/migrate.go b/cmd/migrate/migrate.go new file mode 100644 index 0000000..7ea32e9 --- /dev/null +++ b/cmd/migrate/migrate.go @@ -0,0 +1,25 @@ +package migrate + +import "net/url" + +const ( + bitcoinMigrationSource = "modules/bitcoin/database/postgresql/migrations" + runesMigrationSource = "modules/runes/database/postgresql/migrations" +) + +func cloneURLWithQuery(u *url.URL, newQuery url.Values) *url.URL { + clone := *u + query := clone.Query() + for key, values := range newQuery { + for _, value := range values { + query.Add(key, value) + } + } + clone.RawQuery = query.Encode() + return &clone +} + +var supportedDrivers = map[string]struct{}{ + "postgres": {}, + "postgresql": {}, +} diff --git a/common/.gitkeep b/common/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/common/bitcoin.go b/common/bitcoin.go new file mode 100644 index 0000000..827ce07 --- /dev/null +++ b/common/bitcoin.go @@ -0,0 +1,4 @@ +package common + +// HalvingInterval is the number of blocks between each halving event. +const HalvingInterval = 210_000 diff --git a/common/errs/errs.go b/common/errs/errs.go new file mode 100644 index 0000000..40a4e33 --- /dev/null +++ b/common/errs/errs.go @@ -0,0 +1,99 @@ +package errs + +import ( + "github.com/cockroachdb/errors" +) + +// set depth to 10 to skip runtime stacks and current file. 
+const depth = 10 + +// Common Application Errors +var ( + // NotFound is returned when a resource is not found + NotFound = errors.NewWithDepth(depth, "not found") + + // InternalError is returned when internal logic got error + InternalError = errors.NewWithDepth(depth, "internal error") + + // SomethingWentWrong is returned when got some bug or unexpected case + // + // inherited error from InternalError, + // so errors.Is(err, InternalError) == true + SomethingWentWrong = errors.WrapWithDepth(depth, InternalError, "something went wrong") + + // Skippable is returned when got an error but it can be skipped or ignored and continue + Skippable = errors.NewWithDepth(depth, "skippable") + + // Unsupported is returned when a feature or result is not supported + Unsupported = errors.NewWithDepth(depth, "unsupported") + + // NotSupported is returned when a feature or result is not supported + // alias of Unsupported + NotSupported = Unsupported + + // Unauthorized is returned when a request is unauthorized + Unauthorized = errors.NewWithDepth(depth, "unauthorized") + + // Timeout is returned when a connection to a resource timed out + Timeout = errors.NewWithDepth(depth, "timeout") + + // BadRequest is returned when a request is invalid + BadRequest = errors.NewWithDepth(depth, "bad request") + + // InvalidArgument is returned when an argument is invalid + // + // inherited error from BadRequest, + // so errors.Is(err, BadRequest) == true + InvalidArgument = errors.WrapWithDepth(depth, BadRequest, "invalid argument") + + // ArgumentRequired is returned when an argument is required + // + // inherited error from BadRequest, + // so errors.Is(err, BadRequest) == true + ArgumentRequired = errors.WrapWithDepth(depth, BadRequest, "argument required") + + // Duplicate is returned when a resource already exists + Duplicate = errors.NewWithDepth(depth, "duplicate") + + // Unimplemented is returned when a feature or method is not implemented + // + // inherited error from 
Unsupported, + // so errors.Is(err, Unsupported) == true + Unimplemented = errors.WrapWithDepth(depth, Unsupported, "unimplemented") +) + +// Business Logic errors +var ( + // Overflow is returned when an overflow error occurs + // + // inherited error from InternalError, + // so errors.Is(err, InternalError) == true + Overflow = errors.WrapWithDepth(depth, InternalError, "overflow") + + // OverflowUint64 is returned when an uint64 overflow error occurs + // + // inherited error from Overflow, + // so errors.Is(err, Overflow) == true + OverflowUint32 = errors.WrapWithDepth(depth, Overflow, "overflow uint32") + + // OverflowUint64 is returned when an uint64 overflow error occurs + // + // inherited error from Overflow, + // so errors.Is(err, Overflow) == true + OverflowUint64 = errors.WrapWithDepth(depth, Overflow, "overflow uint64") + + // OverflowUint128 is returned when an uint128 overflow error occurs + // + // inherited error from Overflow, + // so errors.Is(err, Overflow) == true + OverflowUint128 = errors.WrapWithDepth(depth, Overflow, "overflow uint128") + + // InvalidState is returned when a state is invalid + InvalidState = errors.NewWithDepth(depth, "invalid state") + + // ConflictSetting is returned when an indexer setting is conflicted + ConflictSetting = errors.NewWithDepth(depth, "conflict setting") + + // Closed is returned when a resource is closed + Closed = errors.NewWithDepth(depth, "closed") +) diff --git a/common/errs/public_errs.go b/common/errs/public_errs.go new file mode 100644 index 0000000..8eac68e --- /dev/null +++ b/common/errs/public_errs.go @@ -0,0 +1,43 @@ +package errs + +import ( + "fmt" + + "github.com/cockroachdb/errors" + "github.com/cockroachdb/errors/withstack" +) + +// PublicError is an error that, when caught by error handler, should return a user-friendly error response to the user. Responses vary between each protocol (http, grpc, etc.). 
+type PublicError struct { + err error + message string +} + +func (p PublicError) Error() string { + return p.err.Error() +} + +func (p PublicError) Message() string { + return p.message +} + +func (p PublicError) Unwrap() error { + return p.err +} + +func NewPublicError(message string) error { + return withstack.WithStackDepth(&PublicError{err: errors.New(message), message: message}, 1) +} + +func WithPublicMessage(err error, prefix string) error { + if err == nil { + return nil + } + var message string + if prefix != "" { + message = fmt.Sprintf("%s: %s", prefix, err.Error()) + } else { + message = err.Error() + } + return withstack.WithStackDepth(&PublicError{err: err, message: message}, 1) +} diff --git a/common/hash.go b/common/hash.go new file mode 100644 index 0000000..10c8760 --- /dev/null +++ b/common/hash.go @@ -0,0 +1,12 @@ +package common + +import ( + "github.com/Cleverse/go-utilities/utils" + "github.com/btcsuite/btcd/chaincfg/chainhash" +) + +// Zero value of chainhash.Hash +var ( + ZeroHash = *utils.Must(chainhash.NewHashFromStr("0000000000000000000000000000000000000000000000000000000000000000")) + NullHash = ZeroHash +) diff --git a/common/network.go b/common/network.go new file mode 100644 index 0000000..4710b0d --- /dev/null +++ b/common/network.go @@ -0,0 +1,33 @@ +package common + +import "github.com/btcsuite/btcd/chaincfg" + +type Network string + +const ( + NetworkMainnet Network = "mainnet" + NetworkTestnet Network = "testnet" +) + +var supportedNetworks = map[Network]struct{}{ + NetworkMainnet: {}, + NetworkTestnet: {}, +} + +var chainParams = map[Network]*chaincfg.Params{ + NetworkMainnet: &chaincfg.MainNetParams, + NetworkTestnet: &chaincfg.TestNet3Params, +} + +func (n Network) IsSupported() bool { + _, ok := supportedNetworks[n] + return ok +} + +func (n Network) ChainParams() *chaincfg.Params { + return chainParams[n] +} + +func (n Network) String() string { + return string(n) +} diff --git a/config.example.yaml b/config.example.yaml 
new file mode 100644 index 0000000..8c2aef8 --- /dev/null +++ b/config.example.yaml @@ -0,0 +1,52 @@ +logger: + output: TEXT # Output format for logs. current supported formats: "TEXT" | "JSON" | "GCP" + debug: false + +# Network to run the indexer on. Current supported networks: "mainnet" | "testnet" +network: mainnet + +# Bitcoin Core RPC configuration options. +bitcoin_node: + host: "" # [Required] Host of Bitcoin Core RPC (without https://) + user: "" # Username to authenticate with Bitcoin Core RPC + pass: "" # Password to authenticate with Bitcoin Core RPC + disable_tls: false # Set to true to disable tls + +# Block reporting configuration options. See Block Reporting section for more details. +reporting: + disabled: false # Set to true to disable block reporting to Gaze Network. Default is false. + base_url: "https://indexer.api.gaze.network" # Defaults to "https://indexer.api.gaze.network" if left empty + name: "" # [Required if not disabled] Name of this indexer to show on the Gaze Network dashboard + website_url: "" # Public website URL to show on the dashboard. Can be left empty. + indexer_api_url: "" # Public url to access this indexer's API. Can be left empty if you want to keep your indexer private. + +# HTTP server configuration options. +http_server: + port: 8080 # Port to run the HTTP server on for modules with HTTP API handlers. + +# Meta-protocol modules configuration options. +modules: + # Configuration options for Bitcoin module. Can be removed if not used. + bitcoin: + database: "postgres" # Database to store bitcoin data. current supported databases: "postgres" + postgres: + host: "localhost" + port: 5432 + user: "postgres" + password: "password" + db_name: "postgres" + # url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above. + + # Configuration options for Runes module. Can be removed if not used. 
+ runes: + database: "postgres" # Database to store Runes data. current supported databases: "postgres" + datasource: "database" # Data source to be used for Bitcoin data. current supported data sources: "bitcoin-node" | "database". If "database" is used, it will use the database config in bitcoin module as datasource. + api_handlers: # API handlers to enable. current supported handlers: "http" + - http + postgres: + host: "localhost" + port: 5432 + user: "postgres" + password: "password" + db_name: "postgres" + # url: "postgres://postgres:password@localhost:5432/postgres?sslmode=prefer" # [Optional] This will override other database credentials above. diff --git a/core/constants/constants.go b/core/constants/constants.go new file mode 100644 index 0000000..0526759 --- /dev/null +++ b/core/constants/constants.go @@ -0,0 +1,5 @@ +package constants + +const ( + Version = "v0.0.1" +) diff --git a/core/datasources/bitcoin_node.go b/core/datasources/bitcoin_node.go new file mode 100644 index 0000000..7dd43c7 --- /dev/null +++ b/core/datasources/bitcoin_node.go @@ -0,0 +1,294 @@ +package datasources + +import ( + "bytes" + "context" + "encoding/hex" + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/rpcclient" + "github.com/btcsuite/btcd/wire" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/internal/subscription" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/gaze-network/indexer-network/pkg/logger/slogx" + cstream "github.com/planxnx/concurrent-stream" + "github.com/samber/lo" +) + +const ( + blockStreamChunkSize = 5 +) + +// Make sure to implement the BitcoinDatasource interface +var _ Datasource[[]*types.Block] = (*BitcoinNodeDatasource)(nil) + +// BitcoinNodeDatasource fetch data from Bitcoin node for Bitcoin Indexer +type BitcoinNodeDatasource struct { + btcclient 
*rpcclient.Client +} + +// NewBitcoinNode create new BitcoinNodeDatasource with Bitcoin Core RPC Client +func NewBitcoinNode(btcclient *rpcclient.Client) *BitcoinNodeDatasource { + return &BitcoinNodeDatasource{ + btcclient: btcclient, + } +} + +func (p BitcoinNodeDatasource) Name() string { + return "bitcoin_node" +} + +// Fetch polling blocks from Bitcoin node +// +// - from: block height to start fetching, if -1, it will start from genesis block +// - to: block height to stop fetching, if -1, it will fetch until the latest block +func (d *BitcoinNodeDatasource) Fetch(ctx context.Context, from, to int64) ([]*types.Block, error) { + ch := make(chan []*types.Block) + subscription, err := d.FetchAsync(ctx, from, to, ch) + if err != nil { + return nil, errors.WithStack(err) + } + defer subscription.Unsubscribe() + + blocks := make([]*types.Block, 0) + for { + select { + case b, ok := <-ch: + if !ok { + return blocks, nil + } + blocks = append(blocks, b...) + case <-subscription.Done(): + if err := ctx.Err(); err != nil { + return nil, errors.Wrap(err, "context done") + } + return blocks, nil + case err := <-subscription.Err(): + if err != nil { + return nil, errors.Wrap(err, "got error while fetch async") + } + return blocks, nil + case <-ctx.Done(): + return nil, errors.Wrap(ctx.Err(), "context done") + } + } +} + +// FetchAsync polling blocks from Bitcoin node asynchronously (non-blocking) +// +// - from: block height to start fetching, if -1, it will start from genesis block +// - to: block height to stop fetching, if -1, it will fetch until the latest block +func (d *BitcoinNodeDatasource) FetchAsync(ctx context.Context, from, to int64, ch chan<- []*types.Block) (*subscription.ClientSubscription[[]*types.Block], error) { + ctx = logger.WithContext(ctx, + slogx.String("package", "datasources"), + slogx.String("datasource", d.Name()), + ) + + from, to, skip, err := d.prepareRange(from, to) + if err != nil { + return nil, errors.Wrap(err, "failed to prepare fetch 
range") + } + + subscription := subscription.NewSubscription(ch) + if skip { + if err := subscription.UnsubscribeWithContext(ctx); err != nil { + return nil, errors.Wrap(err, "failed to unsubscribe") + } + return subscription.Client(), nil + } + + // Create parallel stream + out := make(chan []*types.Block) + stream := cstream.NewStream(ctx, 8, out) + + // create slice of block height to fetch + blockHeights := make([]int64, 0, to-from+1) + for i := from; i <= to; i++ { + blockHeights = append(blockHeights, i) + } + + // Wait for stream to finish and close out channel + go func() { + defer close(out) + _ = stream.Wait() + }() + + // Fan-out blocks to subscription channel + go func() { + defer func() { + // add a bit delay to prevent shutdown before client receive all blocks + time.Sleep(100 * time.Millisecond) + + subscription.Unsubscribe() + }() + for { + select { + case data, ok := <-out: + // stream closed + if !ok { + return + } + + // empty blocks + if len(data) == 0 { + continue + } + + // send blocks to subscription channel + if err := subscription.Send(ctx, data); err != nil { + if errors.Is(err, errs.Closed) { + return + } + logger.WarnContext(ctx, "Failed to send bitcoin blocks to subscription client", + slogx.Int64("start", data[0].Header.Height), + slogx.Int64("end", data[len(data)-1].Header.Height), + slogx.Error(err), + ) + } + case <-ctx.Done(): + return + } + } + }() + + // Parallel fetch blocks from Bitcoin node until complete all block heights + // or subscription is done. 
+ go func() { + defer stream.Close() + done := subscription.Done() + chunks := lo.Chunk(blockHeights, blockStreamChunkSize) + for _, chunk := range chunks { + // TODO: Implement throttling logic to control the rate of fetching blocks (block/sec) + chunk := chunk + select { + case <-done: + return + case <-ctx.Done(): + return + default: + stream.Go(func() []*types.Block { + startAt := time.Now() + defer func() { + logger.DebugContext(ctx, "Fetched chunk of blocks from Bitcoin node", + slogx.Int("total_blocks", len(chunk)), + slogx.Int64("from", chunk[0]), + slogx.Int64("to", chunk[len(chunk)-1]), + slogx.Duration("duration", time.Since(startAt)), + ) + }() + // TODO: should concurrent fetch block or not ? + blocks := make([]*types.Block, 0, len(chunk)) + for _, height := range chunk { + hash, err := d.btcclient.GetBlockHash(height) + if err != nil { + logger.ErrorContext(ctx, "Can't get block hash from Bitcoin node rpc", slogx.Error(err), slogx.Int64("height", height)) + if err := subscription.SendError(ctx, errors.Wrapf(err, "failed to get block hash: height: %d", height)); err != nil { + logger.WarnContext(ctx, "Failed to send datasource error to subscription client", slogx.Error(err)) + } + return nil + } + + block, err := d.btcclient.GetBlock(hash) + if err != nil { + logger.ErrorContext(ctx, "Can't get block data from Bitcoin node rpc", slogx.Error(err), slogx.Int64("height", height)) + if err := subscription.SendError(ctx, errors.Wrapf(err, "failed to get block: height: %d, hash: %s", height, hash)); err != nil { + logger.WarnContext(ctx, "Failed to send datasource error to subscription client", slogx.Error(err)) + } + return nil + } + + blocks = append(blocks, types.ParseMsgBlock(block, height)) + } + return blocks + }) + } + } + }() + + return subscription.Client(), nil +} + +func (d *BitcoinNodeDatasource) prepareRange(fromHeight, toHeight int64) (start, end int64, skip bool, err error) { + start = fromHeight + end = toHeight + + // get current bitcoin 
block height + latestBlockHeight, err := d.btcclient.GetBlockCount() + if err != nil { + return -1, -1, false, errors.Wrap(err, "failed to get block count") + } + + // set start to genesis block height + if start < 0 { + start = 0 + } + + // set end to current bitcoin block height if + // - end is -1 + // - end is greater that current bitcoin block height + if end < 0 || end > latestBlockHeight { + end = latestBlockHeight + } + + // if start is greater than end, skip this round + if start > end { + return -1, -1, true, nil + } + + return start, end, false, nil +} + +// GetTransaction fetch transaction from Bitcoin node +func (d *BitcoinNodeDatasource) GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) { + rawTxVerbose, err := d.btcclient.GetRawTransactionVerbose(&txHash) + if err != nil { + return nil, errors.Wrap(err, "failed to get raw transaction") + } + + blockHash, err := chainhash.NewHashFromStr(rawTxVerbose.BlockHash) + if err != nil { + return nil, errors.Wrap(err, "failed to parse block hash") + } + block, err := d.btcclient.GetBlockVerboseTx(blockHash) + if err != nil { + return nil, errors.Wrap(err, "failed to get block header") + } + + // parse tx + txBytes, err := hex.DecodeString(rawTxVerbose.Hex) + if err != nil { + return nil, errors.Wrap(err, "failed to decode transaction hex") + } + var msgTx wire.MsgTx + if err := msgTx.Deserialize(bytes.NewReader(txBytes)); err != nil { + return nil, errors.Wrap(err, "failed to deserialize transaction") + } + var txIndex uint32 + for i, tx := range block.Tx { + if tx.Hex == rawTxVerbose.Hex { + txIndex = uint32(i) + break + } + } + + return types.ParseMsgTx(&msgTx, block.Height, *blockHash, txIndex), nil +} + +// GetBlockHeader fetch block header from Bitcoin node +func (d *BitcoinNodeDatasource) GetBlockHeader(ctx context.Context, height int64) (types.BlockHeader, error) { + hash, err := d.btcclient.GetBlockHash(height) + if err != nil { + return types.BlockHeader{}, 
errors.Wrap(err, "failed to get block hash") + } + + block, err := d.btcclient.GetBlockHeader(hash) + if err != nil { + return types.BlockHeader{}, errors.Wrap(err, "failed to get block header") + } + + return types.ParseMsgBlockHeader(*block, height), nil +} diff --git a/core/datasources/datasources.go b/core/datasources/datasources.go new file mode 100644 index 0000000..ff2f0d4 --- /dev/null +++ b/core/datasources/datasources.go @@ -0,0 +1,16 @@ +package datasources + +import ( + "context" + + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/internal/subscription" +) + +// Datasource is an interface for indexer data sources. +type Datasource[T any] interface { + Name() string + Fetch(ctx context.Context, from, to int64) (T, error) + FetchAsync(ctx context.Context, from, to int64, ch chan<- T) (*subscription.ClientSubscription[T], error) + GetBlockHeader(ctx context.Context, height int64) (types.BlockHeader, error) +} diff --git a/core/indexers/bitcoin_indexer.go b/core/indexers/bitcoin_indexer.go new file mode 100644 index 0000000..2ea99e6 --- /dev/null +++ b/core/indexers/bitcoin_indexer.go @@ -0,0 +1,257 @@ +package indexers + +import ( + "context" + "log/slog" + "sync" + "time" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/datasources" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/gaze-network/indexer-network/pkg/logger/slogx" +) + +const ( + maxReorgLookBack = 1000 +) + +type ( + BitcoinProcessor Processor[[]*types.Block] + BitcoinDatasource datasources.Datasource[[]*types.Block] +) + +// Make sure to implement the IndexerWorker interface +var _ IndexerWorker = (*BitcoinIndexer)(nil) + +// BitcoinIndexer is the polling indexer for sync Bitcoin data to the database. 
+type BitcoinIndexer struct { + Processor BitcoinProcessor + Datasource BitcoinDatasource + currentBlock types.BlockHeader + + quitOnce sync.Once + quit chan struct{} + done chan struct{} +} + +// NewBitcoinIndexer create new BitcoinIndexer +func NewBitcoinIndexer(processor BitcoinProcessor, datasource BitcoinDatasource) *BitcoinIndexer { + return &BitcoinIndexer{ + Processor: processor, + Datasource: datasource, + + quit: make(chan struct{}), + done: make(chan struct{}), + } +} + +func (*BitcoinIndexer) Type() string { + return "bitcoin" +} + +func (i *BitcoinIndexer) Shutdown() error { + return i.ShutdownWithContext(context.Background()) +} + +func (i *BitcoinIndexer) ShutdownWithTimeout(timeout time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return i.ShutdownWithContext(ctx) +} + +func (i *BitcoinIndexer) ShutdownWithContext(ctx context.Context) (err error) { + i.quitOnce.Do(func() { + close(i.quit) + select { + case <-i.done: + case <-time.After(180 * time.Second): + err = errors.Wrap(errs.Timeout, "indexer shutdown timeout") + case <-ctx.Done(): + err = errors.Wrap(ctx.Err(), "indexer shutdown context canceled") + } + }) + return +} + +func (i *BitcoinIndexer) Run(ctx context.Context) (err error) { + defer close(i.done) + + ctx = logger.WithContext(ctx, + slog.String("package", "indexers"), + slog.String("indexer", i.Type()), + slog.String("processor", i.Processor.Name()), + slog.String("datasource", i.Datasource.Name()), + ) + + // set to -1 to start from genesis block + i.currentBlock, err = i.Processor.CurrentBlock(ctx) + if err != nil { + if !errors.Is(err, errs.NotFound) { + return errors.Wrap(err, "can't init state, failed to get indexer current block") + } + i.currentBlock.Height = -1 + } + + ticker := time.NewTicker(pollingInterval) + defer ticker.Stop() + for { + select { + case <-i.quit: + logger.InfoContext(ctx, "Got quit signal, stopping indexer") + return nil + case <-ctx.Done(): + 
return nil + case <-ticker.C: + if err := i.process(ctx); err != nil { + logger.ErrorContext(ctx, "Indexer failed while processing", slogx.Error(err)) + return errors.Wrap(err, "process failed") + } + logger.DebugContext(ctx, "Waiting for next polling interval") + } + } +} + +func (i *BitcoinIndexer) process(ctx context.Context) (err error) { + // height range to fetch data + from, to := i.currentBlock.Height+1, int64(-1) + + logger.InfoContext(ctx, "Start fetching bitcoin blocks", slog.Int64("from", from)) + ch := make(chan []*types.Block) + subscription, err := i.Datasource.FetchAsync(ctx, from, to, ch) + if err != nil { + return errors.Wrap(err, "failed to fetch data") + } + defer subscription.Unsubscribe() + + for { + select { + case <-i.quit: + return nil + case blocks := <-ch: + // empty blocks + if len(blocks) == 0 { + continue + } + + startAt := time.Now() + ctx := logger.WithContext(ctx, + slogx.Int64("from", blocks[0].Header.Height), + slogx.Int64("to", blocks[len(blocks)-1].Header.Height), + ) + + // validate reorg from first block + { + remoteBlockHeader := blocks[0].Header + if !remoteBlockHeader.PrevBlock.IsEqual(&i.currentBlock.Hash) { + logger.WarnContext(ctx, "Detected chain reorganization. 
Searching for fork point...", + slogx.String("event", "reorg_detected"), + slogx.Stringer("current_hash", i.currentBlock.Hash), + slogx.Stringer("expected_hash", remoteBlockHeader.PrevBlock), + ) + + var ( + start = time.Now() + targetHeight = i.currentBlock.Height - 1 + beforeReorgBlockHeader = types.BlockHeader{ + Height: -1, + } + ) + for n := 0; n < maxReorgLookBack; n++ { + // TODO: concurrent fetch + indexedHeader, err := i.Processor.GetIndexedBlock(ctx, targetHeight) + if err != nil { + return errors.Wrapf(err, "failed to get indexed block, height: %d", targetHeight) + } + + remoteHeader, err := i.Datasource.GetBlockHeader(ctx, targetHeight) + if err != nil { + return errors.Wrapf(err, "failed to get remote block header, height: %d", targetHeight) + } + + // Found no reorg block + if indexedHeader.Hash.IsEqual(&remoteHeader.Hash) { + beforeReorgBlockHeader = remoteHeader + break + } + + // Walk back to find fork point + targetHeight -= 1 + } + + // Reorg look back limit reached + if beforeReorgBlockHeader.Height < 0 { + return errors.Wrap(errs.SomethingWentWrong, "reorg look back limit reached") + } + + logger.InfoContext(ctx, "Found reorg fork point, starting to revert data...", + slogx.String("event", "reorg_forkpoint"), + slogx.Int64("since", beforeReorgBlockHeader.Height+1), + slogx.Int64("total_blocks", i.currentBlock.Height-beforeReorgBlockHeader.Height), + slogx.Duration("search_duration", time.Since(start)), + ) + + // Revert all data since the reorg block + start = time.Now() + if err := i.Processor.RevertData(ctx, beforeReorgBlockHeader.Height+1); err != nil { + return errors.Wrap(err, "failed to revert data") + } + + // Set current block to before reorg block and + // end current round to fetch again + i.currentBlock = beforeReorgBlockHeader + logger.Info("Fixing chain reorganization completed", + slogx.Int64("current_block", i.currentBlock.Height), + slogx.Duration("duration", time.Since(start)), + ) + return nil + } + } + + // validate is block 
is continuous and no reorg + for i := 1; i < len(blocks); i++ { + if blocks[i].Header.Height != blocks[i-1].Header.Height+1 { + return errors.Wrapf(errs.InternalError, "block is not continuous, block[%d] height: %d, block[%d] height: %d", i-1, blocks[i-1].Header.Height, i, blocks[i].Header.Height) + } + + if !blocks[i].Header.PrevBlock.IsEqual(&blocks[i-1].Header.Hash) { + logger.WarnContext(ctx, "Chain Reorganization occurred in the middle of batch fetching blocks, need to try to fetch again") + + // end current round + return nil + } + } + + ctx = logger.WithContext(ctx, slog.Int("total_blocks", len(blocks))) + + // Start processing blocks + logger.InfoContext(ctx, "Processing blocks") + if err := i.Processor.Process(ctx, blocks); err != nil { + return errors.WithStack(err) + } + + // Update current state + i.currentBlock = blocks[len(blocks)-1].Header + + logger.InfoContext(ctx, "Processed blocks successfully", + slogx.String("event", "processed_blocks"), + slogx.Int64("current_block", i.currentBlock.Height), + slogx.Duration("duration", time.Since(startAt)), + ) + case <-subscription.Done(): + // end current round + if err := ctx.Err(); err != nil { + return errors.Wrap(err, "context done") + } + return nil + case <-ctx.Done(): + return errors.WithStack(ctx.Err()) + case err := <-subscription.Err(): + if err != nil { + return errors.Wrap(err, "got error while fetch async") + } + } + } +} diff --git a/core/indexers/indexers.go b/core/indexers/indexers.go new file mode 100644 index 0000000..9c195a5 --- /dev/null +++ b/core/indexers/indexers.go @@ -0,0 +1,41 @@ +package indexers + +import ( + "context" + "time" + + "github.com/gaze-network/indexer-network/core/types" +) + +const ( + // pollingInterval is the default polling interval for the indexer polling worker + pollingInterval = 15 * time.Second +) + +type IndexerWorker interface { + Type() string + Run(ctx context.Context) error + Shutdown() error + ShutdownWithTimeout(timeout time.Duration) error + 
ShutdownWithContext(ctx context.Context) error +} + +type Processor[T any] interface { + Name() string + + // Process processes the input data and indexes it. + Process(ctx context.Context, inputs T) error + + // CurrentBlock returns the latest indexed block header. + CurrentBlock(ctx context.Context) (types.BlockHeader, error) + + // GetIndexedBlock returns the indexed block header by the specified block height. + GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) + + // RevertData revert synced data to the specified block height for re-indexing. + RevertData(ctx context.Context, from int64) error + + // VerifyStates verifies the states of the indexed data and the indexer + // to ensure the last shutdown was graceful and no missing data. + VerifyStates(ctx context.Context) error +} diff --git a/core/types/bitcoin_block.go b/core/types/bitcoin_block.go new file mode 100644 index 0000000..9a41072 --- /dev/null +++ b/core/types/bitcoin_block.go @@ -0,0 +1,47 @@ +package types + +import ( + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/samber/lo" +) + +type BlockHeader struct { + Hash chainhash.Hash + Height int64 + Version int32 + PrevBlock chainhash.Hash + MerkleRoot chainhash.Hash + Timestamp time.Time + Bits uint32 + Nonce uint32 +} + +func ParseMsgBlockHeader(src wire.BlockHeader, height int64) BlockHeader { + hash := src.BlockHash() + return BlockHeader{ + Hash: hash, + Height: height, + Version: src.Version, + PrevBlock: src.PrevBlock, + MerkleRoot: src.MerkleRoot, + Timestamp: src.Timestamp, + Bits: src.Bits, + Nonce: src.Nonce, + } +} + +type Block struct { + Header BlockHeader + Transactions []*Transaction +} + +func ParseMsgBlock(src *wire.MsgBlock, height int64) *Block { + hash := src.Header.BlockHash() + return &Block{ + Header: ParseMsgBlockHeader(src.Header, height), + Transactions: lo.Map(src.Transactions, func(item *wire.MsgTx, index int) *Transaction { return 
ParseMsgTx(item, height, hash, uint32(index)) }), + } +} diff --git a/core/types/bitcoin_transaction.go b/core/types/bitcoin_transaction.go new file mode 100644 index 0000000..dd41832 --- /dev/null +++ b/core/types/bitcoin_transaction.go @@ -0,0 +1,73 @@ +package types + +import ( + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/samber/lo" +) + +type Transaction struct { + BlockHeight int64 + BlockHash chainhash.Hash + Index uint32 + TxHash chainhash.Hash + Version int32 + LockTime uint32 + TxIn []*TxIn + TxOut []*TxOut +} + +type TxIn struct { + SignatureScript []byte + Witness [][]byte + Sequence uint32 + PreviousOutIndex uint32 + PreviousOutTxHash chainhash.Hash +} + +type TxOut struct { + PkScript []byte + Value int64 +} + +func (o TxOut) IsOpReturn() bool { + return len(o.PkScript) > 0 && o.PkScript[0] == txscript.OP_RETURN +} + +// ParseMsgTx parses btcd/wire.MsgTx to Transaction. +func ParseMsgTx(src *wire.MsgTx, blockHeight int64, blockHash chainhash.Hash, index uint32) *Transaction { + return &Transaction{ + BlockHeight: blockHeight, + BlockHash: blockHash, + Index: index, + TxHash: src.TxHash(), + Version: src.Version, + LockTime: src.LockTime, + TxIn: lo.Map(src.TxIn, func(item *wire.TxIn, _ int) *TxIn { + return ParseTxIn(item) + }), + TxOut: lo.Map(src.TxOut, func(item *wire.TxOut, _ int) *TxOut { + return ParseTxOut(item) + }), + } +} + +// ParseTxIn parses btcd/wire.TxIn to TxIn. +func ParseTxIn(src *wire.TxIn) *TxIn { + return &TxIn{ + SignatureScript: src.SignatureScript, + Witness: src.Witness, + Sequence: src.Sequence, + PreviousOutIndex: src.PreviousOutPoint.Index, + PreviousOutTxHash: src.PreviousOutPoint.Hash, + } +} + +// ParseTxOut parses btcd/wire.TxOut to TxOut. 
+func ParseTxOut(src *wire.TxOut) *TxOut { + return &TxOut{ + PkScript: src.PkScript, + Value: src.Value, + } +} diff --git a/core/worker.go b/core/worker.go deleted file mode 100644 index 9a8bc95..0000000 --- a/core/worker.go +++ /dev/null @@ -1 +0,0 @@ -package core diff --git a/docs/database_migration.md b/docs/database_migration.md new file mode 100644 index 0000000..fb69480 --- /dev/null +++ b/docs/database_migration.md @@ -0,0 +1,34 @@ +# Database Migration + +We've used the golang-migrate library to manage the database migration. + +### Install golang-migrate + +```shell +$ brew install golang-migrate +``` + +### Commands + +#### Create new database sequence + +```shell +$ migrate create -ext sql -dir . -seq file_name +``` + +#### Up version database + +```shell +$ migrate -source file://. -database "postgres://postgres:$PASSWORD@localhost:5432/postgres?sslmode=disable" up +``` + +#### Down version database 1 version + +```shell +$ migrate -source file://. -database "postgres://postgres:$PASSWORD@localhost:5432/postgres?sslmode=disable" down 1 +``` + +### References: + +- Golang-Migrate: https://github.com/golang-migrate +- Connection string: https://www.connectionstrings.com/postgresql/ diff --git a/go.mod b/go.mod index c32f745..dad0bfc 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,81 @@ module github.com/gaze-network/indexer-network go 1.22 + +require ( + github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11 + github.com/btcsuite/btcd v0.24.0 + github.com/btcsuite/btcd/btcutil v1.1.5 + github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 + github.com/cockroachdb/errors v1.11.1 + github.com/gaze-network/uint128 v1.3.0 + github.com/gofiber/fiber/v2 v2.52.4 + github.com/golang-migrate/migrate/v4 v4.17.1 + github.com/jackc/pgx v3.6.2+incompatible + github.com/jackc/pgx/v5 v5.5.5 + github.com/mcosta74/pgx-slog v0.3.0 + github.com/planxnx/concurrent-stream v0.1.5 + github.com/samber/lo v1.39.0 + github.com/shopspring/decimal v1.3.1 + 
github.com/spf13/cobra v1.8.0 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.18.2 + github.com/stretchr/testify v1.8.4 + github.com/valyala/fasthttp v1.51.0 + go.uber.org/automaxprocs v1.5.3 + golang.org/x/sync v0.5.0 +) + +require ( + github.com/andybalholm/brotli v1.0.5 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.1.3 // indirect + github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect + github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect + github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/getsentry/sentry-go v0.18.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/uuid v1.5.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/klauspost/compress v1.17.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + 
github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/rivo/uniseg v0.2.0 // indirect + github.com/rogpeppe/go-internal v1.9.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/tcplisten v1.0.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/crypto v0.20.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/text v0.14.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum index e69de29..36499d3 100644 --- a/go.sum +++ b/go.sum @@ -0,0 +1,311 @@ +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11 h1:Xpbu03JdzqWEXcL6xr43Wxjnwh/Txt16WXJ7IlzvoxA= +github.com/Cleverse/go-utilities/utils v0.0.0-20240119201306-d71eb577ef11/go.mod h1:ft8CEDBt0csuZ+yM/bKf7ZlV6lWvWY/TFXzp7+Ze9Jw= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= +github.com/andybalholm/brotli v1.0.5/go.mod 
h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= +github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo= +github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4= +github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= +github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= +github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= +github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= +github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= 
+github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= +github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew 
v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg= +github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= +github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/frankban/quicktest v1.14.6 
h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gaze-network/uint128 v1.3.0 h1:25qtRiDKQXa+mD5rN0nbUkbvY26/uzfSF97eWvhIr0I= +github.com/gaze-network/uint128 v1.3.0/go.mod h1:zAwwcnoRUNiiQj0vjLmHgNgJ+w2RUgzMAJgl8d7tRug= +github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= +github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/gofiber/fiber/v2 v2.52.4 h1:P+T+4iK7VaqUsq2PALYEfBBo6bJZ4q3FP8cZ84EggTM= +github.com/gofiber/fiber/v2 v2.52.4/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-migrate/migrate/v4 v4.17.1 h1:4zQ6iqL6t6AiItphxJctQb3cFqWiSpMnX7wLTPnnYO4= +github.com/golang-migrate/migrate/v4 v4.17.1/go.mod h1:m8hinFyWBn0SA4QKHuKh175Pm9wjmxj3S2Mia7dbXzM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= +github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw= +github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod 
h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mcosta74/pgx-slog v0.3.0 h1:v7nl8XKE4ObGxZfYUUs8uUWrimvNib2V4P7Mp0WjSyw= +github.com/mcosta74/pgx-slog v0.3.0/go.mod h1:73/rhilX7+ybQ9RH/BZBtOkTDiGAH1yBrcatN6jQW5E= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod 
h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planxnx/concurrent-stream v0.1.5 h1:qSMM27m7AApvalS0rSmovxOtDCnLy0/HinYJPe3oQfQ= +github.com/planxnx/concurrent-stream v0.1.5/go.mod h1:vxnW2qxkCLppMo5+Zns3b5/CiVxYQjXRLVFGJ9xvkXk= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= +github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= 
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod 
h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.51.0 h1:8b30A5JlZ6C7AS81RsWjYMQmrZG6feChmgAolCl1SqA= +github.com/valyala/fasthttp v1.51.0/go.mod h1:oI2XroL+lI7vdXyYoQk03bXBThfFl2cVdIA3Xl7cH8g= +github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp 
v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/.gitkeep b/internal/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 0000000..1ba88f1 --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,120 @@ +package config + +import ( + "context" + "log/slog" + "strings" + "sync" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common" + btcconfig "github.com/gaze-network/indexer-network/modules/bitcoin/config" + runesconfig "github.com/gaze-network/indexer-network/modules/runes/config" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/gaze-network/indexer-network/pkg/logger/slogx" + "github.com/gaze-network/indexer-network/pkg/reportingclient" + "github.com/spf13/pflag" + "github.com/spf13/viper" +) + +var ( + isInit bool + mu sync.Mutex + config = &Config{ + Logger: logger.Config{ + Output: "TEXT", + }, + Network: common.NetworkMainnet, + BitcoinNode: BitcoinNodeClient{ + User: "user", + Pass: "pass", + }, + } +) + +type Config struct { + Logger logger.Config `mapstructure:"logger"` + BitcoinNode BitcoinNodeClient `mapstructure:"bitcoin_node"` + Network common.Network `mapstructure:"network"` + 
HTTPServer HTTPServerConfig `mapstructure:"http_server"` + Modules Modules `mapstructure:"modules"` + Reporting reportingclient.Config `mapstructure:"reporting"` +} + +type BitcoinNodeClient struct { + Host string `mapstructure:"host"` + User string `mapstructure:"user"` + Pass string `mapstructure:"pass"` + DisableTLS bool `mapstructure:"disable_tls"` +} + +type Modules struct { + Bitcoin btcconfig.Config `mapstructure:"bitcoin"` + Runes runesconfig.Config `mapstructure:"runes"` +} + +type HTTPServerConfig struct { + Port int `mapstructure:"port"` +} + +// Parse parse the configuration from environment variables +func Parse(configFile ...string) Config { + mu.Lock() + defer mu.Unlock() + return parse(configFile...) +} + +// Load returns the loaded configuration +func Load() Config { + mu.Lock() + defer mu.Unlock() + if isInit { + return *config + } + return parse() +} + +// BindPFlag binds a specific key to a pflag (as used by cobra). +// Example (where serverCmd is a Cobra instance): +// +// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") +// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) +func BindPFlag(key string, flag *pflag.Flag) { + if err := viper.BindPFlag(key, flag); err != nil { + logger.Panic("Something went wrong, failed to bind flag for config", slog.String("package", "config"), slogx.Error(err)) + } +} + +// SetDefault sets the default value for this key. +// SetDefault is case-insensitive for a key. +// Default only used when no value is provided by the user via flag, config or ENV. 
+func SetDefault(key string, value any) { viper.SetDefault(key, value) } + +func parse(configFile ...string) Config { + ctx := logger.WithContext(context.Background(), slog.String("package", "config")) + + if len(configFile) > 0 && configFile[0] != "" { + viper.SetConfigFile(configFile[0]) + } else { + viper.AddConfigPath("./") + viper.SetConfigName("config") + } + + viper.AutomaticEnv() + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + if err := viper.ReadInConfig(); err != nil { + var errNotfound viper.ConfigFileNotFoundError + if errors.As(err, &errNotfound) { + logger.WarnContext(ctx, "Config file not found, use default config value", slogx.Error(err)) + } else { + logger.PanicContext(ctx, "Invalid config file", slogx.Error(err)) + } + } + + if err := viper.Unmarshal(&config); err != nil { + logger.PanicContext(ctx, "Something went wrong, failed to unmarshal config", slogx.Error(err)) + } + + isInit = true + return *config +} diff --git a/internal/postgres/interface.go b/internal/postgres/interface.go new file mode 100644 index 0000000..496f59e --- /dev/null +++ b/internal/postgres/interface.go @@ -0,0 +1,37 @@ +package postgres + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgx/v5/pgxpool" +) + +// Make sure that interfaces are compatible with the pgx package +var ( + _ DB = (*pgx.Conn)(nil) + _ DB = (*pgxpool.Conn)(nil) +) + +// Queryable is an interface that can be used to execute queries and commands +type Queryable interface { + Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) + Query(context.Context, string, ...interface{}) (pgx.Rows, error) + QueryRow(context.Context, string, ...interface{}) pgx.Row +} + +// TxQueryable is an interface that can be used to execute queries and commands within a transaction +type TxQueryable interface { + Queryable + Begin(context.Context) (pgx.Tx, error) + BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) +} + 
+// DB is an interface that can be used to execute queries and commands, and also to send batches +type DB interface { + Queryable + TxQueryable + SendBatch(ctx context.Context, b *pgx.Batch) (br pgx.BatchResults) + Ping(ctx context.Context) error +} diff --git a/internal/postgres/postgres.go b/internal/postgres/postgres.go new file mode 100644 index 0000000..120c1b0 --- /dev/null +++ b/internal/postgres/postgres.go @@ -0,0 +1,127 @@ +package postgres + +import ( + "context" + "fmt" + + "github.com/Cleverse/go-utilities/utils" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/jackc/pgx/v5/tracelog" + pgxslog "github.com/mcosta74/pgx-slog" +) + +const ( + DefaultMaxConns = 16 + DefaultMinConns = 0 + DefaultLogLevel = tracelog.LogLevelError +) + +type Config struct { + Host string `mapstructure:"host"` // Default is 127.0.0.1 + Port string `mapstructure:"port"` // Default is 5432 + User string `mapstructure:"user"` // Default is empty + Password string `mapstructure:"password"` // Default is empty + DBName string `mapstructure:"db_name"` // Default is postgres + SSLMode string `mapstructure:"ssl_mode"` // Default is prefer + URL string `mapstructure:"url"` // If URL is provided, other fields are ignored + + MaxConns int32 `mapstructure:"max_conns"` // Default is 16 + MinConns int32 `mapstructure:"min_conns"` // Default is 0 + + Debug bool `mapstructure:"debug"` +} + +// New creates a new connection to the database +func New(ctx context.Context, conf Config) (*pgx.Conn, error) { + // Prepare connection pool configuration + connConfig, err := pgx.ParseConfig(conf.String()) + if err != nil { + return nil, errors.Join(errs.InvalidArgument, errors.Wrap(err, "failed while parse config")) + } + connConfig.Tracer = conf.QueryTracer() + + // Create a new connection + conn, err := pgx.ConnectConfig(ctx, 
connConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to create a new connection") + } + + // Test the connection + if err := conn.Ping(ctx); err != nil { + return nil, errors.Wrap(err, "failed to connect to the database") + } + + return conn, nil +} + +// NewPool creates a new connection pool to the database +func NewPool(ctx context.Context, conf Config) (*pgxpool.Pool, error) { + // Prepare connection pool configuration + connConfig, err := pgxpool.ParseConfig(conf.String()) + if err != nil { + return nil, errors.Join(errs.InvalidArgument, errors.Wrap(err, "failed while parse config")) + } + connConfig.MaxConns = utils.Default(conf.MaxConns, DefaultMaxConns) + connConfig.MinConns = utils.Default(conf.MinConns, DefaultMinConns) + connConfig.ConnConfig.Tracer = conf.QueryTracer() + + // Create a new connection pool + connPool, err := pgxpool.NewWithConfig(ctx, connConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to create a new connection pool") + } + + // Test the connection + if err := connPool.Ping(ctx); err != nil { + return nil, errors.Wrap(err, "failed to connect to the database") + } + + return connPool, nil +} + +// String returns the connection string (DSN format or URL format) +func (conf Config) String() string { + if conf.Host == "" { + conf.Host = "127.0.0.1" + } + if conf.Port == "" { + conf.Port = "5432" + } + if conf.SSLMode == "" { + conf.SSLMode = "prefer" + } + if conf.DBName == "" { + conf.DBName = "postgres" + } + + // Construct DSN + connString := fmt.Sprintf("host=%s dbname=%s port=%s sslmode=%s", conf.Host, conf.DBName, conf.Port, conf.SSLMode) + if conf.User != "" { + connString = fmt.Sprintf("%s user=%s", connString, conf.User) + } + if conf.Password != "" { + connString = fmt.Sprintf("%s password=%s", connString, conf.Password) + } + + // Prefer URL over DSN format + if conf.URL != "" { + connString = conf.URL + } + + return connString +} + +func (conf Config) QueryTracer() pgx.QueryTracer { + loglevel := 
DefaultLogLevel + if conf.Debug { + loglevel = tracelog.LogLevelTrace + } + return &tracelog.TraceLog{ + Logger: pgxslog.NewLogger(logger.With("package", "postgres")), + LogLevel: loglevel, + } +} diff --git a/internal/subscription/client_subscription.go b/internal/subscription/client_subscription.go new file mode 100644 index 0000000..20ec75e --- /dev/null +++ b/internal/subscription/client_subscription.go @@ -0,0 +1,31 @@ +package subscription + +import "context" + +// ClientSubscription is a subscription that can be used by the client to unsubscribe from the subscription. +type ClientSubscription[T any] struct { + subscription *Subscription[T] +} + +func (c *ClientSubscription[T]) Unsubscribe() { + c.subscription.Unsubscribe() +} + +func (c *ClientSubscription[T]) UnsubscribeWithContext(ctx context.Context) (err error) { + return c.subscription.UnsubscribeWithContext(ctx) +} + +// Err returns the error channel of the subscription. +func (c *ClientSubscription[T]) Err() <-chan error { + return c.subscription.Err() +} + +// Done returns the done channel of the subscription +func (c *ClientSubscription[T]) Done() <-chan struct{} { + return c.subscription.Done() +} + +// IsClosed returns status of the subscription +func (c *ClientSubscription[T]) IsClosed() bool { + return c.subscription.IsClosed() +} diff --git a/internal/subscription/subscription.go b/internal/subscription/subscription.go new file mode 100644 index 0000000..5d0294b --- /dev/null +++ b/internal/subscription/subscription.go @@ -0,0 +1,132 @@ +package subscription + +import ( + "context" + "sync" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" +) + +// SubscriptionBufferSize is the buffer size of the subscription channel. +// It is used to prevent blocking the client dispatcher when the client is slow to consume values. +var SubscriptionBufferSize = 8 + +// Subscription is a subscription to a stream of values from the client dispatcher. 
+// It has two channels: one for values, and one for errors. +type Subscription[T any] struct { + // The channel which the subscription sends values. + channel chan<- T + + // The in channel receives values from client dispatcher. + in chan T + + // The error channel receives the error from the client dispatcher. + err chan error + quiteOnce sync.Once + + // Closing of the subscription is requested by sending on 'quit'. This is handled by + // the forwarding loop, which closes 'forwardDone' when it has stopped sending to + // sub.channel. Finally, 'unsubDone' is closed after unsubscribing on the server side. + quit chan struct{} + quitDone chan struct{} +} + +func NewSubscription[T any](channel chan<- T) *Subscription[T] { + subscription := &Subscription[T]{ + channel: channel, + in: make(chan T, SubscriptionBufferSize), + err: make(chan error, SubscriptionBufferSize), + quit: make(chan struct{}), + quitDone: make(chan struct{}), + } + go func() { + subscription.run() + }() + return subscription +} + +func (s *Subscription[T]) Unsubscribe() { + _ = s.UnsubscribeWithContext(context.Background()) +} + +func (s *Subscription[T]) UnsubscribeWithContext(ctx context.Context) (err error) { + s.quiteOnce.Do(func() { + select { + case s.quit <- struct{}{}: + <-s.quitDone + case <-ctx.Done(): + err = ctx.Err() + } + }) + return errors.WithStack(err) +} + +// Client returns a client subscription for this subscription. +func (s *Subscription[T]) Client() *ClientSubscription[T] { + return &ClientSubscription[T]{ + subscription: s, + } +} + +// Err returns the error channel of the subscription. 
+func (s *Subscription[T]) Err() <-chan error { + return s.err +} + +// Done returns the done channel of the subscription +func (s *Subscription[T]) Done() <-chan struct{} { + return s.quitDone +} + +// IsClosed returns status of the subscription +func (s *Subscription[T]) IsClosed() bool { + select { + case <-s.quitDone: + return true + default: + return false + } +} + +// Send sends a value to the subscription channel. If the subscription is closed, it returns an error. +func (s *Subscription[T]) Send(ctx context.Context, value T) error { + select { + case s.in <- value: + case <-s.quitDone: + return errors.Wrap(errs.Closed, "subscription is closed") + case <-ctx.Done(): + return errors.WithStack(ctx.Err()) + } + return nil +} + +// SendError sends an error to the subscription error channel. If the subscription is closed, it returns an error. +func (s *Subscription[T]) SendError(ctx context.Context, err error) error { + select { + case s.err <- err: + case <-s.quitDone: + return errors.Wrap(errs.Closed, "subscription is closed") + case <-ctx.Done(): + return errors.WithStack(ctx.Err()) + } + return nil +} + +// run starts the forwarding loop for the subscription. 
+func (s *Subscription[T]) run() { + defer close(s.quitDone) + + for { + select { + case <-s.quit: + return + case value := <-s.in: + select { + case s.channel <- value: + case <-s.quit: + return + } + } + } +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..61f0e17 --- /dev/null +++ b/main.go @@ -0,0 +1,18 @@ +package main + +import ( + "context" + "os" + "os/signal" + "syscall" + + "github.com/gaze-network/indexer-network/cmd" + _ "go.uber.org/automaxprocs" +) + +func main() { + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + defer stop() + + cmd.Execute(ctx) +} diff --git a/modules/bitcoin/btcclient/client_db.go b/modules/bitcoin/btcclient/client_db.go new file mode 100644 index 0000000..69237d8 --- /dev/null +++ b/modules/bitcoin/btcclient/client_db.go @@ -0,0 +1,244 @@ +package btcclient + +import ( + "context" + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/datasources" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/internal/subscription" + "github.com/gaze-network/indexer-network/modules/bitcoin/datagateway" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/gaze-network/indexer-network/pkg/logger/slogx" + cstream "github.com/planxnx/concurrent-stream" + "github.com/samber/lo" +) + +// TODO: Refactor this, datasources.BitcoinNode and This package is the same. + +const ( + blockStreamChunkSize = 100 +) + +// Make sure to implement the BitcoinDatasource interface +var _ datasources.Datasource[[]*types.Block] = (*ClientDatabase)(nil) + +// ClientDatabase is a client to connect to the bitcoin database. 
+type ClientDatabase struct { + bitcoinDg datagateway.BitcoinDataGateway +} + +func NewClientDatabase(bitcoinDg datagateway.BitcoinDataGateway) *ClientDatabase { + return &ClientDatabase{ + bitcoinDg: bitcoinDg, + } +} + +func (d ClientDatabase) Name() string { + return "bitcoin_database" +} + +func (d *ClientDatabase) Fetch(ctx context.Context, from, to int64) ([]*types.Block, error) { + ch := make(chan []*types.Block) + subscription, err := d.FetchAsync(ctx, from, to, ch) + if err != nil { + return nil, errors.WithStack(err) + } + defer subscription.Unsubscribe() + + blocks := make([]*types.Block, 0) + for { + select { + case b, ok := <-ch: + if !ok { + return blocks, nil + } + blocks = append(blocks, b...) + case <-subscription.Done(): + if err := ctx.Err(); err != nil { + return nil, errors.Wrap(err, "context done") + } + return blocks, nil + case err := <-subscription.Err(): + if err != nil { + return nil, errors.Wrap(err, "got error while fetch async") + } + return blocks, nil + case <-ctx.Done(): + return nil, errors.Wrap(ctx.Err(), "context done") + } + } +} + +func (d *ClientDatabase) FetchAsync(ctx context.Context, from, to int64, ch chan<- []*types.Block) (*subscription.ClientSubscription[[]*types.Block], error) { + ctx = logger.WithContext(ctx, + slogx.String("package", "datasources"), + slogx.String("datasource", d.Name()), + ) + + from, to, skip, err := d.prepareRange(ctx, from, to) + if err != nil { + return nil, errors.Wrap(err, "failed to prepare fetch range") + } + + subscription := subscription.NewSubscription(ch) + if skip { + if err := subscription.UnsubscribeWithContext(ctx); err != nil { + return nil, errors.Wrap(err, "failed to unsubscribe") + } + return subscription.Client(), nil + } + + // Create parallel stream + out := make(chan []*types.Block) + stream := cstream.NewStream(ctx, 8, out) + + // create slice of block height to fetch + blockHeights := make([]int64, 0, to-from+1) + for i := from; i <= to; i++ { + blockHeights = 
append(blockHeights, i) + } + + // Wait for stream to finish and close out channel + go func() { + defer close(out) + _ = stream.Wait() + }() + + // Fan-out blocks to subscription channel + go func() { + defer func() { + // add a bit delay to prevent shutdown before client receive all blocks + time.Sleep(100 * time.Millisecond) + + subscription.Unsubscribe() + }() + for { + select { + case data, ok := <-out: + // stream closed + if !ok { + return + } + + // empty blocks + if len(data) == 0 { + continue + } + + // send blocks to subscription channel + if err := subscription.Send(ctx, data); err != nil { + if errors.Is(err, errs.Closed) { + return + } + logger.WarnContext(ctx, "Failed to send bitcoin blocks to subscription client", + slogx.Int64("start", data[0].Header.Height), + slogx.Int64("end", data[len(data)-1].Header.Height), + slogx.Error(err), + ) + } + case <-ctx.Done(): + return + } + } + }() + + // Parallel fetch blocks from Bitcoin node until complete all block heights + // or subscription is done. 
+ go func() { + defer stream.Close() + done := subscription.Done() + chunks := lo.Chunk(blockHeights, blockStreamChunkSize) + for _, chunk := range chunks { + chunk := chunk + select { + case <-done: + return + case <-ctx.Done(): + return + default: + if len(chunk) == 0 { + continue + } + stream.Go(func() []*types.Block { + startAt := time.Now() + defer func() { + logger.DebugContext(ctx, "Fetched chunk of blocks from Bitcoin node", + slogx.Int("total_blocks", len(chunk)), + slogx.Int64("from", chunk[0]), + slogx.Int64("to", chunk[len(chunk)-1]), + slogx.Duration("duration", time.Since(startAt)), + ) + }() + + fromHeight, toHeight := chunk[0], chunk[len(chunk)-1] + blocks, err := d.bitcoinDg.GetBlocksByHeightRange(ctx, fromHeight, toHeight) + if err != nil { + logger.ErrorContext(ctx, "Can't get block data from Bitcoin database", + slogx.Error(err), + slogx.Int64("from", fromHeight), + slogx.Int64("to", toHeight), + ) + if err := subscription.SendError(ctx, errors.Wrapf(err, "failed to get blocks: from_height: %d, to_height: %d", fromHeight, toHeight)); err != nil { + logger.WarnContext(ctx, "Failed to send datasource error to subscription client", slogx.Error(err)) + } + return nil + } + return blocks + }) + } + } + }() + + return subscription.Client(), nil +} + +func (c *ClientDatabase) GetBlockHeader(ctx context.Context, height int64) (types.BlockHeader, error) { + header, err := c.bitcoinDg.GetBlockHeaderByHeight(ctx, height) + if err != nil { + return types.BlockHeader{}, errors.WithStack(err) + } + return header, nil +} + +func (c *ClientDatabase) prepareRange(ctx context.Context, fromHeight, toHeight int64) (start, end int64, skip bool, err error) { + start = fromHeight + end = toHeight + + // get current bitcoin block height + latestBlock, err := c.bitcoinDg.GetLatestBlockHeader(ctx) + if err != nil { + return -1, -1, false, errors.Wrap(err, "failed to get block count") + } + + // set start to genesis block height + if start < 0 { + start = 0 + } + + // set 
end to current bitcoin block height if + // - end is -1 + // - end is greater that current bitcoin block height + if end < 0 || end > latestBlock.Height { + end = latestBlock.Height + } + + // if start is greater than end, skip this round + if start > end { + return -1, -1, true, nil + } + + return start, end, false, nil +} + +// GetTransactionByHash returns a transaction with the given hash. Returns errs.NotFound if transaction does not exist. +func (c *ClientDatabase) GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) { + tx, err := c.bitcoinDg.GetTransactionByHash(ctx, txHash) + if err != nil { + return nil, errors.Wrap(err, "failed to get transaction by hash") + } + return tx, nil +} diff --git a/modules/bitcoin/btcclient/contract.go b/modules/bitcoin/btcclient/contract.go new file mode 100644 index 0000000..3a718d1 --- /dev/null +++ b/modules/bitcoin/btcclient/contract.go @@ -0,0 +1,12 @@ +package btcclient + +import ( + "context" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/gaze-network/indexer-network/core/types" +) + +type Contract interface { + GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) +} diff --git a/modules/bitcoin/config/config.go b/modules/bitcoin/config/config.go new file mode 100644 index 0000000..fce6f03 --- /dev/null +++ b/modules/bitcoin/config/config.go @@ -0,0 +1,8 @@ +package config + +import "github.com/gaze-network/indexer-network/internal/postgres" + +type Config struct { + Database string `mapstructure:"database"` // Database to store bitcoin data. 
+ Postgres postgres.Config `mapstructure:"postgres"` +} diff --git a/modules/bitcoin/constants.go b/modules/bitcoin/constants.go new file mode 100644 index 0000000..4405b19 --- /dev/null +++ b/modules/bitcoin/constants.go @@ -0,0 +1,26 @@ +package bitcoin + +import ( + "github.com/Cleverse/go-utilities/utils" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/indexer-network/core/types" +) + +const ( + Version = "v0.0.1" + DBVersion = 1 +) + +var ( + // defaultCurrentBlockHeight is the default value for the current block height for first time indexing + defaultCurrentBlock = types.BlockHeader{ + Hash: common.ZeroHash, + Height: -1, + } + + lastV1Block = types.BlockHeader{ + Hash: *utils.Must(chainhash.NewHashFromStr("00000000000001aa077d7aa84c532a4d69bdbff519609d1da0835261b7a74eb6")), + Height: 227835, + } +) diff --git a/modules/bitcoin/database/postgresql/migrations/000001_initialize_table.down.sql b/modules/bitcoin/database/postgresql/migrations/000001_initialize_table.down.sql new file mode 100644 index 0000000..d41fd0e --- /dev/null +++ b/modules/bitcoin/database/postgresql/migrations/000001_initialize_table.down.sql @@ -0,0 +1,18 @@ +BEGIN; + +-- DROP INDEX +DROP INDEX IF EXISTS bitcoin_blocks_block_hash_idx; +DROP INDEX IF EXISTS bitcoin_transactions_tx_hash_idx; +DROP INDEX IF EXISTS bitcoin_transactions_block_hash_idx; +DROP INDEX IF EXISTS bitcoin_transaction_txouts_pkscript_idx; +DROP INDEX IF EXISTS bitcoin_transaction_txins_prevout_idx; + +-- DROP TABLE +DROP TABLE IF EXISTS "bitcoin_indexer_stats"; +DROP TABLE IF EXISTS "bitcoin_indexer_db_version"; +DROP TABLE IF EXISTS "bitcoin_transaction_txins"; +DROP TABLE IF EXISTS "bitcoin_transaction_txouts"; +DROP TABLE IF EXISTS "bitcoin_transactions"; +DROP TABLE IF EXISTS "bitcoin_blocks"; + +COMMIT; \ No newline at end of file diff --git a/modules/bitcoin/database/postgresql/migrations/000001_initialize_table.up.sql 
b/modules/bitcoin/database/postgresql/migrations/000001_initialize_table.up.sql new file mode 100644 index 0000000..9b291a9 --- /dev/null +++ b/modules/bitcoin/database/postgresql/migrations/000001_initialize_table.up.sql @@ -0,0 +1,72 @@ +BEGIN; + +-- Indexer Client Information + +CREATE TABLE IF NOT EXISTS "bitcoin_indexer_stats" ( + "id" BIGSERIAL PRIMARY KEY, + "client_version" TEXT NOT NULL, + "network" TEXT NOT NULL, + "created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE IF NOT EXISTS "bitcoin_indexer_db_version" ( + "id" BIGSERIAL PRIMARY KEY, + "version" INT NOT NULL, + "created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP +); +INSERT INTO "bitcoin_indexer_db_version" ("version") VALUES (1); + +-- Bitcoin Data + +CREATE TABLE IF NOT EXISTS "bitcoin_blocks" ( + "block_height" INT NOT NULL PRIMARY KEY, + "block_hash" TEXT NOT NULL, + "version" INT NOT NULL, + "merkle_root" TEXT NOT NULL, + "prev_block_hash" TEXT NOT NULL, + "timestamp" TIMESTAMP WITH TIME ZONE NOT NULL, + "bits" BIGINT NOT NULL, + "nonce" BIGINT NOT NULL +); + +CREATE INDEX IF NOT EXISTS bitcoin_blocks_block_hash_idx ON "bitcoin_blocks" USING HASH ("block_hash"); + +CREATE TABLE IF NOT EXISTS "bitcoin_transactions" ( + "tx_hash" TEXT NOT NULL, -- can't use as primary key because block v1 has duplicate tx hashes (coinbase tx). 
See: https://github.com/bitcoin/bitcoin/commit/a206b0ea12eb4606b93323268fc81a4f1f952531 + "version" INT NOT NULL, + "locktime" BIGINT NOT NULL, + "block_height" INT NOT NULL, + "block_hash" TEXT NOT NULL, + "idx" INT NOT NULL, + PRIMARY KEY ("block_height", "idx") +); + +CREATE INDEX IF NOT EXISTS bitcoin_transactions_tx_hash_idx ON "bitcoin_transactions" USING HASH ("tx_hash"); +CREATE INDEX IF NOT EXISTS bitcoin_transactions_block_hash_idx ON "bitcoin_transactions" USING HASH ("block_hash"); + +CREATE TABLE IF NOT EXISTS "bitcoin_transaction_txouts" ( + "tx_hash" TEXT NOT NULL, + "tx_idx" INT NOT NULL, + "pkscript" TEXT NOT NULL, -- Hex String + "value" BIGINT NOT NULL, + "is_spent" BOOLEAN NOT NULL DEFAULT false, + PRIMARY KEY ("tx_hash", "tx_idx") +); + +CREATE INDEX IF NOT EXISTS bitcoin_transaction_txouts_pkscript_idx ON "bitcoin_transaction_txouts" USING HASH ("pkscript"); + +CREATE TABLE IF NOT EXISTS "bitcoin_transaction_txins" ( + "tx_hash" TEXT NOT NULL, + "tx_idx" INT NOT NULL, + "prevout_tx_hash" TEXT NOT NULL, + "prevout_tx_idx" INT NOT NULL, + "prevout_pkscript" TEXT NULL, -- Hex String, Can be NULL if the prevout is a coinbase transaction + "scriptsig" TEXT NOT NULL, -- Hex String + "witness" TEXT NOT NULL DEFAULT '', -- Hex String + "sequence" BIGINT NOT NULL, + PRIMARY KEY ("tx_hash", "tx_idx") +); + +CREATE INDEX IF NOT EXISTS bitcoin_transaction_txins_prevout_idx ON "bitcoin_transaction_txins" USING BTREE ("prevout_tx_hash", "prevout_tx_idx"); + +COMMIT; \ No newline at end of file diff --git a/modules/bitcoin/database/postgresql/queries/data.sql b/modules/bitcoin/database/postgresql/queries/data.sql new file mode 100644 index 0000000..2c27e9c --- /dev/null +++ b/modules/bitcoin/database/postgresql/queries/data.sql @@ -0,0 +1,99 @@ +-- name: GetLatestBlockHeader :one +SELECT * FROM bitcoin_blocks ORDER BY block_height DESC LIMIT 1; + +-- name: InsertBlock :exec +INSERT INTO bitcoin_blocks 
("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce") VALUES ($1, $2, $3, $4, $5, $6, $7, $8); + +-- name: BatchInsertBlocks :exec +INSERT INTO bitcoin_blocks ("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce") +VALUES ( + unnest(@block_height_arr::INT[]), + unnest(@block_hash_arr::TEXT[]), + unnest(@version_arr::INT[]), + unnest(@merkle_root_arr::TEXT[]), + unnest(@prev_block_hash_arr::TEXT[]), + unnest(@timestamp_arr::TIMESTAMP WITH TIME ZONE[]), -- or use TIMESTAMPTZ + unnest(@bits_arr::BIGINT[]), + unnest(@nonce_arr::BIGINT[]) +); + +-- name: BatchInsertTransactions :exec +INSERT INTO bitcoin_transactions ("tx_hash","version","locktime","block_height","block_hash","idx") +VALUES ( + unnest(@tx_hash_arr::TEXT[]), + unnest(@version_arr::INT[]), + unnest(@locktime_arr::BIGINT[]), + unnest(@block_height_arr::INT[]), + unnest(@block_hash_arr::TEXT[]), + unnest(@idx_arr::INT[]) +); + +-- name: BatchInsertTransactionTxIns :exec +WITH update_txout AS ( + UPDATE "bitcoin_transaction_txouts" + SET "is_spent" = true + FROM (SELECT unnest(@prevout_tx_hash_arr::TEXT[]) as tx_hash, unnest(@prevout_tx_idx_arr::INT[]) as tx_idx) as txin + WHERE "bitcoin_transaction_txouts"."tx_hash" = txin.tx_hash AND "bitcoin_transaction_txouts"."tx_idx" = txin.tx_idx AND "is_spent" = false + RETURNING "bitcoin_transaction_txouts"."tx_hash", "bitcoin_transaction_txouts"."tx_idx", "pkscript" +), prepare_insert AS ( + SELECT input.tx_hash, input.tx_idx, prevout_tx_hash, prevout_tx_idx, update_txout.pkscript as prevout_pkscript, scriptsig, witness, sequence + FROM ( + SELECT + unnest(@tx_hash_arr::TEXT[]) as tx_hash, + unnest(@tx_idx_arr::INT[]) as tx_idx, + unnest(@prevout_tx_hash_arr::TEXT[]) as prevout_tx_hash, + unnest(@prevout_tx_idx_arr::INT[]) as prevout_tx_idx, + unnest(@scriptsig_arr::TEXT[]) as scriptsig, + unnest(@witness_arr::TEXT[]) as witness, + unnest(@sequence_arr::INT[]) as sequence + ) 
input LEFT JOIN update_txout ON "update_txout"."tx_hash" = "input"."prevout_tx_hash" AND "update_txout"."tx_idx" = "input"."prevout_tx_idx" +) +INSERT INTO bitcoin_transaction_txins ("tx_hash","tx_idx","prevout_tx_hash","prevout_tx_idx", "prevout_pkscript","scriptsig","witness","sequence") +SELECT "tx_hash", "tx_idx", "prevout_tx_hash", "prevout_tx_idx", "prevout_pkscript", "scriptsig", "witness", "sequence" FROM prepare_insert; + +-- name: BatchInsertTransactionTxOuts :exec +INSERT INTO bitcoin_transaction_txouts ("tx_hash","tx_idx","pkscript","value") +VALUES ( + unnest(@tx_hash_arr::TEXT[]), + unnest(@tx_idx_arr::INT[]), + unnest(@pkscript_arr::TEXT[]), + unnest(@value_arr::BIGINT[]) +); + +-- name: RevertData :exec +WITH delete_tx AS ( + DELETE FROM "bitcoin_transactions" WHERE "block_height" >= @from_height + RETURNING "tx_hash" +), delete_txin AS ( + DELETE FROM "bitcoin_transaction_txins" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx) + RETURNING "prevout_tx_hash", "prevout_tx_idx" +), delete_txout AS ( + DELETE FROM "bitcoin_transaction_txouts" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx) + RETURNING "tx_hash", "tx_idx" +), revert_txout_spent AS ( + UPDATE "bitcoin_transaction_txouts" + SET "is_spent" = false + WHERE + ("tx_hash", "tx_idx") IN (SELECT "prevout_tx_hash", "prevout_tx_idx" FROM delete_txin) AND + ("tx_hash", "tx_idx") NOT IN (SELECT "tx_hash", "tx_idx" FROM delete_txout) -- avoid to modified same row twice (modified the same row twice in a single statement is not supported) + RETURNING NULL +) +DELETE FROM "bitcoin_blocks" WHERE "bitcoin_blocks"."block_height" >= @from_height; + +-- name: GetBlockByHeight :one +SELECT * FROM bitcoin_blocks WHERE block_height = $1; + +-- name: GetBlocksByHeightRange :many +SELECT * FROM bitcoin_blocks WHERE block_height >= @from_height AND block_height <= @to_height ORDER BY block_height ASC; + +-- name: GetTransactionsByHeightRange :many +SELECT * FROM bitcoin_transactions WHERE block_height 
>= @from_height AND block_height <= @to_height; + +-- name: GetTransactionByHash :one +SELECT * FROM bitcoin_transactions WHERE tx_hash = $1; + +-- name: GetTransactionTxOutsByTxHashes :many +SELECT * FROM bitcoin_transaction_txouts WHERE tx_hash = ANY(@tx_hashes::TEXT[]); + +-- name: GetTransactionTxInsByTxHashes :many +SELECT * FROM bitcoin_transaction_txins WHERE tx_hash = ANY(@tx_hashes::TEXT[]); diff --git a/modules/bitcoin/database/postgresql/queries/info.sql b/modules/bitcoin/database/postgresql/queries/info.sql new file mode 100644 index 0000000..a9600c0 --- /dev/null +++ b/modules/bitcoin/database/postgresql/queries/info.sql @@ -0,0 +1,8 @@ +-- name: GetCurrentDBVersion :one +SELECT "version" FROM bitcoin_indexer_db_version ORDER BY id DESC LIMIT 1; + +-- name: GetCurrentIndexerStats :one +SELECT "client_version", "network" FROM bitcoin_indexer_stats ORDER BY id DESC LIMIT 1; + +-- name: UpdateIndexerStats :exec +INSERT INTO bitcoin_indexer_stats (client_version, network) VALUES ($1, $2); \ No newline at end of file diff --git a/modules/bitcoin/datagateway/bitcoin_data.go b/modules/bitcoin/datagateway/bitcoin_data.go new file mode 100644 index 0000000..f0af397 --- /dev/null +++ b/modules/bitcoin/datagateway/bitcoin_data.go @@ -0,0 +1,25 @@ +package datagateway + +import ( + "context" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/gaze-network/indexer-network/core/types" +) + +type BitcoinDataGateway interface { + BitcoinWriterDataDataGateway + BitcoinReaderDataDataGateway +} + +type BitcoinWriterDataDataGateway interface { + InsertBlocks(ctx context.Context, blocks []*types.Block) error + RevertBlocks(context.Context, int64) error +} + +type BitcoinReaderDataDataGateway interface { + GetLatestBlockHeader(context.Context) (types.BlockHeader, error) + GetBlockHeaderByHeight(ctx context.Context, blockHeight int64) (types.BlockHeader, error) + GetBlocksByHeightRange(ctx context.Context, from int64, to int64) ([]*types.Block, error) + 
GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) +} diff --git a/modules/bitcoin/datagateway/indexer_info.go b/modules/bitcoin/datagateway/indexer_info.go new file mode 100644 index 0000000..4270a5b --- /dev/null +++ b/modules/bitcoin/datagateway/indexer_info.go @@ -0,0 +1,13 @@ +package datagateway + +import ( + "context" + + "github.com/gaze-network/indexer-network/common" +) + +type IndexerInformationDataGateway interface { + GetCurrentDBVersion(ctx context.Context) (int32, error) + GetLatestIndexerStats(ctx context.Context) (version string, network common.Network, err error) + UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error +} diff --git a/modules/bitcoin/processor.go b/modules/bitcoin/processor.go new file mode 100644 index 0000000..ce52cf5 --- /dev/null +++ b/modules/bitcoin/processor.go @@ -0,0 +1,122 @@ +package bitcoin + +import ( + "context" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/indexers" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/internal/config" + "github.com/gaze-network/indexer-network/modules/bitcoin/datagateway" +) + +// Make sure to implement the BitcoinProcessor interface +var _ indexers.BitcoinProcessor = (*Processor)(nil) + +type Processor struct { + config config.Config + bitcoinDg datagateway.BitcoinDataGateway + indexerInfoDg datagateway.IndexerInformationDataGateway +} + +func NewProcessor(config config.Config, bitcoinDg datagateway.BitcoinDataGateway, indexerInfoDg datagateway.IndexerInformationDataGateway) *Processor { + return &Processor{ + config: config, + bitcoinDg: bitcoinDg, + indexerInfoDg: indexerInfoDg, + } +} + +func (p Processor) Name() string { + return "bitcoin" +} + +func (p *Processor) Process(ctx context.Context, inputs []*types.Block) error { + if len(inputs) == 0 { + return nil + } 
+ + // Process the given blocks before inserting to the database + inputs, err := p.process(ctx, inputs) + if err != nil { + return errors.WithStack(err) + } + + // Insert blocks + if err := p.bitcoinDg.InsertBlocks(ctx, inputs); err != nil { + return errors.Wrapf(err, "error during insert blocks, from: %d, to: %d", inputs[0].Header.Height, inputs[len(inputs)-1].Header.Height) + } + + return nil +} + +func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) { + b, err := p.bitcoinDg.GetLatestBlockHeader(ctx) + if err != nil { + if errors.Is(err, errs.NotFound) { + return defaultCurrentBlock, nil + } + return types.BlockHeader{}, errors.WithStack(err) + } + return b, nil +} + +func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) { + header, err := p.bitcoinDg.GetBlockHeaderByHeight(ctx, height) + if err != nil { + return types.BlockHeader{}, errors.WithStack(err) + } + return header, nil +} + +func (p *Processor) RevertData(ctx context.Context, from int64) error { + // to prevent remove txin/txout of duplicated coinbase transaction in the blocks 91842 and 91880 + // if you really want to revert the data before the block `227835`, you should reset the database and reindex the data instead. 
+ if from <= lastV1Block.Height { + return errors.Wrapf(errs.InvalidArgument, "can't revert data before block version 2, height: %d", lastV1Block.Height) + } + + if err := p.bitcoinDg.RevertBlocks(ctx, from); err != nil { + return errors.WithStack(err) + } + return nil +} + +func (p *Processor) VerifyStates(ctx context.Context) error { + // Check current db version with the required db version + { + dbVersion, err := p.indexerInfoDg.GetCurrentDBVersion(ctx) + if err != nil { + return errors.Wrap(err, "can't get current db version") + } + + if dbVersion != DBVersion { + return errors.Wrapf(errs.ConflictSetting, "db version mismatch, please upgrade to version %d", DBVersion) + } + } + + // Check if the latest indexed network is mismatched with configured network + { + _, network, err := p.indexerInfoDg.GetLatestIndexerStats(ctx) + if err != nil { + if errors.Is(err, errs.NotFound) { + goto end + } + return errors.Wrap(err, "can't get latest indexer stats") + } + + if network != p.config.Network { + return errors.Wrapf(errs.ConflictSetting, "network mismatch, latest indexed network: %q, configured network: %q. If you want to change the network, please reset the database", network, p.config.Network) + } + } + + // TODO: Verify the states of the indexed data to ensure the last shutdown was graceful and no missing data. 
+ +end: + if err := p.indexerInfoDg.UpdateIndexerStats(ctx, Version, p.config.Network); err != nil { + return errors.Wrap(err, "can't update indexer stats") + } + + return nil +} diff --git a/modules/bitcoin/processor_process.go b/modules/bitcoin/processor_process.go new file mode 100644 index 0000000..450eec2 --- /dev/null +++ b/modules/bitcoin/processor_process.go @@ -0,0 +1,91 @@ +package bitcoin + +import ( + "cmp" + "context" + "slices" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/core/types" +) + +// process is a processing rules for the given blocks before inserting to the database +// +// this function will modify the given data directly. +func (p *Processor) process(ctx context.Context, blocks []*types.Block) ([]*types.Block, error) { + if len(blocks) == 0 { + return blocks, nil + } + + // Sort ASC by block height + slices.SortFunc(blocks, func(t1, t2 *types.Block) int { + return cmp.Compare(t1.Header.Height, t2.Header.Height) + }) + + if !p.isContinueFromLatestIndexedBlock(ctx, blocks[0]) { + return nil, errors.New("given blocks are not continue from the latest indexed block") + } + + if !p.isBlocksSequential(blocks) { + return nil, errors.New("given blocks are not in sequence") + } + + p.removeDuplicateCoinbaseTxInputsOutputs(blocks) + + return blocks, nil +} + +// check if the given blocks are continue from the latest indexed block +// to prevent inserting out-of-order blocks or duplicate blocks +func (p *Processor) isBlocksSequential(blocks []*types.Block) bool { + if len(blocks) == 0 { + return true + } + + for i, block := range blocks { + if i == 0 { + continue + } + + if block.Header.Height != blocks[i-1].Header.Height+1 { + return false + } + } + + return true +} + +// check if the given blocks are continue from the latest indexed block +// to prevent inserting out-of-order blocks or duplicate blocks +func (p *Processor) isContinueFromLatestIndexedBlock(ctx context.Context, block *types.Block) bool { + latestBlock, 
err := p.CurrentBlock(ctx) + if err != nil { + return false + } + + return block.Header.Height == latestBlock.Height+1 +} + +// there 2 coinbase transaction that are duplicated in the blocks 91842 and 91880. +// if the given block version is v1 and height is `91842` or `91880`, +// then remove transaction inputs/outputs to prevent duplicate txin/txout error when inserting to the database. +// +// Theses duplicated coinbase transactions are having the same transaction input/output and +// utxo from these 2 duplicated coinbase txs can redeem only once. so, it's safe to remove them and can +// use inputs/outputs from the previous block. +// +// Duplicate Coinbase Transactions: +// - `454279874213763724535987336644243549a273058910332236515429488599` in blocks 91812, 91842 +// - `e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468` in blocks 91722, 91880 +// +// This function will modify the given data directly. +func (p *Processor) removeDuplicateCoinbaseTxInputsOutputs(blocks []*types.Block) { + for _, block := range blocks { + header := block.Header + if header.Version == 1 && (header.Height == 91842 || header.Height == 91880) { + // remove transaction inputs/outputs from coinbase transaction (first transaction) + block.Transactions[0].TxIn = nil + block.Transactions[0].TxOut = nil + } + } +} diff --git a/modules/bitcoin/processor_process_test.go b/modules/bitcoin/processor_process_test.go new file mode 100644 index 0000000..6e70940 --- /dev/null +++ b/modules/bitcoin/processor_process_test.go @@ -0,0 +1,144 @@ +package bitcoin + +import ( + "fmt" + "testing" + + "github.com/gaze-network/indexer-network/core/types" + "github.com/stretchr/testify/assert" +) + +func TestDuplicateCoinbaseTxHashHandling(t *testing.T) { + processor := Processor{} + generator := func() []*types.Block { + return []*types.Block{ + { + Header: types.BlockHeader{Height: 91842, Version: 1}, + Transactions: []*types.Transaction{ + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: 
[]*types.TxOut{{}, {}, {}, {}}, + }, + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: []*types.TxOut{{}, {}, {}, {}}, + }, + }, + }, + { + Header: types.BlockHeader{Height: 91880, Version: 1}, + Transactions: []*types.Transaction{ + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: []*types.TxOut{{}, {}, {}, {}}, + }, + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: []*types.TxOut{{}, {}, {}, {}}, + }, + }, + }, + } + } + + t.Run("all_duplicated_txs", func(t *testing.T) { + blocks := generator() + processor.removeDuplicateCoinbaseTxInputsOutputs(blocks) + + assert.Len(t, blocks, 2, "should not remove any blocks") + for _, block := range blocks { + assert.Len(t, block.Transactions, 2, "should not remove any transactions") + assert.Len(t, block.Transactions[0].TxIn, 0, "should remove tx inputs from coinbase transaction") + assert.Len(t, block.Transactions[0].TxOut, 0, "should remove tx outputs from coinbase transaction") + } + }) + + t.Run("not_duplicated_txs", func(t *testing.T) { + blocks := []*types.Block{ + { + Header: types.BlockHeader{Height: 91812, Version: 1}, + Transactions: []*types.Transaction{ + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: []*types.TxOut{{}, {}, {}, {}}, + }, + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: []*types.TxOut{{}, {}, {}, {}}, + }, + }, + }, + { + Header: types.BlockHeader{Height: 91722, Version: 1}, + Transactions: []*types.Transaction{ + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: []*types.TxOut{{}, {}, {}, {}}, + }, + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: []*types.TxOut{{}, {}, {}, {}}, + }, + }, + }, + } + processor.removeDuplicateCoinbaseTxInputsOutputs(blocks) + + assert.Len(t, blocks, 2, "should not remove any blocks") + for _, block := range blocks { + assert.Len(t, block.Transactions, 2, "should not remove any transactions") + assert.Len(t, block.Transactions[0].TxIn, 4, "should not remove tx inputs from coinbase transaction") + assert.Len(t, block.Transactions[0].TxOut, 4, "should not 
remove tx outputs from coinbase transaction") + } + }) + + t.Run("mixed", func(t *testing.T) { + blocks := []*types.Block{ + { + Header: types.BlockHeader{Height: 91812, Version: 1}, + Transactions: []*types.Transaction{ + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: []*types.TxOut{{}, {}, {}, {}}, + }, + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: []*types.TxOut{{}, {}, {}, {}}, + }, + }, + }, + } + blocks = append(blocks, generator()...) + blocks = append(blocks, &types.Block{ + Header: types.BlockHeader{Height: 91722, Version: 1}, + Transactions: []*types.Transaction{ + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: []*types.TxOut{{}, {}, {}, {}}, + }, + { + TxIn: []*types.TxIn{{}, {}, {}, {}}, + TxOut: []*types.TxOut{{}, {}, {}, {}}, + }, + }, + }) + processor.removeDuplicateCoinbaseTxInputsOutputs(blocks) + + assert.Len(t, blocks, 4, "should not remove any blocks") + + // only 2nd and 3rd blocks should be modified + for i, block := range blocks { + t.Run(fmt.Sprint(i), func(t *testing.T) { + if i == 1 || i == 2 { + assert.Len(t, block.Transactions, 2, "should not remove any transactions") + assert.Len(t, block.Transactions[0].TxIn, 0, "should remove tx inputs from coinbase transaction") + assert.Len(t, block.Transactions[0].TxOut, 0, "should remove tx outputs from coinbase transaction") + } else { + assert.Len(t, block.Transactions, 2, "should not remove any transactions") + assert.Lenf(t, block.Transactions[0].TxIn, 4, "should not remove tx inputs from coinbase transaction") + assert.Len(t, block.Transactions[0].TxOut, 4, "should not remove tx outputs from coinbase transaction") + } + }) + } + }) +} diff --git a/modules/bitcoin/repository/postgres/block.go b/modules/bitcoin/repository/postgres/block.go new file mode 100644 index 0000000..fabe78e --- /dev/null +++ b/modules/bitcoin/repository/postgres/block.go @@ -0,0 +1,169 @@ +package postgres + +import ( + "context" + + "github.com/cockroachdb/errors" + 
"github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen" + "github.com/jackc/pgx/v5" + "github.com/samber/lo" +) + +func (r *Repository) GetLatestBlockHeader(ctx context.Context) (types.BlockHeader, error) { + model, err := r.queries.GetLatestBlockHeader(ctx) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return types.BlockHeader{}, errors.Join(errs.NotFound, err) + } + return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block header") + } + + data, err := mapBlockHeaderModelToType(model) + if err != nil { + return types.BlockHeader{}, errors.Wrap(err, "failed to map block header model to type") + } + + return data, nil +} + +func (r *Repository) InsertBlocks(ctx context.Context, blocks []*types.Block) error { + if len(blocks) == 0 { + return nil + } + + blockParams, txParams, txoutParams, txinParams := mapBlocksTypeToParams(blocks) + + tx, err := r.db.Begin(ctx) + if err != nil { + return errors.Wrap(err, "failed to begin transaction") + } + defer tx.Rollback(ctx) + + queries := r.queries.WithTx(tx) + + if err := queries.BatchInsertBlocks(ctx, blockParams); err != nil { + return errors.Wrap(err, "failed to batch insert block headers") + } + + if err := queries.BatchInsertTransactions(ctx, txParams); err != nil { + return errors.Wrap(err, "failed to batch insert transactions") + } + + // Should insert txout first, then txin + // Because txin references txout + if err := queries.BatchInsertTransactionTxOuts(ctx, txoutParams); err != nil { + return errors.Wrap(err, "failed to batch insert transaction txins") + } + + if err := queries.BatchInsertTransactionTxIns(ctx, txinParams); err != nil { + return errors.Wrap(err, "failed to batch insert transaction txins") + } + + if err := tx.Commit(ctx); err != nil { + return errors.Wrap(err, "failed to commit transaction") + } + + return nil +} + +func (r *Repository) 
RevertBlocks(ctx context.Context, from int64) error { + tx, err := r.db.Begin(ctx) + if err != nil { + return errors.Wrap(err, "failed to begin transaction") + } + defer tx.Rollback(ctx) + + queries := r.queries.WithTx(tx) + if err := queries.RevertData(ctx, int32(from)); err != nil && !errors.Is(err, pgx.ErrNoRows) { + return errors.Wrap(err, "failed to revert data") + } + + if err := tx.Commit(ctx); err != nil { + return errors.Wrap(err, "failed to commit transaction") + } + + return nil +} + +func (r *Repository) GetBlockHeaderByHeight(ctx context.Context, blockHeight int64) (types.BlockHeader, error) { + blockModel, err := r.queries.GetBlockByHeight(ctx, int32(blockHeight)) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return types.BlockHeader{}, errors.Join(errs.NotFound, err) + } + return types.BlockHeader{}, errors.Wrap(err, "failed to get block by height") + } + + data, err := mapBlockHeaderModelToType(blockModel) + if err != nil { + return types.BlockHeader{}, errors.Wrap(err, "failed to map block header model to type") + } + return data, nil +} + +func (r *Repository) GetBlocksByHeightRange(ctx context.Context, from int64, to int64) ([]*types.Block, error) { + blocks, err := r.queries.GetBlocksByHeightRange(ctx, gen.GetBlocksByHeightRangeParams{ + FromHeight: int32(from), + ToHeight: int32(to), + }) + if err != nil { + return nil, errors.Wrap(err, "failed to get blocks by height range") + } + + if len(blocks) == 0 { + return []*types.Block{}, nil + } + + txs, err := r.queries.GetTransactionsByHeightRange(ctx, gen.GetTransactionsByHeightRangeParams{ + FromHeight: int32(from), + ToHeight: int32(to), + }) + if err != nil { + return nil, errors.Wrap(err, "failed to get transactions by height range") + } + + txHashes := lo.Map(txs, func(tx gen.BitcoinTransaction, _ int) string { return tx.TxHash }) + + txOuts, err := r.queries.GetTransactionTxOutsByTxHashes(ctx, txHashes) + if err != nil { + return nil, errors.Wrap(err, "failed to get transaction 
txouts by tx hashes") + } + + txIns, err := r.queries.GetTransactionTxInsByTxHashes(ctx, txHashes) + if err != nil { + return nil, errors.Wrap(err, "failed to get transaction txins by tx hashes") + } + + // Grouping result by block height and tx hash + groupedTxs := lo.GroupBy(txs, func(tx gen.BitcoinTransaction) int32 { return tx.BlockHeight }) + groupedTxOuts := lo.GroupBy(txOuts, func(txOut gen.BitcoinTransactionTxout) string { return txOut.TxHash }) + groupedTxIns := lo.GroupBy(txIns, func(txIn gen.BitcoinTransactionTxin) string { return txIn.TxHash }) + + var errs []error + result := lo.Map(blocks, func(blockModel gen.BitcoinBlock, _ int) *types.Block { + header, err := mapBlockHeaderModelToType(blockModel) + if err != nil { + errs = append(errs, errors.Wrap(err, "failed to map block header model to type")) + return nil + } + + txsModel := groupedTxs[blockModel.BlockHeight] + return &types.Block{ + Header: header, + Transactions: lo.Map(txsModel, func(txModel gen.BitcoinTransaction, _ int) *types.Transaction { + tx, err := mapTransactionModelToType(txModel, groupedTxIns[txModel.TxHash], groupedTxOuts[txModel.TxHash]) + if err != nil { + errs = append(errs, errors.Wrap(err, "failed to map transaction model to type")) + return nil + } + return &tx + }), + } + }) + if len(errs) > 0 { + return nil, errors.Wrap(errors.Join(errs...), "failed while mapping result") + } + return result, nil +} diff --git a/modules/bitcoin/repository/postgres/gen/data.sql.go b/modules/bitcoin/repository/postgres/gen/data.sql.go new file mode 100644 index 0000000..75e0570 --- /dev/null +++ b/modules/bitcoin/repository/postgres/gen/data.sql.go @@ -0,0 +1,408 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.26.0 +// source: data.sql + +package gen + +import ( + "context" + + "github.com/jackc/pgx/v5/pgtype" +) + +const batchInsertBlocks = `-- name: BatchInsertBlocks :exec +INSERT INTO bitcoin_blocks ("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce") +VALUES ( + unnest($1::INT[]), + unnest($2::TEXT[]), + unnest($3::INT[]), + unnest($4::TEXT[]), + unnest($5::TEXT[]), + unnest($6::TIMESTAMP WITH TIME ZONE[]), -- or use TIMESTAMPTZ + unnest($7::BIGINT[]), + unnest($8::BIGINT[]) +) +` + +type BatchInsertBlocksParams struct { + BlockHeightArr []int32 + BlockHashArr []string + VersionArr []int32 + MerkleRootArr []string + PrevBlockHashArr []string + TimestampArr []pgtype.Timestamptz + BitsArr []int64 + NonceArr []int64 +} + +func (q *Queries) BatchInsertBlocks(ctx context.Context, arg BatchInsertBlocksParams) error { + _, err := q.db.Exec(ctx, batchInsertBlocks, + arg.BlockHeightArr, + arg.BlockHashArr, + arg.VersionArr, + arg.MerkleRootArr, + arg.PrevBlockHashArr, + arg.TimestampArr, + arg.BitsArr, + arg.NonceArr, + ) + return err +} + +const batchInsertTransactionTxIns = `-- name: BatchInsertTransactionTxIns :exec +WITH update_txout AS ( + UPDATE "bitcoin_transaction_txouts" + SET "is_spent" = true + FROM (SELECT unnest($1::TEXT[]) as tx_hash, unnest($2::INT[]) as tx_idx) as txin + WHERE "bitcoin_transaction_txouts"."tx_hash" = txin.tx_hash AND "bitcoin_transaction_txouts"."tx_idx" = txin.tx_idx AND "is_spent" = false + RETURNING "bitcoin_transaction_txouts"."tx_hash", "bitcoin_transaction_txouts"."tx_idx", "pkscript" +), prepare_insert AS ( + SELECT input.tx_hash, input.tx_idx, prevout_tx_hash, prevout_tx_idx, update_txout.pkscript as prevout_pkscript, scriptsig, witness, sequence + FROM ( + SELECT + unnest($3::TEXT[]) as tx_hash, + unnest($4::INT[]) as tx_idx, + unnest($1::TEXT[]) as prevout_tx_hash, + unnest($2::INT[]) as prevout_tx_idx, + unnest($5::TEXT[]) as scriptsig, + unnest($6::TEXT[]) as 
witness, + unnest($7::INT[]) as sequence + ) input LEFT JOIN update_txout ON "update_txout"."tx_hash" = "input"."prevout_tx_hash" AND "update_txout"."tx_idx" = "input"."prevout_tx_idx" +) +INSERT INTO bitcoin_transaction_txins ("tx_hash","tx_idx","prevout_tx_hash","prevout_tx_idx", "prevout_pkscript","scriptsig","witness","sequence") +SELECT "tx_hash", "tx_idx", "prevout_tx_hash", "prevout_tx_idx", "prevout_pkscript", "scriptsig", "witness", "sequence" FROM prepare_insert +` + +type BatchInsertTransactionTxInsParams struct { + PrevoutTxHashArr []string + PrevoutTxIdxArr []int32 + TxHashArr []string + TxIdxArr []int32 + ScriptsigArr []string + WitnessArr []string + SequenceArr []int32 +} + +func (q *Queries) BatchInsertTransactionTxIns(ctx context.Context, arg BatchInsertTransactionTxInsParams) error { + _, err := q.db.Exec(ctx, batchInsertTransactionTxIns, + arg.PrevoutTxHashArr, + arg.PrevoutTxIdxArr, + arg.TxHashArr, + arg.TxIdxArr, + arg.ScriptsigArr, + arg.WitnessArr, + arg.SequenceArr, + ) + return err +} + +const batchInsertTransactionTxOuts = `-- name: BatchInsertTransactionTxOuts :exec +INSERT INTO bitcoin_transaction_txouts ("tx_hash","tx_idx","pkscript","value") +VALUES ( + unnest($1::TEXT[]), + unnest($2::INT[]), + unnest($3::TEXT[]), + unnest($4::BIGINT[]) +) +` + +type BatchInsertTransactionTxOutsParams struct { + TxHashArr []string + TxIdxArr []int32 + PkscriptArr []string + ValueArr []int64 +} + +func (q *Queries) BatchInsertTransactionTxOuts(ctx context.Context, arg BatchInsertTransactionTxOutsParams) error { + _, err := q.db.Exec(ctx, batchInsertTransactionTxOuts, + arg.TxHashArr, + arg.TxIdxArr, + arg.PkscriptArr, + arg.ValueArr, + ) + return err +} + +const batchInsertTransactions = `-- name: BatchInsertTransactions :exec +INSERT INTO bitcoin_transactions ("tx_hash","version","locktime","block_height","block_hash","idx") +VALUES ( + unnest($1::TEXT[]), + unnest($2::INT[]), + unnest($3::BIGINT[]), + unnest($4::INT[]), + unnest($5::TEXT[]), + 
unnest($6::INT[]) +) +` + +type BatchInsertTransactionsParams struct { + TxHashArr []string + VersionArr []int32 + LocktimeArr []int64 + BlockHeightArr []int32 + BlockHashArr []string + IdxArr []int32 +} + +func (q *Queries) BatchInsertTransactions(ctx context.Context, arg BatchInsertTransactionsParams) error { + _, err := q.db.Exec(ctx, batchInsertTransactions, + arg.TxHashArr, + arg.VersionArr, + arg.LocktimeArr, + arg.BlockHeightArr, + arg.BlockHashArr, + arg.IdxArr, + ) + return err +} + +const getBlockByHeight = `-- name: GetBlockByHeight :one +SELECT block_height, block_hash, version, merkle_root, prev_block_hash, timestamp, bits, nonce FROM bitcoin_blocks WHERE block_height = $1 +` + +func (q *Queries) GetBlockByHeight(ctx context.Context, blockHeight int32) (BitcoinBlock, error) { + row := q.db.QueryRow(ctx, getBlockByHeight, blockHeight) + var i BitcoinBlock + err := row.Scan( + &i.BlockHeight, + &i.BlockHash, + &i.Version, + &i.MerkleRoot, + &i.PrevBlockHash, + &i.Timestamp, + &i.Bits, + &i.Nonce, + ) + return i, err +} + +const getBlocksByHeightRange = `-- name: GetBlocksByHeightRange :many +SELECT block_height, block_hash, version, merkle_root, prev_block_hash, timestamp, bits, nonce FROM bitcoin_blocks WHERE block_height >= $1 AND block_height <= $2 ORDER BY block_height ASC +` + +type GetBlocksByHeightRangeParams struct { + FromHeight int32 + ToHeight int32 +} + +func (q *Queries) GetBlocksByHeightRange(ctx context.Context, arg GetBlocksByHeightRangeParams) ([]BitcoinBlock, error) { + rows, err := q.db.Query(ctx, getBlocksByHeightRange, arg.FromHeight, arg.ToHeight) + if err != nil { + return nil, err + } + defer rows.Close() + var items []BitcoinBlock + for rows.Next() { + var i BitcoinBlock + if err := rows.Scan( + &i.BlockHeight, + &i.BlockHash, + &i.Version, + &i.MerkleRoot, + &i.PrevBlockHash, + &i.Timestamp, + &i.Bits, + &i.Nonce, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return 
nil, err + } + return items, nil +} + +const getLatestBlockHeader = `-- name: GetLatestBlockHeader :one +SELECT block_height, block_hash, version, merkle_root, prev_block_hash, timestamp, bits, nonce FROM bitcoin_blocks ORDER BY block_height DESC LIMIT 1 +` + +func (q *Queries) GetLatestBlockHeader(ctx context.Context) (BitcoinBlock, error) { + row := q.db.QueryRow(ctx, getLatestBlockHeader) + var i BitcoinBlock + err := row.Scan( + &i.BlockHeight, + &i.BlockHash, + &i.Version, + &i.MerkleRoot, + &i.PrevBlockHash, + &i.Timestamp, + &i.Bits, + &i.Nonce, + ) + return i, err +} + +const getTransactionByHash = `-- name: GetTransactionByHash :one +SELECT tx_hash, version, locktime, block_height, block_hash, idx FROM bitcoin_transactions WHERE tx_hash = $1 +` + +func (q *Queries) GetTransactionByHash(ctx context.Context, txHash string) (BitcoinTransaction, error) { + row := q.db.QueryRow(ctx, getTransactionByHash, txHash) + var i BitcoinTransaction + err := row.Scan( + &i.TxHash, + &i.Version, + &i.Locktime, + &i.BlockHeight, + &i.BlockHash, + &i.Idx, + ) + return i, err +} + +const getTransactionTxInsByTxHashes = `-- name: GetTransactionTxInsByTxHashes :many +SELECT tx_hash, tx_idx, prevout_tx_hash, prevout_tx_idx, prevout_pkscript, scriptsig, witness, sequence FROM bitcoin_transaction_txins WHERE tx_hash = ANY($1::TEXT[]) +` + +func (q *Queries) GetTransactionTxInsByTxHashes(ctx context.Context, txHashes []string) ([]BitcoinTransactionTxin, error) { + rows, err := q.db.Query(ctx, getTransactionTxInsByTxHashes, txHashes) + if err != nil { + return nil, err + } + defer rows.Close() + var items []BitcoinTransactionTxin + for rows.Next() { + var i BitcoinTransactionTxin + if err := rows.Scan( + &i.TxHash, + &i.TxIdx, + &i.PrevoutTxHash, + &i.PrevoutTxIdx, + &i.PrevoutPkscript, + &i.Scriptsig, + &i.Witness, + &i.Sequence, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + 
+const getTransactionTxOutsByTxHashes = `-- name: GetTransactionTxOutsByTxHashes :many +SELECT tx_hash, tx_idx, pkscript, value, is_spent FROM bitcoin_transaction_txouts WHERE tx_hash = ANY($1::TEXT[]) +` + +func (q *Queries) GetTransactionTxOutsByTxHashes(ctx context.Context, txHashes []string) ([]BitcoinTransactionTxout, error) { + rows, err := q.db.Query(ctx, getTransactionTxOutsByTxHashes, txHashes) + if err != nil { + return nil, err + } + defer rows.Close() + var items []BitcoinTransactionTxout + for rows.Next() { + var i BitcoinTransactionTxout + if err := rows.Scan( + &i.TxHash, + &i.TxIdx, + &i.Pkscript, + &i.Value, + &i.IsSpent, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const getTransactionsByHeightRange = `-- name: GetTransactionsByHeightRange :many +SELECT tx_hash, version, locktime, block_height, block_hash, idx FROM bitcoin_transactions WHERE block_height >= $1 AND block_height <= $2 +` + +type GetTransactionsByHeightRangeParams struct { + FromHeight int32 + ToHeight int32 +} + +func (q *Queries) GetTransactionsByHeightRange(ctx context.Context, arg GetTransactionsByHeightRangeParams) ([]BitcoinTransaction, error) { + rows, err := q.db.Query(ctx, getTransactionsByHeightRange, arg.FromHeight, arg.ToHeight) + if err != nil { + return nil, err + } + defer rows.Close() + var items []BitcoinTransaction + for rows.Next() { + var i BitcoinTransaction + if err := rows.Scan( + &i.TxHash, + &i.Version, + &i.Locktime, + &i.BlockHeight, + &i.BlockHash, + &i.Idx, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const insertBlock = `-- name: InsertBlock :exec +INSERT INTO bitcoin_blocks ("block_height","block_hash","version","merkle_root","prev_block_hash","timestamp","bits","nonce") VALUES ($1, $2, $3, $4, $5, $6, $7, $8) +` + +type 
InsertBlockParams struct { + BlockHeight int32 + BlockHash string + Version int32 + MerkleRoot string + PrevBlockHash string + Timestamp pgtype.Timestamptz + Bits int64 + Nonce int64 +} + +func (q *Queries) InsertBlock(ctx context.Context, arg InsertBlockParams) error { + _, err := q.db.Exec(ctx, insertBlock, + arg.BlockHeight, + arg.BlockHash, + arg.Version, + arg.MerkleRoot, + arg.PrevBlockHash, + arg.Timestamp, + arg.Bits, + arg.Nonce, + ) + return err +} + +const revertData = `-- name: RevertData :exec +WITH delete_tx AS ( + DELETE FROM "bitcoin_transactions" WHERE "block_height" >= $1 + RETURNING "tx_hash" +), delete_txin AS ( + DELETE FROM "bitcoin_transaction_txins" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx) + RETURNING "prevout_tx_hash", "prevout_tx_idx" +), delete_txout AS ( + DELETE FROM "bitcoin_transaction_txouts" WHERE "tx_hash" = ANY(SELECT "tx_hash" FROM delete_tx) + RETURNING "tx_hash", "tx_idx" +), revert_txout_spent AS ( + UPDATE "bitcoin_transaction_txouts" + SET "is_spent" = false + WHERE + ("tx_hash", "tx_idx") IN (SELECT "prevout_tx_hash", "prevout_tx_idx" FROM delete_txin) AND + ("tx_hash", "tx_idx") NOT IN (SELECT "tx_hash", "tx_idx" FROM delete_txout) -- avoid to modified same row twice (modified the same row twice in a single statement is not supported) + RETURNING NULL +) +DELETE FROM "bitcoin_blocks" WHERE "bitcoin_blocks"."block_height" >= $1 +` + +func (q *Queries) RevertData(ctx context.Context, fromHeight int32) error { + _, err := q.db.Exec(ctx, revertData, fromHeight) + return err +} diff --git a/modules/bitcoin/repository/postgres/gen/db.go b/modules/bitcoin/repository/postgres/gen/db.go new file mode 100644 index 0000000..3ccd3c9 --- /dev/null +++ b/modules/bitcoin/repository/postgres/gen/db.go @@ -0,0 +1,32 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.26.0 + +package gen + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" +) + +type DBTX interface { + Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) + Query(context.Context, string, ...interface{}) (pgx.Rows, error) + QueryRow(context.Context, string, ...interface{}) pgx.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx pgx.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/modules/bitcoin/repository/postgres/gen/info.sql.go b/modules/bitcoin/repository/postgres/gen/info.sql.go new file mode 100644 index 0000000..c5d4c5d --- /dev/null +++ b/modules/bitcoin/repository/postgres/gen/info.sql.go @@ -0,0 +1,51 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.26.0 +// source: info.sql + +package gen + +import ( + "context" +) + +const getCurrentDBVersion = `-- name: GetCurrentDBVersion :one +SELECT "version" FROM bitcoin_indexer_db_version ORDER BY id DESC LIMIT 1 +` + +func (q *Queries) GetCurrentDBVersion(ctx context.Context) (int32, error) { + row := q.db.QueryRow(ctx, getCurrentDBVersion) + var version int32 + err := row.Scan(&version) + return version, err +} + +const getCurrentIndexerStats = `-- name: GetCurrentIndexerStats :one +SELECT "client_version", "network" FROM bitcoin_indexer_stats ORDER BY id DESC LIMIT 1 +` + +type GetCurrentIndexerStatsRow struct { + ClientVersion string + Network string +} + +func (q *Queries) GetCurrentIndexerStats(ctx context.Context) (GetCurrentIndexerStatsRow, error) { + row := q.db.QueryRow(ctx, getCurrentIndexerStats) + var i GetCurrentIndexerStatsRow + err := row.Scan(&i.ClientVersion, &i.Network) + return i, err +} + +const updateIndexerStats = `-- name: UpdateIndexerStats :exec +INSERT INTO bitcoin_indexer_stats (client_version, network) VALUES ($1, $2) +` + +type UpdateIndexerStatsParams struct { + ClientVersion string 
+ Network string +} + +func (q *Queries) UpdateIndexerStats(ctx context.Context, arg UpdateIndexerStatsParams) error { + _, err := q.db.Exec(ctx, updateIndexerStats, arg.ClientVersion, arg.Network) + return err +} diff --git a/modules/bitcoin/repository/postgres/gen/models.go b/modules/bitcoin/repository/postgres/gen/models.go new file mode 100644 index 0000000..d9d9c94 --- /dev/null +++ b/modules/bitcoin/repository/postgres/gen/models.go @@ -0,0 +1,61 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.26.0 + +package gen + +import ( + "github.com/jackc/pgx/v5/pgtype" +) + +type BitcoinBlock struct { + BlockHeight int32 + BlockHash string + Version int32 + MerkleRoot string + PrevBlockHash string + Timestamp pgtype.Timestamptz + Bits int64 + Nonce int64 +} + +type BitcoinIndexerDbVersion struct { + Id int64 + Version int32 + CreatedAt pgtype.Timestamptz +} + +type BitcoinIndexerStat struct { + Id int64 + ClientVersion string + Network string + CreatedAt pgtype.Timestamptz +} + +type BitcoinTransaction struct { + TxHash string + Version int32 + Locktime int64 + BlockHeight int32 + BlockHash string + Idx int32 +} + +type BitcoinTransactionTxin struct { + TxHash string + TxIdx int32 + PrevoutTxHash string + PrevoutTxIdx int32 + PrevoutPkscript pgtype.Text + Scriptsig string + Witness string + Sequence int64 +} + +type BitcoinTransactionTxout struct { + TxHash string + TxIdx int32 + Pkscript string + Value int64 + IsSpent bool +} diff --git a/modules/bitcoin/repository/postgres/info.go b/modules/bitcoin/repository/postgres/info.go new file mode 100644 index 0000000..9b21d04 --- /dev/null +++ b/modules/bitcoin/repository/postgres/info.go @@ -0,0 +1,44 @@ +package postgres + +import ( + "context" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/modules/bitcoin/datagateway" + 
"github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen" + "github.com/jackc/pgx/v5" +) + +// Make sure Repository implements the IndexerInformationDataGateway interface +var _ datagateway.IndexerInformationDataGateway = (*Repository)(nil) + +func (r *Repository) GetCurrentDBVersion(ctx context.Context) (int32, error) { + version, err := r.queries.GetCurrentDBVersion(ctx) + if err != nil { + return 0, errors.WithStack(err) + } + return version, nil +} + +func (r *Repository) GetLatestIndexerStats(ctx context.Context) (string, common.Network, error) { + stats, err := r.queries.GetCurrentIndexerStats(ctx) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return "", "", errors.Join(errs.NotFound, err) + } + return "", "", errors.WithStack(err) + } + return stats.ClientVersion, common.Network(stats.Network), nil +} + +func (r *Repository) UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error { + if err := r.queries.UpdateIndexerStats(ctx, gen.UpdateIndexerStatsParams{ + ClientVersion: clientVersion, + Network: network.String(), + }); err != nil { + return errors.WithStack(err) + } + return nil +} diff --git a/modules/bitcoin/repository/postgres/mappers.go b/modules/bitcoin/repository/postgres/mappers.go new file mode 100644 index 0000000..4b24ed0 --- /dev/null +++ b/modules/bitcoin/repository/postgres/mappers.go @@ -0,0 +1,197 @@ +package postgres + +import ( + "cmp" + "encoding/hex" + "slices" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen" + "github.com/gaze-network/indexer-network/pkg/btcutils" + "github.com/jackc/pgx/v5/pgtype" +) + +func mapBlockHeaderModelToType(src gen.BitcoinBlock) (types.BlockHeader, error) { + hash, err := chainhash.NewHashFromStr(src.BlockHash) + 
if err != nil { + return types.BlockHeader{}, errors.Join(errors.Wrap(err, "failed to parse block hash"), errs.InternalError) + } + prevHash, err := chainhash.NewHashFromStr(src.PrevBlockHash) + if err != nil { + return types.BlockHeader{}, errors.Join(errors.Wrap(err, "failed to parse prev block hash"), errs.InternalError) + } + merkleRoot, err := chainhash.NewHashFromStr(src.MerkleRoot) + if err != nil { + return types.BlockHeader{}, errors.Join(errors.Wrap(err, "failed to parse merkle root"), errs.InternalError) + } + return types.BlockHeader{ + Hash: *hash, + Height: int64(src.BlockHeight), + Version: src.Version, + PrevBlock: *prevHash, + MerkleRoot: *merkleRoot, + Timestamp: src.Timestamp.Time, + Bits: uint32(src.Bits), + Nonce: uint32(src.Nonce), + }, nil +} + +func mapBlocksTypeToParams(src []*types.Block) (gen.BatchInsertBlocksParams, gen.BatchInsertTransactionsParams, gen.BatchInsertTransactionTxOutsParams, gen.BatchInsertTransactionTxInsParams) { + blocks := gen.BatchInsertBlocksParams{ + BlockHeightArr: make([]int32, 0, len(src)), + BlockHashArr: make([]string, 0, len(src)), + VersionArr: make([]int32, 0, len(src)), + MerkleRootArr: make([]string, 0, len(src)), + PrevBlockHashArr: make([]string, 0, len(src)), + TimestampArr: make([]pgtype.Timestamptz, 0, len(src)), + BitsArr: make([]int64, 0, len(src)), + NonceArr: make([]int64, 0, len(src)), + } + txs := gen.BatchInsertTransactionsParams{ + TxHashArr: []string{}, + VersionArr: []int32{}, + LocktimeArr: []int64{}, + BlockHeightArr: []int32{}, + BlockHashArr: []string{}, + IdxArr: []int32{}, + } + txouts := gen.BatchInsertTransactionTxOutsParams{ + TxHashArr: []string{}, + TxIdxArr: []int32{}, + PkscriptArr: []string{}, + ValueArr: []int64{}, + } + txins := gen.BatchInsertTransactionTxInsParams{ + PrevoutTxHashArr: []string{}, + PrevoutTxIdxArr: []int32{}, + TxHashArr: []string{}, + TxIdxArr: []int32{}, + ScriptsigArr: []string{}, + WitnessArr: []string{}, + SequenceArr: []int32{}, + } + + for _, block 
:= range src { + blockHash := block.Header.Hash.String() + + // Batch insert blocks + blocks.BlockHeightArr = append(blocks.BlockHeightArr, int32(block.Header.Height)) + blocks.BlockHashArr = append(blocks.BlockHashArr, blockHash) + blocks.VersionArr = append(blocks.VersionArr, block.Header.Version) + blocks.MerkleRootArr = append(blocks.MerkleRootArr, block.Header.MerkleRoot.String()) + blocks.PrevBlockHashArr = append(blocks.PrevBlockHashArr, block.Header.PrevBlock.String()) + blocks.TimestampArr = append(blocks.TimestampArr, pgtype.Timestamptz{ + Time: block.Header.Timestamp, + Valid: true, + }) + blocks.BitsArr = append(blocks.BitsArr, int64(block.Header.Bits)) + blocks.NonceArr = append(blocks.NonceArr, int64(block.Header.Nonce)) + + for txIdx, srcTx := range block.Transactions { + txHash := srcTx.TxHash.String() + + // Batch insert transactions + txs.TxHashArr = append(txs.TxHashArr, txHash) + txs.VersionArr = append(txs.VersionArr, srcTx.Version) + txs.LocktimeArr = append(txs.LocktimeArr, int64(srcTx.LockTime)) + txs.BlockHeightArr = append(txs.BlockHeightArr, int32(block.Header.Height)) + txs.BlockHashArr = append(txs.BlockHashArr, blockHash) + txs.IdxArr = append(txs.IdxArr, int32(txIdx)) + + // Batch insert txins + for idx, txin := range srcTx.TxIn { + var witness string + if len(txin.Witness) > 0 { + witness = btcutils.WitnessToString(txin.Witness) + } + txins.TxHashArr = append(txins.TxHashArr, txHash) + txins.TxIdxArr = append(txins.TxIdxArr, int32(idx)) + txins.PrevoutTxHashArr = append(txins.PrevoutTxHashArr, txin.PreviousOutTxHash.String()) + txins.PrevoutTxIdxArr = append(txins.PrevoutTxIdxArr, int32(txin.PreviousOutIndex)) + txins.ScriptsigArr = append(txins.ScriptsigArr, hex.EncodeToString(txin.SignatureScript)) + txins.WitnessArr = append(txins.WitnessArr, witness) + txins.SequenceArr = append(txins.SequenceArr, int32(txin.Sequence)) + } + + // Batch insert txouts + for idx, txout := range srcTx.TxOut { + txouts.TxHashArr = 
append(txouts.TxHashArr, txHash) + txouts.TxIdxArr = append(txouts.TxIdxArr, int32(idx)) + txouts.PkscriptArr = append(txouts.PkscriptArr, hex.EncodeToString(txout.PkScript)) + txouts.ValueArr = append(txouts.ValueArr, txout.Value) + } + } + } + return blocks, txs, txouts, txins +} + +func mapTransactionModelToType(src gen.BitcoinTransaction, txInModel []gen.BitcoinTransactionTxin, txOutModels []gen.BitcoinTransactionTxout) (types.Transaction, error) { + blockHash, err := chainhash.NewHashFromStr(src.BlockHash) + if err != nil { + return types.Transaction{}, errors.Wrap(err, "failed to parse block hash") + } + + txHash, err := chainhash.NewHashFromStr(src.TxHash) + if err != nil { + return types.Transaction{}, errors.Wrap(err, "failed to parse tx hash") + } + + // Sort txins and txouts by index (Asc) + slices.SortFunc(txOutModels, func(i, j gen.BitcoinTransactionTxout) int { + return cmp.Compare(i.TxIdx, j.TxIdx) + }) + slices.SortFunc(txInModel, func(i, j gen.BitcoinTransactionTxin) int { + return cmp.Compare(i.TxIdx, j.TxIdx) + }) + + txIns := make([]*types.TxIn, 0, len(txInModel)) + txOuts := make([]*types.TxOut, 0, len(txOutModels)) + for _, txInModel := range txInModel { + scriptsig, err := hex.DecodeString(txInModel.Scriptsig) + if err != nil { + return types.Transaction{}, errors.Wrap(err, "failed to decode scriptsig") + } + + prevoutTxHash, err := chainhash.NewHashFromStr(txInModel.PrevoutTxHash) + if err != nil { + return types.Transaction{}, errors.Wrap(err, "failed to parse prevout tx hash") + } + + witness, err := btcutils.WitnessFromString(txInModel.Witness) + if err != nil { + return types.Transaction{}, errors.Wrap(err, "failed to parse witness from hex string") + } + + txIns = append(txIns, &types.TxIn{ + SignatureScript: scriptsig, + Witness: witness, + Sequence: uint32(txInModel.Sequence), + PreviousOutIndex: uint32(txInModel.PrevoutTxIdx), + PreviousOutTxHash: *prevoutTxHash, + }) + } + for _, txOutModel := range txOutModels { + pkscript, err := 
hex.DecodeString(txOutModel.Pkscript) + if err != nil { + return types.Transaction{}, errors.Wrap(err, "failed to decode pkscript") + } + txOuts = append(txOuts, &types.TxOut{ + PkScript: pkscript, + Value: txOutModel.Value, + }) + } + + return types.Transaction{ + BlockHeight: int64(src.BlockHeight), + BlockHash: *blockHash, + Index: uint32(src.Idx), + TxHash: *txHash, + Version: src.Version, + LockTime: uint32(src.Locktime), + TxIn: txIns, + TxOut: txOuts, + }, nil +} diff --git a/modules/bitcoin/repository/postgres/postgres.go b/modules/bitcoin/repository/postgres/postgres.go new file mode 100644 index 0000000..714f494 --- /dev/null +++ b/modules/bitcoin/repository/postgres/postgres.go @@ -0,0 +1,22 @@ +package postgres + +import ( + "github.com/gaze-network/indexer-network/internal/postgres" + "github.com/gaze-network/indexer-network/modules/bitcoin/datagateway" + "github.com/gaze-network/indexer-network/modules/bitcoin/repository/postgres/gen" +) + +// Make sure Repository implements the BitcoinDataGateway interface +var _ datagateway.BitcoinDataGateway = (*Repository)(nil) + +type Repository struct { + db postgres.DB + queries *gen.Queries +} + +func NewRepository(db postgres.DB) *Repository { + return &Repository{ + db: db, + queries: gen.New(db), + } +} diff --git a/modules/bitcoin/repository/postgres/transaction.go b/modules/bitcoin/repository/postgres/transaction.go new file mode 100644 index 0000000..c932da7 --- /dev/null +++ b/modules/bitcoin/repository/postgres/transaction.go @@ -0,0 +1,35 @@ +package postgres + +import ( + "context" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/types" + "github.com/jackc/pgx/v5" +) + +func (r *Repository) GetTransactionByHash(ctx context.Context, txHash chainhash.Hash) (*types.Transaction, error) { + model, err := r.queries.GetTransactionByHash(ctx, txHash.String()) + if err != 
nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errors.Join(errs.NotFound, err) + } + return nil, errors.Wrap(err, "failed to get transaction by hash") + } + txIns, err := r.queries.GetTransactionTxInsByTxHashes(ctx, []string{txHash.String()}) + if err != nil { + return nil, errors.Wrap(err, "failed to get transaction txins by tx hashes") + } + txOuts, err := r.queries.GetTransactionTxOutsByTxHashes(ctx, []string{txHash.String()}) + if err != nil && !errors.Is(err, pgx.ErrNoRows) { + return nil, errors.Wrap(err, "failed to get transaction txouts by tx hashes") + } + + tx, err := mapTransactionModelToType(model, txIns, txOuts) + if err != nil { + return nil, errors.Wrap(err, "failed to map transaction model to type") + } + return &tx, nil +} diff --git a/modules/runes/.gitkeep b/modules/runes/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/modules/runes/api/api.go b/modules/runes/api/api.go new file mode 100644 index 0000000..55aed5f --- /dev/null +++ b/modules/runes/api/api.go @@ -0,0 +1,11 @@ +package api + +import ( + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/indexer-network/modules/runes/api/httphandler" + "github.com/gaze-network/indexer-network/modules/runes/usecase" +) + +func NewHTTPHandler(network common.Network, usecase *usecase.Usecase) *httphandler.HttpHandler { + return httphandler.New(network, usecase) +} diff --git a/modules/runes/api/httphandler/get_balances_by_address.go b/modules/runes/api/httphandler/get_balances_by_address.go new file mode 100644 index 0000000..515903d --- /dev/null +++ b/modules/runes/api/httphandler/get_balances_by_address.go @@ -0,0 +1,116 @@ +package httphandler + +import ( + "slices" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" + "github.com/gofiber/fiber/v2" + "github.com/samber/lo" +) + +type 
getBalancesByAddressRequest struct { + Wallet string `params:"wallet"` + Id string `query:"id"` + BlockHeight uint64 `query:"blockHeight"` +} + +func (r getBalancesByAddressRequest) Validate() error { + var errList []error + if r.Wallet == "" { + errList = append(errList, errors.New("'wallet' is required")) + } + if r.Id != "" && !isRuneIdOrRuneName(r.Id) { + errList = append(errList, errors.New("'id' is not valid rune id or rune name")) + } + return errs.WithPublicMessage(errors.Join(errList...), "validation error") +} + +type balance struct { + Amount uint128.Uint128 `json:"amount"` + Id runes.RuneId `json:"id"` + Name runes.SpacedRune `json:"name"` + Symbol string `json:"symbol"` + Decimals uint8 `json:"decimals"` +} + +type getBalancesByAddressResult struct { + List []balance `json:"list"` + BlockHeight uint64 `json:"blockHeight"` +} + +type getBalancesByAddressResponse = HttpResponse[getBalancesByAddressResult] + +func (h *HttpHandler) GetBalancesByAddress(ctx *fiber.Ctx) (err error) { + var req getBalancesByAddressRequest + if err := ctx.ParamsParser(&req); err != nil { + return errors.WithStack(err) + } + if err := ctx.QueryParser(&req); err != nil { + return errors.WithStack(err) + } + if err := req.Validate(); err != nil { + return errors.WithStack(err) + } + + pkScript, ok := resolvePkScript(h.network, req.Wallet) + if !ok { + return errs.NewPublicError("unable to resolve pkscript from \"wallet\"") + } + + blockHeight := req.BlockHeight + if blockHeight == 0 { + blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext()) + if err != nil { + return errors.Wrap(err, "error during GetLatestBlock") + } + blockHeight = uint64(blockHeader.Height) + } + + balances, err := h.usecase.GetBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight) + if err != nil { + return errors.Wrap(err, "error during GetBalancesByPkScript") + } + + runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id) + if ok { + // filter out balances that don't match the requested rune 
id + for key := range balances { + if key != runeId { + delete(balances, key) + } + } + } + + balanceRuneIds := lo.Keys(balances) + runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), balanceRuneIds) + if err != nil { + return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch") + } + + balanceList := make([]balance, 0, len(balances)) + for id, b := range balances { + runeEntry := runeEntries[id] + balanceList = append(balanceList, balance{ + Amount: b.Amount, + Id: id, + Name: runeEntry.SpacedRune, + Symbol: string(runeEntry.Symbol), + Decimals: runeEntry.Divisibility, + }) + } + slices.SortFunc(balanceList, func(i, j balance) int { + return j.Amount.Cmp(i.Amount) + }) + + resp := getBalancesByAddressResponse{ + Result: &getBalancesByAddressResult{ + BlockHeight: blockHeight, + List: balanceList, + }, + } + + return errors.WithStack(ctx.JSON(resp)) +} diff --git a/modules/runes/api/httphandler/get_balances_by_address_batch.go b/modules/runes/api/httphandler/get_balances_by_address_batch.go new file mode 100644 index 0000000..77ca29b --- /dev/null +++ b/modules/runes/api/httphandler/get_balances_by_address_batch.go @@ -0,0 +1,139 @@ +package httphandler + +import ( + "context" + "fmt" + "slices" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gofiber/fiber/v2" + "github.com/samber/lo" + "golang.org/x/sync/errgroup" +) + +type getBalanceQuery struct { + Wallet string `json:"wallet"` + Id string `json:"id"` + BlockHeight uint64 `json:"blockHeight"` +} + +type getBalancesByAddressBatchRequest struct { + Queries []getBalanceQuery `json:"queries"` +} + +func (r getBalancesByAddressBatchRequest) Validate() error { + var errList []error + for _, query := range r.Queries { + if query.Wallet == "" { + errList = append(errList, errors.Errorf("queries[%d]: 'wallet' is required")) + } + if query.Id != "" && !isRuneIdOrRuneName(query.Id) { + errList = append(errList, 
errors.Errorf("queries[%d]: 'id' is not valid rune id or rune name")) + } + } + return errs.WithPublicMessage(errors.Join(errList...), "validation error") +} + +type getBalancesByAddressBatchResult struct { + List []*getBalancesByAddressResult `json:"list"` +} + +type getBalancesByAddressBatchResponse = HttpResponse[getBalancesByAddressBatchResult] + +func (h *HttpHandler) GetBalancesByAddressBatch(ctx *fiber.Ctx) (err error) { + var req getBalancesByAddressBatchRequest + if err := ctx.BodyParser(&req); err != nil { + return errors.WithStack(err) + } + if err := req.Validate(); err != nil { + return errors.WithStack(err) + } + + var latestBlockHeight uint64 + blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext()) + if err != nil { + return errors.Wrap(err, "error during GetLatestBlock") + } + latestBlockHeight = uint64(blockHeader.Height) + + processQuery := func(ctx context.Context, query getBalanceQuery, queryIndex int) (*getBalancesByAddressResult, error) { + pkScript, ok := resolvePkScript(h.network, query.Wallet) + if !ok { + return nil, errs.NewPublicError(fmt.Sprintf("unable to resolve pkscript from \"queries[%d].wallet\"", queryIndex)) + } + + blockHeight := query.BlockHeight + if blockHeight == 0 { + blockHeight = latestBlockHeight + } + + balances, err := h.usecase.GetBalancesByPkScript(ctx, pkScript, blockHeight) + if err != nil { + return nil, errors.Wrap(err, "error during GetBalancesByPkScript") + } + + runeId, ok := h.resolveRuneId(ctx, query.Id) + if ok { + // filter out balances that don't match the requested rune id + for key := range balances { + if key != runeId { + delete(balances, key) + } + } + } + + balanceRuneIds := lo.Keys(balances) + runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx, balanceRuneIds) + if err != nil { + return nil, errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch") + } + + balanceList := make([]balance, 0, len(balances)) + for id, b := range balances { + runeEntry := runeEntries[id] + balanceList 
= append(balanceList, balance{ + Amount: b.Amount, + Id: id, + Name: runeEntry.SpacedRune, + Symbol: string(runeEntry.Symbol), + Decimals: runeEntry.Divisibility, + }) + } + slices.SortFunc(balanceList, func(i, j balance) int { + return j.Amount.Cmp(i.Amount) + }) + + result := getBalancesByAddressResult{ + BlockHeight: blockHeight, + List: balanceList, + } + return &result, nil + } + + results := make([]*getBalancesByAddressResult, len(req.Queries)) + eg, ectx := errgroup.WithContext(ctx.UserContext()) + for i, query := range req.Queries { + i := i + query := query + eg.Go(func() error { + result, err := processQuery(ectx, query, i) + if err != nil { + return errors.Wrapf(err, "error during processQuery for query %d", i) + } + results[i] = result + return nil + }) + } + if err := eg.Wait(); err != nil { + return errors.WithStack(err) + } + + resp := getBalancesByAddressBatchResponse{ + Result: &getBalancesByAddressBatchResult{ + List: results, + }, + } + + return errors.WithStack(ctx.JSON(resp)) +} diff --git a/modules/runes/api/httphandler/get_current_block.go b/modules/runes/api/httphandler/get_current_block.go new file mode 100644 index 0000000..b11db8b --- /dev/null +++ b/modules/runes/api/httphandler/get_current_block.go @@ -0,0 +1,50 @@ +package httphandler + +import ( + "github.com/Cleverse/go-utilities/utils" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gofiber/fiber/v2" +) + +var startingBlockHeader = map[common.Network]types.BlockHeader{ + common.NetworkMainnet: { + Height: 839999, + Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")), + PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")), + }, + common.NetworkTestnet: { + 
Height: 2583200, + Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")), + PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")), + }, +} + +type getCurrentBlockResult struct { + Hash string `json:"hash"` + Height int64 `json:"height"` +} + +type getCurrentBlockResponse = HttpResponse[getCurrentBlockResult] + +func (h *HttpHandler) GetCurrentBlock(ctx *fiber.Ctx) (err error) { + blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext()) + if err != nil { + if !errors.Is(err, errs.NotFound) { + return errors.Wrap(err, "error during GetLatestBlock") + } + blockHeader = startingBlockHeader[h.network] + } + + resp := getCurrentBlockResponse{ + Result: &getCurrentBlockResult{ + Hash: blockHeader.Hash.String(), + Height: blockHeader.Height, + }, + } + + return errors.WithStack(ctx.JSON(resp)) +} diff --git a/modules/runes/api/httphandler/get_holders.go b/modules/runes/api/httphandler/get_holders.go new file mode 100644 index 0000000..66b5457 --- /dev/null +++ b/modules/runes/api/httphandler/get_holders.go @@ -0,0 +1,114 @@ +package httphandler + +import ( + "encoding/hex" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" + "github.com/gofiber/fiber/v2" + "github.com/shopspring/decimal" +) + +type getHoldersRequest struct { + Id string `params:"id"` + BlockHeight uint64 `query:"blockHeight"` +} + +func (r getHoldersRequest) Validate() error { + var errList []error + if !isRuneIdOrRuneName(r.Id) { + errList = append(errList, errors.New("'id' is not valid rune id or rune name")) + } + return errs.WithPublicMessage(errors.Join(errList...), "validation error") +} + +type holdingBalance struct { + Address string `json:"address"` + PkScript string `json:"pkScript"` + Amount uint128.Uint128 `json:"amount"` + 
Percent float64 `json:"percent"` +} + +type getHoldersResult struct { + BlockHeight uint64 `json:"blockHeight"` + TotalSupply uint128.Uint128 `json:"totalSupply"` + MintedAmount uint128.Uint128 `json:"mintedAmount"` + List []holdingBalance `json:"list"` +} + +type getHoldersResponse = HttpResponse[getHoldersResult] + +func (h *HttpHandler) GetHolders(ctx *fiber.Ctx) (err error) { + var req getHoldersRequest + if err := ctx.ParamsParser(&req); err != nil { + return errors.WithStack(err) + } + if err := ctx.QueryParser(&req); err != nil { + return errors.WithStack(err) + } + if err := req.Validate(); err != nil { + return errors.WithStack(err) + } + + blockHeight := req.BlockHeight + if blockHeight == 0 { + blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext()) + if err != nil { + return errors.Wrap(err, "error during GetLatestBlock") + } + blockHeight = uint64(blockHeader.Height) + } + + var runeId runes.RuneId + if req.Id != "" { + var ok bool + runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id) + if !ok { + return errs.NewPublicError("unable to resolve rune id from \"id\"") + } + } + + runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight) + if err != nil { + return errors.Wrap(err, "error during GetHoldersByHeight") + } + holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight) + if err != nil { + return errors.Wrap(err, "error during GetBalancesByRuneId") + } + + totalSupply, err := runeEntry.Supply() + if err != nil { + return errors.Wrap(err, "cannot get total supply of rune") + } + mintedAmount, err := runeEntry.MintedAmount() + if err != nil { + return errors.Wrap(err, "cannot get minted amount of rune") + } + + list := make([]holdingBalance, 0, len(holdingBalances)) + for _, balance := range holdingBalances { + address := addressFromPkScript(balance.PkScript, h.network) + amount := decimal.NewFromBigInt(balance.Amount.Big(), 0) + percent := 
amount.Div(decimal.NewFromBigInt(totalSupply.Big(), 0)) + list = append(list, holdingBalance{ + Address: address, + PkScript: hex.EncodeToString(balance.PkScript), + Amount: balance.Amount, + Percent: percent.InexactFloat64(), + }) + } + + resp := getHoldersResponse{ + Result: &getHoldersResult{ + BlockHeight: blockHeight, + TotalSupply: totalSupply, + MintedAmount: mintedAmount, + List: list, + }, + } + + return errors.WithStack(ctx.JSON(resp)) +} diff --git a/modules/runes/api/httphandler/get_token_info.go b/modules/runes/api/httphandler/get_token_info.go new file mode 100644 index 0000000..d5b762e --- /dev/null +++ b/modules/runes/api/httphandler/get_token_info.go @@ -0,0 +1,165 @@ +package httphandler + +import ( + "slices" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/modules/runes/internal/entity" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" + "github.com/gofiber/fiber/v2" + "github.com/samber/lo" +) + +type getTokenInfoRequest struct { + Id string `params:"id"` + BlockHeight uint64 `query:"blockHeight"` +} + +func (r getTokenInfoRequest) Validate() error { + var errList []error + if !isRuneIdOrRuneName(r.Id) { + errList = append(errList, errors.New("'id' is not valid rune id or rune name")) + } + return errs.WithPublicMessage(errors.Join(errList...), "validation error") +} + +type entryTerms struct { + Amount uint128.Uint128 `json:"amount"` + Cap uint128.Uint128 `json:"cap"` + HeightStart *uint64 `json:"heightStart"` + HeightEnd *uint64 `json:"heightEnd"` + OffsetStart *uint64 `json:"offsetStart"` + OffsetEnd *uint64 `json:"offsetEnd"` +} + +type entry struct { + Divisibility uint8 `json:"divisibility"` + Premine uint128.Uint128 `json:"premine"` + Rune runes.Rune `json:"rune"` + Spacers uint32 `json:"spacers"` + Symbol string `json:"symbol"` + Terms entryTerms `json:"terms"` + Turbo bool `json:"turbo"` +} + +type 
tokenInfoExtend struct { + Entry entry `json:"entry"` +} + +type getTokenInfoResult struct { + Id runes.RuneId `json:"id"` + Name runes.SpacedRune `json:"name"` // rune name + Symbol string `json:"symbol"` + TotalSupply uint128.Uint128 `json:"totalSupply"` + CirculatingSupply uint128.Uint128 `json:"circulatingSupply"` + MintedAmount uint128.Uint128 `json:"mintedAmount"` + BurnedAmount uint128.Uint128 `json:"burnedAmount"` + Decimals uint8 `json:"decimals"` + DeployedAt uint64 `json:"deployedAt"` // unix timestamp + DeployedAtHeight uint64 `json:"deployedAtHeight"` + CompletedAt *uint64 `json:"completedAt"` // unix timestamp + CompletedAtHeight *uint64 `json:"completedAtHeight"` + HoldersCount int `json:"holdersCount"` + Extend tokenInfoExtend `json:"extend"` +} + +type getTokenInfoResponse = HttpResponse[getTokenInfoResult] + +func (h *HttpHandler) GetTokenInfo(ctx *fiber.Ctx) (err error) { + var req getTokenInfoRequest + if err := ctx.ParamsParser(&req); err != nil { + return errors.WithStack(err) + } + if err := ctx.QueryParser(&req); err != nil { + return errors.WithStack(err) + } + if err := req.Validate(); err != nil { + return errors.WithStack(err) + } + + blockHeight := req.BlockHeight + if blockHeight == 0 { + blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext()) + if err != nil { + return errors.Wrap(err, "error during GetLatestBlock") + } + blockHeight = uint64(blockHeader.Height) + } + + var runeId runes.RuneId + if req.Id != "" { + var ok bool + runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id) + if !ok { + return errs.NewPublicError("unable to resolve rune id from \"id\"") + } + } + + runeEntry, err := h.usecase.GetRuneEntryByRuneIdAndHeight(ctx.UserContext(), runeId, blockHeight) + if err != nil { + return errors.Wrap(err, "error during GetTokenInfoByHeight") + } + holdingBalances, err := h.usecase.GetBalancesByRuneId(ctx.UserContext(), runeId, blockHeight) + if err != nil { + return errors.Wrap(err, "error during GetBalancesByRuneId") 
+ } + + holdingBalances = lo.Filter(holdingBalances, func(b *entity.Balance, _ int) bool { + return !b.Amount.IsZero() + }) + // sort by amount descending + slices.SortFunc(holdingBalances, func(i, j *entity.Balance) int { + return j.Amount.Cmp(i.Amount) + }) + + totalSupply, err := runeEntry.Supply() + if err != nil { + return errors.Wrap(err, "cannot get total supply of rune") + } + mintedAmount, err := runeEntry.MintedAmount() + if err != nil { + return errors.Wrap(err, "cannot get minted amount of rune") + } + circulatingSupply := mintedAmount.Sub(runeEntry.BurnedAmount) + + terms := lo.FromPtr(runeEntry.Terms) + resp := getTokenInfoResponse{ + Result: &getTokenInfoResult{ + Id: runeId, + Name: runeEntry.SpacedRune, + Symbol: string(runeEntry.Symbol), + TotalSupply: totalSupply, + CirculatingSupply: circulatingSupply, + MintedAmount: mintedAmount, + BurnedAmount: runeEntry.BurnedAmount, + Decimals: runeEntry.Divisibility, + DeployedAt: uint64(runeEntry.EtchedAt.Unix()), + DeployedAtHeight: runeEntry.EtchingBlock, + CompletedAt: lo.Ternary(runeEntry.CompletedAt.IsZero(), nil, lo.ToPtr(uint64(runeEntry.CompletedAt.Unix()))), + CompletedAtHeight: runeEntry.CompletedAtHeight, + HoldersCount: len(holdingBalances), + Extend: tokenInfoExtend{ + Entry: entry{ + Divisibility: runeEntry.Divisibility, + Premine: runeEntry.Premine, + Rune: runeEntry.SpacedRune.Rune, + Spacers: runeEntry.SpacedRune.Spacers, + Symbol: string(runeEntry.Symbol), + Terms: entryTerms{ + Amount: lo.FromPtr(terms.Amount), + Cap: lo.FromPtr(terms.Cap), + HeightStart: terms.HeightStart, + HeightEnd: terms.HeightEnd, + OffsetStart: terms.OffsetStart, + OffsetEnd: terms.OffsetEnd, + }, + Turbo: runeEntry.Turbo, + }, + }, + }, + } + + return errors.WithStack(ctx.JSON(resp)) +} diff --git a/modules/runes/api/httphandler/get_transactions.go b/modules/runes/api/httphandler/get_transactions.go new file mode 100644 index 0000000..394cb2d --- /dev/null +++ b/modules/runes/api/httphandler/get_transactions.go 
@@ -0,0 +1,274 @@ +package httphandler + +import ( + "encoding/hex" + "slices" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" + "github.com/gofiber/fiber/v2" + "github.com/samber/lo" +) + +type getTransactionsRequest struct { + Wallet string `query:"wallet"` + Id string `query:"id"` + BlockHeight uint64 `query:"blockHeight"` +} + +func (r getTransactionsRequest) Validate() error { + var errList []error + if r.Id != "" && !isRuneIdOrRuneName(r.Id) { + errList = append(errList, errors.New("'id' is not valid rune id or rune name")) + } + return errs.WithPublicMessage(errors.Join(errList...), "validation error") +} + +type txInputOutput struct { + PkScript string `json:"pkScript"` + Address string `json:"address"` + Id runes.RuneId `json:"id"` + Amount uint128.Uint128 `json:"amount"` + Decimals uint8 `json:"decimals"` + Index uint32 `json:"index"` +} + +type terms struct { + Amount *uint128.Uint128 `json:"amount"` + Cap *uint128.Uint128 `json:"cap"` + HeightStart *uint64 `json:"heightStart"` + HeightEnd *uint64 `json:"heightEnd"` + OffsetStart *uint64 `json:"offsetStart"` + OffsetEnd *uint64 `json:"offsetEnd"` +} + +type etching struct { + Divisibility *uint8 `json:"divisibility"` + Premine *uint128.Uint128 `json:"premine"` + Rune *runes.Rune `json:"rune"` + Spacers *uint32 `json:"spacers"` + Symbol *string `json:"symbol"` + Terms *terms `json:"terms"` + Turbo bool `json:"turbo"` +} + +type edict struct { + Id runes.RuneId `json:"id"` + Amount uint128.Uint128 `json:"amount"` + Output int `json:"output"` +} + +type runestone struct { + Cenotaph bool `json:"cenotaph"` + Flaws []string `json:"flaws"` + Etching *etching `json:"etching"` + Edicts []edict `json:"edicts"` + Mint *runes.RuneId `json:"mint"` + Pointer *uint64 `json:"pointer"` +} + +type runeTransactionExtend struct { + 
RuneEtched bool `json:"runeEtched"` + Runestone *runestone `json:"runestone"` +} + +type amountWithDecimal struct { + Amount uint128.Uint128 `json:"amount"` + Decimals uint8 `json:"decimals"` +} + +type transaction struct { + TxHash chainhash.Hash `json:"txHash"` + BlockHeight uint64 `json:"blockHeight"` + Index uint32 `json:"index"` + Timestamp int64 `json:"timestamp"` + Inputs []txInputOutput `json:"inputs"` + Outputs []txInputOutput `json:"outputs"` + Mints map[string]amountWithDecimal `json:"mints"` + Burns map[string]amountWithDecimal `json:"burns"` + Extend runeTransactionExtend `json:"extend"` +} + +type getTransactionsResult struct { + List []transaction `json:"list"` +} + +type getTransactionsResponse = HttpResponse[getTransactionsResult] + +func (h *HttpHandler) GetTransactions(ctx *fiber.Ctx) (err error) { + var req getTransactionsRequest + if err := ctx.QueryParser(&req); err != nil { + return errors.WithStack(err) + } + if err := req.Validate(); err != nil { + return errors.WithStack(err) + } + + var pkScript []byte + if req.Wallet != "" { + var ok bool + pkScript, ok = resolvePkScript(h.network, req.Wallet) + if !ok { + return errs.NewPublicError("unable to resolve pkscript from \"wallet\"") + } + } + + var runeId runes.RuneId + if req.Id != "" { + var ok bool + runeId, ok = h.resolveRuneId(ctx.UserContext(), req.Id) + if !ok { + return errs.NewPublicError("unable to resolve rune id from \"id\"") + } + } + + blockHeight := req.BlockHeight + // set blockHeight to the latest block height blockHeight, pkScript, and runeId are not provided + if blockHeight == 0 && pkScript == nil && runeId == (runes.RuneId{}) { + blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext()) + if err != nil { + return errors.Wrap(err, "error during GetLatestBlock") + } + blockHeight = uint64(blockHeader.Height) + } + + txs, err := h.usecase.GetRuneTransactions(ctx.UserContext(), pkScript, runeId, blockHeight) + if err != nil { + return errors.Wrap(err, "error during 
GetRuneTransactions") + } + + var allRuneIds []runes.RuneId + for _, tx := range txs { + for id := range tx.Mints { + allRuneIds = append(allRuneIds, id) + } + for id := range tx.Burns { + allRuneIds = append(allRuneIds, id) + } + for _, input := range tx.Inputs { + allRuneIds = append(allRuneIds, input.RuneId) + } + for _, output := range tx.Outputs { + allRuneIds = append(allRuneIds, output.RuneId) + } + } + allRuneIds = lo.Uniq(allRuneIds) + runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), allRuneIds) + if err != nil { + return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch") + } + + txList := make([]transaction, 0, len(txs)) + for _, tx := range txs { + respTx := transaction{ + TxHash: tx.Hash, + BlockHeight: tx.BlockHeight, + Index: tx.Index, + Timestamp: tx.Timestamp.Unix(), + Inputs: make([]txInputOutput, 0, len(tx.Inputs)), + Outputs: make([]txInputOutput, 0, len(tx.Outputs)), + Mints: make(map[string]amountWithDecimal, len(tx.Mints)), + Burns: make(map[string]amountWithDecimal, len(tx.Burns)), + Extend: runeTransactionExtend{ + RuneEtched: tx.RuneEtched, + Runestone: nil, + }, + } + for _, input := range tx.Inputs { + address := addressFromPkScript(input.PkScript, h.network) + respTx.Inputs = append(respTx.Inputs, txInputOutput{ + PkScript: hex.EncodeToString(input.PkScript), + Address: address, + Id: input.RuneId, + Amount: input.Amount, + Decimals: runeEntries[input.RuneId].Divisibility, + Index: input.Index, + }) + } + for _, output := range tx.Outputs { + address := addressFromPkScript(output.PkScript, h.network) + respTx.Outputs = append(respTx.Outputs, txInputOutput{ + PkScript: hex.EncodeToString(output.PkScript), + Address: address, + Id: output.RuneId, + Amount: output.Amount, + Decimals: runeEntries[output.RuneId].Divisibility, + Index: output.Index, + }) + } + for id, amount := range tx.Mints { + respTx.Mints[id.String()] = amountWithDecimal{ + Amount: amount, + Decimals: runeEntries[id].Divisibility, + } + } 
+ for id, amount := range tx.Burns { + respTx.Burns[id.String()] = amountWithDecimal{ + Amount: amount, + Decimals: runeEntries[id].Divisibility, + } + } + if tx.Runestone != nil { + var e *etching + if tx.Runestone.Etching != nil { + var symbol *string + if tx.Runestone.Etching.Symbol != nil { + symbol = lo.ToPtr(string(*tx.Runestone.Etching.Symbol)) + } + var t *terms + if tx.Runestone.Etching.Terms != nil { + t = &terms{ + Amount: tx.Runestone.Etching.Terms.Amount, + Cap: tx.Runestone.Etching.Terms.Cap, + HeightStart: tx.Runestone.Etching.Terms.HeightStart, + HeightEnd: tx.Runestone.Etching.Terms.HeightEnd, + OffsetStart: tx.Runestone.Etching.Terms.OffsetStart, + OffsetEnd: tx.Runestone.Etching.Terms.OffsetEnd, + } + } + e = &etching{ + Divisibility: tx.Runestone.Etching.Divisibility, + Premine: tx.Runestone.Etching.Premine, + Rune: tx.Runestone.Etching.Rune, + Spacers: tx.Runestone.Etching.Spacers, + Symbol: symbol, + Terms: t, + Turbo: tx.Runestone.Etching.Turbo, + } + } + respTx.Extend.Runestone = &runestone{ + Cenotaph: tx.Runestone.Cenotaph, + Flaws: lo.Ternary(tx.Runestone.Cenotaph, tx.Runestone.Flaws.CollectAsString(), nil), + Etching: e, + Edicts: lo.Map(tx.Runestone.Edicts, func(ed runes.Edict, _ int) edict { + return edict{ + Id: ed.Id, + Amount: ed.Amount, + Output: ed.Output, + } + }), + Mint: tx.Runestone.Mint, + Pointer: tx.Runestone.Pointer, + } + } + txList = append(txList, respTx) + } + // sort by block height ASC, then index ASC + slices.SortFunc(txList, func(t1, t2 transaction) int { + if t1.BlockHeight != t2.BlockHeight { + return int(t1.BlockHeight - t2.BlockHeight) + } + return int(t1.Index - t2.Index) + }) + + resp := getTransactionsResponse{ + Result: &getTransactionsResult{ + List: txList, + }, + } + + return errors.WithStack(ctx.JSON(resp)) +} diff --git a/modules/runes/api/httphandler/get_utxos_by_address.go b/modules/runes/api/httphandler/get_utxos_by_address.go new file mode 100644 index 0000000..e2acf71 --- /dev/null +++ 
b/modules/runes/api/httphandler/get_utxos_by_address.go @@ -0,0 +1,146 @@ +package httphandler + +import ( + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/modules/runes/internal/entity" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" + "github.com/gofiber/fiber/v2" + "github.com/samber/lo" +) + +type getUTXOsByAddressRequest struct { + Wallet string `params:"wallet"` + Id string `query:"id"` + BlockHeight uint64 `query:"blockHeight"` +} + +func (r getUTXOsByAddressRequest) Validate() error { + var errList []error + if r.Wallet == "" { + errList = append(errList, errors.New("'wallet' is required")) + } + if r.Id != "" && !isRuneIdOrRuneName(r.Id) { + errList = append(errList, errors.New("'id' is not valid rune id or rune name")) + } + return errs.WithPublicMessage(errors.Join(errList...), "validation error") +} + +type runeBalance struct { + RuneId runes.RuneId `json:"runeId"` + Rune runes.SpacedRune `json:"rune"` + Symbol string `json:"symbol"` + Amount uint128.Uint128 `json:"amount"` + Divisibility uint8 `json:"divisibility"` +} + +type utxoExtend struct { + Runes []runeBalance `json:"runes"` +} + +type utxo struct { + TxHash chainhash.Hash `json:"txHash"` + OutputIndex uint32 `json:"outputIndex"` + Extend utxoExtend `json:"extend"` +} + +type getUTXOsByAddressResult struct { + List []utxo `json:"list"` + BlockHeight uint64 `json:"blockHeight"` +} + +type getUTXOsByAddressResponse = HttpResponse[getUTXOsByAddressResult] + +func (h *HttpHandler) GetUTXOsByAddress(ctx *fiber.Ctx) (err error) { + var req getUTXOsByAddressRequest + if err := ctx.ParamsParser(&req); err != nil { + return errors.WithStack(err) + } + if err := ctx.QueryParser(&req); err != nil { + return errors.WithStack(err) + } + if err := req.Validate(); err != nil { + return 
errors.WithStack(err) + } + + pkScript, ok := resolvePkScript(h.network, req.Wallet) + if !ok { + return errs.NewPublicError("unable to resolve pkscript from \"wallet\"") + } + + blockHeight := req.BlockHeight + if blockHeight == 0 { + blockHeader, err := h.usecase.GetLatestBlock(ctx.UserContext()) + if err != nil { + return errors.Wrap(err, "error during GetLatestBlock") + } + blockHeight = uint64(blockHeader.Height) + } + + outPointBalances, err := h.usecase.GetUnspentOutPointBalancesByPkScript(ctx.UserContext(), pkScript, blockHeight) + if err != nil { + return errors.Wrap(err, "error during GetBalancesByPkScript") + } + + outPointBalanceRuneIds := lo.Map(outPointBalances, func(outPointBalance *entity.OutPointBalance, _ int) runes.RuneId { + return outPointBalance.RuneId + }) + runeEntries, err := h.usecase.GetRuneEntryByRuneIdBatch(ctx.UserContext(), outPointBalanceRuneIds) + if err != nil { + return errors.Wrap(err, "error during GetRuneEntryByRuneIdBatch") + } + + groupedBalances := lo.GroupBy(outPointBalances, func(outPointBalance *entity.OutPointBalance) wire.OutPoint { + return outPointBalance.OutPoint + }) + + utxoList := make([]utxo, 0, len(groupedBalances)) + for outPoint, balances := range groupedBalances { + runeBalances := make([]runeBalance, 0, len(balances)) + for _, balance := range balances { + runeEntry := runeEntries[balance.RuneId] + runeBalances = append(runeBalances, runeBalance{ + RuneId: balance.RuneId, + Rune: runeEntry.SpacedRune, + Symbol: string(runeEntry.Symbol), + Amount: balance.Amount, + Divisibility: runeEntry.Divisibility, + }) + } + + utxoList = append(utxoList, utxo{ + TxHash: outPoint.Hash, + OutputIndex: outPoint.Index, + Extend: utxoExtend{ + Runes: runeBalances, + }, + }) + } + + // filter by req.Id if exists + { + runeId, ok := h.resolveRuneId(ctx.UserContext(), req.Id) + if ok { + utxoList = lo.Filter(utxoList, func(u utxo, _ int) bool { + for _, runeBalance := range u.Extend.Runes { + if runeBalance.RuneId == runeId { + 
return true + } + } + return false + }) + } + } + + resp := getUTXOsByAddressResponse{ + Result: &getUTXOsByAddressResult{ + BlockHeight: blockHeight, + List: utxoList, + }, + } + + return errors.WithStack(ctx.JSON(resp)) +} diff --git a/modules/runes/api/httphandler/httphandler.go b/modules/runes/api/httphandler/httphandler.go new file mode 100644 index 0000000..246a0f9 --- /dev/null +++ b/modules/runes/api/httphandler/httphandler.go @@ -0,0 +1,114 @@ +package httphandler + +import ( + "context" + "encoding/hex" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/txscript" + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/indexer-network/modules/runes/usecase" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/gaze-network/indexer-network/pkg/logger/slogx" +) + +type HttpHandler struct { + usecase *usecase.Usecase + network common.Network +} + +func New(network common.Network, usecase *usecase.Usecase) *HttpHandler { + return &HttpHandler{ + usecase: usecase, + network: network, + } +} + +type HttpResponse[T any] struct { + Error *string `json:"error"` + Result *T `json:"result,omitempty"` +} + +func resolvePkScript(network common.Network, wallet string) ([]byte, bool) { + if wallet == "" { + return nil, false + } + defaultNet := func() *chaincfg.Params { + switch network { + case common.NetworkMainnet: + return &chaincfg.MainNetParams + case common.NetworkTestnet: + return &chaincfg.TestNet3Params + } + panic("invalid network") + }() + + // attempt to parse as address + address, err := btcutil.DecodeAddress(wallet, defaultNet) + if err == nil { + pkScript, err := txscript.PayToAddrScript(address) + if err != nil { + return nil, false + } + return pkScript, true + } + + // attempt to parse as pkscript + pkScript, err := hex.DecodeString(wallet) + if err != nil { + return nil, false + } + + return 
pkScript, true +} + +// TODO: extract this function somewhere else +// addressFromPkScript returns the address from the given pkScript. If the pkScript is invalid or not standard, it returns empty string. +func addressFromPkScript(pkScript []byte, network common.Network) string { + _, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, network.ChainParams()) + if err != nil { + logger.Debug("unable to extract address from pkscript", slogx.Error(err)) + return "" + } + if len(addrs) != 1 { + logger.Debug("invalid number of addresses extracted from pkscript. Expected only 1.", slogx.Int("numAddresses", len(addrs))) + return "" + } + return addrs[0].EncodeAddress() +} + +func (h *HttpHandler) resolveRuneId(ctx context.Context, id string) (runes.RuneId, bool) { + if id == "" { + return runes.RuneId{}, false + } + + // attempt to parse as rune id + runeId, err := runes.NewRuneIdFromString(id) + if err == nil { + return runeId, true + } + + // attempt to parse as rune + rune, err := runes.NewRuneFromString(id) + if err == nil { + runeId, err := h.usecase.GetRuneIdFromRune(ctx, rune) + if err != nil { + return runes.RuneId{}, false + } + return runeId, true + } + + return runes.RuneId{}, false +} + +func isRuneIdOrRuneName(id string) bool { + if _, err := runes.NewRuneIdFromString(id); err == nil { + return true + } + if _, err := runes.NewRuneFromString(id); err == nil { + return true + } + return false +} diff --git a/modules/runes/api/httphandler/routes.go b/modules/runes/api/httphandler/routes.go new file mode 100644 index 0000000..da24f35 --- /dev/null +++ b/modules/runes/api/httphandler/routes.go @@ -0,0 +1,18 @@ +package httphandler + +import ( + "github.com/gofiber/fiber/v2" +) + +func (h *HttpHandler) Mount(router fiber.Router) error { + r := router.Group("/v2/runes") + + r.Post("/balances/wallet/batch", h.GetBalancesByAddressBatch) + r.Get("/balances/wallet/:wallet", h.GetBalancesByAddress) + r.Get("/transactions", h.GetTransactions) + r.Get("/holders/:id", 
h.GetHolders) + r.Get("/info/:id", h.GetTokenInfo) + r.Get("/utxos/wallet/:wallet", h.GetUTXOsByAddress) + r.Get("/block", h.GetCurrentBlock) + return nil +} diff --git a/modules/runes/config/config.go b/modules/runes/config/config.go new file mode 100644 index 0000000..e2c503e --- /dev/null +++ b/modules/runes/config/config.go @@ -0,0 +1,10 @@ +package config + +import "github.com/gaze-network/indexer-network/internal/postgres" + +type Config struct { + Datasource string `mapstructure:"datasource"` // Datasource to fetch bitcoin data for Meta-Protocol e.g. `bitcoin-node` | `database` + Database string `mapstructure:"database"` // Database to store runes data. + APIHandlers []string `mapstructure:"api_handlers"` // List of API handlers to enable. (e.g. `http`) + Postgres postgres.Config `mapstructure:"postgres"` +} diff --git a/modules/runes/constants.go b/modules/runes/constants.go new file mode 100644 index 0000000..0a093ab --- /dev/null +++ b/modules/runes/constants.go @@ -0,0 +1,27 @@ +package runes + +import ( + "github.com/Cleverse/go-utilities/utils" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/indexer-network/core/types" +) + +const ( + Version = "v0.0.1" + DBVersion = 1 + EventHashVersion = 1 +) + +var startingBlockHeader = map[common.Network]types.BlockHeader{ + common.NetworkMainnet: { + Height: 839999, + Hash: *utils.Must(chainhash.NewHashFromStr("0000000000000000000172014ba58d66455762add0512355ad651207918494ab")), + PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000000000001dcce6ce7c8a45872cafd1fb04732b447a14a91832591")), + }, + common.NetworkTestnet: { + Height: 2583200, + Hash: *utils.Must(chainhash.NewHashFromStr("000000000006c5f0dfcd9e0e81f27f97a87aef82087ffe69cd3c390325bb6541")), + PrevBlock: *utils.Must(chainhash.NewHashFromStr("00000000000668f3bafac992f53424774515440cb47e1cb9e73af3f496139e28")), + }, +} diff --git 
a/modules/runes/database/postgresql/migrations/000001_initialize_tables.down.sql b/modules/runes/database/postgresql/migrations/000001_initialize_tables.down.sql new file mode 100644 index 0000000..a247237 --- /dev/null +++ b/modules/runes/database/postgresql/migrations/000001_initialize_tables.down.sql @@ -0,0 +1,14 @@ +BEGIN; + +DROP TABLE IF EXISTS "runes_indexer_stats"; +DROP TABLE IF EXISTS "runes_indexer_db_version"; +DROP TABLE IF EXISTS "runes_processor_state"; +DROP TABLE IF EXISTS "runes_indexed_blocks"; +DROP TABLE IF EXISTS "runes_entries"; +DROP TABLE IF EXISTS "runes_entry_states"; +DROP TABLE IF EXISTS "runes_transactions"; +DROP TABLE IF EXISTS "runes_runestones"; +DROP TABLE IF EXISTS "runes_outpoint_balances"; +DROP TABLE IF EXISTS "runes_balances"; + +COMMIT; diff --git a/modules/runes/database/postgresql/migrations/000001_initialize_tables.up.sql b/modules/runes/database/postgresql/migrations/000001_initialize_tables.up.sql new file mode 100644 index 0000000..a8b1469 --- /dev/null +++ b/modules/runes/database/postgresql/migrations/000001_initialize_tables.up.sql @@ -0,0 +1,122 @@ +BEGIN; + +-- Indexer Client Information + +CREATE TABLE IF NOT EXISTS "runes_indexer_stats" ( + "id" BIGSERIAL PRIMARY KEY, + "client_version" TEXT NOT NULL, + "network" TEXT NOT NULL, + "created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE IF NOT EXISTS "runes_indexer_state" ( + "id" BIGSERIAL PRIMARY KEY, + "db_version" INT NOT NULL, + "event_hash_version" INT NOT NULL, + "created_at" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP +); +CREATE INDEX IF NOT EXISTS runes_indexer_state_created_at_idx ON "runes_indexer_state" USING BTREE ("created_at" DESC); + +-- Runes data + +CREATE TABLE IF NOT EXISTS "runes_indexed_blocks" ( + "height" INT NOT NULL PRIMARY KEY, + "hash" TEXT NOT NULL, + "prev_hash" TEXT NOT NULL, + "event_hash" TEXT NOT NULL, + "cumulative_event_hash" TEXT NOT NULL +); + +CREATE TABLE IF NOT EXISTS 
"runes_entries" ( + "rune_id" TEXT NOT NULL PRIMARY KEY, + "number" BIGINT NOT NULL, -- sequential number of the rune starting from 0 + "rune" TEXT NOT NULL, + "spacers" INT NOT NULL, + "premine" DECIMAL NOT NULL, + "symbol" INT NOT NULL, + "divisibility" SMALLINT NOT NULL, + "terms" BOOLEAN NOT NULL, -- if true, then minting term exists for this entry + "terms_amount" DECIMAL, + "terms_cap" DECIMAL, + "terms_height_start" INT, + "terms_height_end" INT, + "terms_offset_start" INT, + "terms_offset_end" INT, + "turbo" BOOLEAN NOT NULL, + "etching_block" INT NOT NULL, + "etching_tx_hash" TEXT NOT NULL, + "etched_at" TIMESTAMP NOT NULL +); +CREATE UNIQUE INDEX IF NOT EXISTS runes_entries_rune_idx ON "runes_entries" USING BTREE ("rune"); +CREATE UNIQUE INDEX IF NOT EXISTS runes_entries_number_idx ON "runes_entries" USING BTREE ("number"); + +CREATE TABLE IF NOT EXISTS "runes_entry_states" ( + "rune_id" TEXT NOT NULL, + "block_height" INT NOT NULL, + "mints" DECIMAL NOT NULL, + "burned_amount" DECIMAL NOT NULL, + "completed_at" TIMESTAMP, + "completed_at_height" INT, + PRIMARY KEY ("rune_id", "block_height") +); + +CREATE TABLE IF NOT EXISTS "runes_transactions" ( + "hash" TEXT NOT NULL PRIMARY KEY, + "block_height" INT NOT NULL, + "index" INT NOT NULL, + "timestamp" TIMESTAMP NOT NULL, + "inputs" JSONB NOT NULL, + "outputs" JSONB NOT NULL, + "mints" JSONB NOT NULL, + "burns" JSONB NOT NULL, + "rune_etched" BOOLEAN NOT NULL +); +CREATE INDEX IF NOT EXISTS runes_transactions_block_height_idx ON "runes_transactions" USING BTREE ("block_height"); +CREATE INDEX IF NOT EXISTS runes_transactions_jsonb_idx ON "runes_transactions" USING GIN ("inputs", "outputs", "mints", "burns"); + +CREATE TABLE IF NOT EXISTS "runes_runestones" ( + "tx_hash" TEXT NOT NULL PRIMARY KEY, + "block_height" INT NOT NULL, + "etching" BOOLEAN NOT NULL, + "etching_divisibility" SMALLINT, + "etching_premine" DECIMAL, + "etching_rune" TEXT, + "etching_spacers" INT, + "etching_symbol" INT, + 
"etching_terms" BOOLEAN, + "etching_terms_amount" DECIMAL, + "etching_terms_cap" DECIMAL, + "etching_terms_height_start" INT, + "etching_terms_height_end" INT, + "etching_terms_offset_start" INT, + "etching_terms_offset_end" INT, + "etching_turbo" BOOLEAN, + "edicts" JSONB NOT NULL DEFAULT '[]', + "mint" TEXT, + "pointer" INT, + "cenotaph" BOOLEAN NOT NULL, + "flaws" INT NOT NULL +); + +CREATE TABLE IF NOT EXISTS "runes_outpoint_balances" ( + "rune_id" TEXT NOT NULL, + "pkscript" TEXT NOT NULL, + "tx_hash" TEXT NOT NULL, + "tx_idx" INT NOT NULL, -- output index + "amount" DECIMAL NOT NULL, + "block_height" INT NOT NULL, -- block height when this output was created + "spent_height" INT, -- block height when this output was spent + PRIMARY KEY ("rune_id", "tx_hash", "tx_idx") +); +CREATE INDEX IF NOT EXISTS runes_outpoint_balances_tx_hash_tx_idx_idx ON "runes_outpoint_balances" USING BTREE ("tx_hash", "tx_idx"); +CREATE INDEX IF NOT EXISTS runes_outpoint_balances_pkscript_block_height_spent_height_idx ON "runes_outpoint_balances" USING BTREE ("pkscript", "block_height", "spent_height"); + +CREATE TABLE IF NOT EXISTS "runes_balances" ( + "pkscript" TEXT NOT NULL, + "block_height" INT NOT NULL, + "rune_id" TEXT NOT NULL, + "amount" DECIMAL NOT NULL, + PRIMARY KEY ("pkscript", "rune_id", "block_height") +); + +COMMIT; diff --git a/modules/runes/database/postgresql/queries/data.sql b/modules/runes/database/postgresql/queries/data.sql new file mode 100644 index 0000000..873c7b0 --- /dev/null +++ b/modules/runes/database/postgresql/queries/data.sql @@ -0,0 +1,118 @@ +-- name: GetBalancesByPkScript :many +WITH balances AS ( + SELECT DISTINCT ON (rune_id) * FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC +) +SELECT * FROM balances WHERE amount > 0; + +-- name: GetBalancesByRuneId :many +WITH balances AS ( + SELECT DISTINCT ON (pkscript) * FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, 
block_height DESC +) +SELECT * FROM balances WHERE amount > 0; + +-- name: GetBalanceByPkScriptAndRuneId :one +SELECT * FROM runes_balances WHERE pkscript = $1 AND rune_id = $2 AND block_height <= $3 ORDER BY block_height DESC LIMIT 1; + +-- name: GetOutPointBalancesAtOutPoint :many +SELECT * FROM runes_outpoint_balances WHERE tx_hash = $1 AND tx_idx = $2; + +-- name: GetUnspentOutPointBalancesByPkScript :many +SELECT * FROM runes_outpoint_balances WHERE pkscript = @pkScript AND block_height <= @block_height AND (spent_height IS NULL OR spent_height > @block_height); + +-- name: GetRuneEntriesByRuneIds :many +WITH states AS ( + -- select latest state + SELECT DISTINCT ON (rune_id) * FROM runes_entry_states WHERE rune_id = ANY(@rune_ids::text[]) ORDER BY rune_id, block_height DESC +) +SELECT * FROM runes_entries + LEFT JOIN states ON runes_entries.rune_id = states.rune_id + WHERE runes_entries.rune_id = ANY(@rune_ids::text[]); + +-- name: GetRuneEntriesByRuneIdsAndHeight :many +WITH states AS ( + -- select latest state + SELECT DISTINCT ON (rune_id) * FROM runes_entry_states WHERE rune_id = ANY(@rune_ids::text[]) AND block_height <= @height ORDER BY rune_id, block_height DESC +) +SELECT * FROM runes_entries + LEFT JOIN states ON runes_entries.rune_id = states.rune_id + WHERE runes_entries.rune_id = ANY(@rune_ids::text[]) AND etching_block <= @height; + +-- name: GetRuneIdFromRune :one +SELECT rune_id FROM runes_entries WHERE rune = $1; + +-- name: GetRuneTransactions :many +SELECT * FROM runes_transactions + LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash + WHERE ( + @filter_pk_script::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter + OR runes_transactions.outputs @> @pk_script_param::JSONB + OR runes_transactions.inputs @> @pk_script_param::JSONB + ) AND ( + @filter_rune_id::BOOLEAN = FALSE -- if @filter_rune_id is TRUE, apply rune_id filter + OR runes_transactions.outputs @> @rune_id_param::JSONB + OR 
runes_transactions.inputs @> @rune_id_param::JSONB + OR runes_transactions.mints ? @rune_id + OR runes_transactions.burns ? @rune_id + OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = @rune_id_block_height AND runes_transactions.index = @rune_id_tx_index) + ) AND ( + @block_height::INT = 0 OR runes_transactions.block_height = @block_height::INT -- if @block_height > 0, apply block_height filter + ); + +-- name: CountRuneEntries :one +SELECT COUNT(*) FROM runes_entries; + +-- name: CreateRuneEntry :exec +INSERT INTO runes_entries (rune_id, rune, number, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18); + +-- name: CreateRuneEntryState :exec +INSERT INTO runes_entry_states (rune_id, block_height, mints, burned_amount, completed_at, completed_at_height) VALUES ($1, $2, $3, $4, $5, $6); + +-- name: CreateRuneTransaction :exec +INSERT INTO runes_transactions (hash, block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9); + +-- name: CreateRunestone :exec +INSERT INTO runes_runestones (tx_hash, block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21); + +-- name: CreateOutPointBalances :batchexec +INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7); + +-- name: 
SpendOutPointBalances :exec +UPDATE runes_outpoint_balances SET spent_height = $1 WHERE tx_hash = $2 AND tx_idx = $3; + +-- name: CreateRuneBalanceAtBlock :batchexec +INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4); + +-- name: GetLatestIndexedBlock :one +SELECT * FROM runes_indexed_blocks ORDER BY height DESC LIMIT 1; + +-- name: GetIndexedBlockByHeight :one +SELECT * FROM runes_indexed_blocks WHERE height = $1; + +-- name: CreateIndexedBlock :exec +INSERT INTO runes_indexed_blocks (hash, height, prev_hash, event_hash, cumulative_event_hash) VALUES ($1, $2, $3, $4, $5); + +-- name: DeleteIndexedBlockSinceHeight :exec +DELETE FROM runes_indexed_blocks WHERE height >= $1; + +-- name: DeleteRuneEntriesSinceHeight :exec +DELETE FROM runes_entries WHERE etching_block >= $1; + +-- name: DeleteRuneEntryStatesSinceHeight :exec +DELETE FROM runes_entry_states WHERE block_height >= $1; + +-- name: DeleteRuneTransactionsSinceHeight :exec +DELETE FROM runes_transactions WHERE block_height >= $1; + +-- name: DeleteRunestonesSinceHeight :exec +DELETE FROM runes_runestones WHERE block_height >= $1; + +-- name: DeleteOutPointBalancesSinceHeight :exec +DELETE FROM runes_outpoint_balances WHERE block_height >= $1; + +-- name: UnspendOutPointBalancesSinceHeight :exec +UPDATE runes_outpoint_balances SET spent_height = NULL WHERE spent_height >= $1; + +-- name: DeleteRuneBalancesSinceHeight :exec +DELETE FROM runes_balances WHERE block_height >= $1; diff --git a/modules/runes/database/postgresql/queries/info.sql b/modules/runes/database/postgresql/queries/info.sql new file mode 100644 index 0000000..b9daeca --- /dev/null +++ b/modules/runes/database/postgresql/queries/info.sql @@ -0,0 +1,11 @@ +-- name: GetLatestIndexerState :one +SELECT * FROM runes_indexer_state ORDER BY created_at DESC LIMIT 1; + +-- name: SetIndexerState :exec +INSERT INTO runes_indexer_state (db_version, event_hash_version) VALUES ($1, $2); + +-- name: 
GetLatestIndexerStats :one +SELECT "client_version", "network" FROM runes_indexer_stats ORDER BY id DESC LIMIT 1; + +-- name: UpdateIndexerStats :exec +INSERT INTO runes_indexer_stats (client_version, network) VALUES ($1, $2); diff --git a/modules/runes/datagateway/indexer_info.go b/modules/runes/datagateway/indexer_info.go new file mode 100644 index 0000000..ac51910 --- /dev/null +++ b/modules/runes/datagateway/indexer_info.go @@ -0,0 +1,15 @@ +package datagateway + +import ( + "context" + + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/indexer-network/modules/runes/internal/entity" +) + +type IndexerInfoDataGateway interface { + GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error) + GetLatestIndexerStats(ctx context.Context) (version string, network common.Network, err error) + SetIndexerState(ctx context.Context, state entity.IndexerState) error + UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error +} diff --git a/modules/runes/datagateway/runes.go b/modules/runes/datagateway/runes.go new file mode 100644 index 0000000..e4457e0 --- /dev/null +++ b/modules/runes/datagateway/runes.go @@ -0,0 +1,81 @@ +package datagateway + +import ( + "context" + + "github.com/btcsuite/btcd/wire" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/modules/runes/internal/entity" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" +) + +type RunesDataGateway interface { + RunesReaderDataGateway + RunesWriterDataGateway + + // BeginRunesTx returns a new RunesDataGateway with transaction enabled. All write operations performed in this datagateway must be committed to persist changes. 
+ BeginRunesTx(ctx context.Context) (RunesDataGatewayWithTx, error) +} + +type RunesDataGatewayWithTx interface { + RunesDataGateway + Tx +} + +type RunesReaderDataGateway interface { + GetLatestBlock(ctx context.Context) (types.BlockHeader, error) + GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error) + // GetRuneTransactions returns the runes transactions, filterable by pkScript, runeId and height. If pkScript, runeId or height is zero value, that filter is ignored. + GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, height uint64) ([]*entity.RuneTransaction, error) + + GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error) + GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error) + // GetRuneIdFromRune returns the RuneId for the given rune. Returns errs.NotFound if the rune entry is not found. + GetRuneIdFromRune(ctx context.Context, rune runes.Rune) (runes.RuneId, error) + // GetRuneEntryByRuneId returns the RuneEntry for the given runeId. Returns errs.NotFound if the rune entry is not found. + GetRuneEntryByRuneId(ctx context.Context, runeId runes.RuneId) (*runes.RuneEntry, error) + // GetRuneEntryByRuneIdBatch returns the RuneEntries for the given runeIds. + GetRuneEntryByRuneIdBatch(ctx context.Context, runeIds []runes.RuneId) (map[runes.RuneId]*runes.RuneEntry, error) + // GetRuneEntryByRuneIdAndHeight returns the RuneEntry for the given runeId and block height. Returns errs.NotFound if the rune entry is not found. + GetRuneEntryByRuneIdAndHeight(ctx context.Context, runeId runes.RuneId, blockHeight uint64) (*runes.RuneEntry, error) + // GetRuneEntryByRuneIdAndHeightBatch returns the RuneEntries for the given runeIds and block height. 
+ GetRuneEntryByRuneIdAndHeightBatch(ctx context.Context, runeIds []runes.RuneId, blockHeight uint64) (map[runes.RuneId]*runes.RuneEntry, error) + // CountRuneEntries returns the number of existing rune entries. + CountRuneEntries(ctx context.Context) (uint64, error) + + // GetBalancesByPkScript returns the balances for the given pkScript at the given blockHeight. + GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error) + // GetBalancesByRuneId returns the balances for the given runeId at the given blockHeight. + // Cannot use []byte as map key, so we're returning as slice. + GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error) + // GetBalancesByPkScriptAndRuneId returns the balance for the given pkScript and runeId at the given blockHeight. + GetBalanceByPkScriptAndRuneId(ctx context.Context, pkScript []byte, runeId runes.RuneId, blockHeight uint64) (*entity.Balance, error) +} + +type RunesWriterDataGateway interface { + CreateRuneEntry(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error + CreateRuneEntryState(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error + CreateOutPointBalances(ctx context.Context, outPointBalances []*entity.OutPointBalance) error + SpendOutPointBalances(ctx context.Context, outPoint wire.OutPoint, blockHeight uint64) error + CreateRuneBalances(ctx context.Context, params []CreateRuneBalancesParams) error + CreateRuneTransaction(ctx context.Context, tx *entity.RuneTransaction) error + CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error + + // TODO: collapse these into a single function (ResetStateToHeight)? 
+ DeleteIndexedBlockSinceHeight(ctx context.Context, height uint64) error + DeleteRuneEntriesSinceHeight(ctx context.Context, height uint64) error + DeleteRuneEntryStatesSinceHeight(ctx context.Context, height uint64) error + DeleteRuneTransactionsSinceHeight(ctx context.Context, height uint64) error + DeleteRunestonesSinceHeight(ctx context.Context, height uint64) error + DeleteOutPointBalancesSinceHeight(ctx context.Context, height uint64) error + UnspendOutPointBalancesSinceHeight(ctx context.Context, height uint64) error + DeleteRuneBalancesSinceHeight(ctx context.Context, height uint64) error +} + +type CreateRuneBalancesParams struct { + PkScript []byte + RuneId runes.RuneId + Balance uint128.Uint128 + BlockHeight uint64 +} diff --git a/modules/runes/datagateway/tx.go b/modules/runes/datagateway/tx.go new file mode 100644 index 0000000..56455f6 --- /dev/null +++ b/modules/runes/datagateway/tx.go @@ -0,0 +1,12 @@ +package datagateway + +import "context" + +type Tx interface { + // Commit commits the DB transaction. All changes made after Begin() will be persisted. Calling Commit() will close the current transaction. + // If Commit() is called without a prior Begin(), it must be a no-op. + Commit(ctx context.Context) error + // Rollback rolls back the DB transaction. All changes made after Begin() will be discarded. + // Rollback() must be safe to call even if no transaction is active. Hence, a defer Rollback() is safe, even if Commit() was called prior with non-error conditions. 
+ Rollback(ctx context.Context) error +} diff --git a/modules/runes/event_hash.go b/modules/runes/event_hash.go new file mode 100644 index 0000000..73efa1b --- /dev/null +++ b/modules/runes/event_hash.go @@ -0,0 +1,372 @@ +package runes + +import ( + "bytes" + "encoding/hex" + "slices" + "strconv" + "strings" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/modules/runes/internal/entity" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" + "github.com/samber/lo" +) + +// TODO: implement test to ensure that the event hash is calculated the same way for same version +func (p *Processor) calculateEventHash(header types.BlockHeader) (chainhash.Hash, error) { + payload, err := p.getHashPayload(header) + if err != nil { + return chainhash.Hash{}, errors.Wrap(err, "failed to get hash payload") + } + return chainhash.DoubleHashH(payload), nil +} + +func (p *Processor) getHashPayload(header types.BlockHeader) ([]byte, error) { + var sb strings.Builder + sb.WriteString("payload:v" + strconv.Itoa(EventHashVersion) + ":") + sb.WriteString("blockHash:") + sb.Write(header.Hash[:]) + + // serialize new rune entries + { + runeEntries := lo.Values(p.newRuneEntries) + slices.SortFunc(runeEntries, func(t1, t2 *runes.RuneEntry) int { + return int(t1.Number) - int(t2.Number) + }) + for _, entry := range runeEntries { + sb.Write(serializeNewRuneEntry(entry)) + } + } + // serialize new rune entry states + { + runeIds := lo.Keys(p.newRuneEntryStates) + slices.SortFunc(runeIds, func(t1, t2 runes.RuneId) int { + return t1.Cmp(t2) + }) + for _, runeId := range runeIds { + sb.Write(serializeNewRuneEntryState(p.newRuneEntryStates[runeId])) + } + } + // serialize new out point balances + sb.Write(serializeNewOutPointBalances(p.newOutPointBalances)) + + // serialize spend out points 
+ sb.Write(serializeSpendOutPoints(p.newSpendOutPoints)) + + // serialize new balances + { + bytes, err := serializeNewBalances(p.newBalances) + if err != nil { + return nil, errors.Wrap(err, "failed to serialize new balances") + } + sb.Write(bytes) + } + + // serialize new txs + // sort txs by block height and index + { + bytes, err := serializeRuneTxs(p.newRuneTxs) + if err != nil { + return nil, errors.Wrap(err, "failed to serialize new rune txs") + } + sb.Write(bytes) + } + return []byte(sb.String()), nil +} + +func serializeNewRuneEntry(entry *runes.RuneEntry) []byte { + var sb strings.Builder + sb.WriteString("newRuneEntry:") + // nolint:goconst + sb.WriteString("runeId:" + entry.RuneId.String()) + sb.WriteString("number:" + strconv.Itoa(int(entry.Number))) + sb.WriteString("divisibility:" + strconv.Itoa(int(entry.Divisibility))) + sb.WriteString("premine:" + entry.Premine.String()) + sb.WriteString("rune:" + entry.SpacedRune.Rune.String()) + sb.WriteString("spacers:" + strconv.Itoa(int(entry.SpacedRune.Spacers))) + sb.WriteString("symbol:" + string(entry.Symbol)) + if entry.Terms != nil { + sb.WriteString("terms:") + terms := entry.Terms + if terms.Amount != nil { + // nolint:goconst + sb.WriteString("amount:" + terms.Amount.String()) + } + if terms.Cap != nil { + sb.WriteString("cap:" + terms.Cap.String()) + } + if terms.HeightStart != nil { + sb.WriteString("heightStart:" + strconv.Itoa(int(*terms.HeightStart))) + } + if terms.HeightEnd != nil { + sb.WriteString("heightEnd:" + strconv.Itoa(int(*terms.HeightEnd))) + } + if terms.OffsetStart != nil { + sb.WriteString("offsetStart:" + strconv.Itoa(int(*terms.OffsetStart))) + } + if terms.OffsetEnd != nil { + sb.WriteString("offsetEnd:" + strconv.Itoa(int(*terms.OffsetEnd))) + } + } + sb.WriteString("turbo:" + strconv.FormatBool(entry.Turbo)) + sb.WriteString("etchingBlock:" + strconv.Itoa(int(entry.EtchingBlock))) + sb.WriteString("etchingTxHash:" + entry.EtchingTxHash.String()) + sb.WriteString("etchedAt:" + 
strconv.Itoa(int(entry.EtchedAt.Unix()))) + sb.WriteString(";") + return []byte(sb.String()) +} + +func serializeNewRuneEntryState(entry *runes.RuneEntry) []byte { + var sb strings.Builder + sb.WriteString("newRuneEntryState:") + // write only mutable states + sb.WriteString("runeId:" + entry.RuneId.String()) + sb.WriteString("mints:" + entry.Mints.String()) + sb.WriteString("burnedAmount:" + entry.BurnedAmount.String()) + if entry.CompletedAtHeight != nil { + sb.WriteString("completedAtHeight:" + strconv.Itoa(int(*entry.CompletedAtHeight))) + sb.WriteString("completedAt:" + strconv.Itoa(int(entry.CompletedAt.Unix()))) + } + sb.WriteString(";") + return []byte(sb.String()) +} + +func serializeNewOutPointBalances(outPointBalances map[wire.OutPoint][]*entity.OutPointBalance) []byte { + var sb strings.Builder + sb.WriteString("newOutPointBalances:") + + // collect balance values + newBalances := make([]*entity.OutPointBalance, 0) + for _, balances := range outPointBalances { + newBalances = append(newBalances, balances...) 
+ } + + // sort balances to ensure order + slices.SortFunc(newBalances, func(t1, t2 *entity.OutPointBalance) int { + // sort by outpoint first + if t1.OutPoint != t2.OutPoint { + if t1.OutPoint.Hash != t2.OutPoint.Hash { + return bytes.Compare(t1.OutPoint.Hash[:], t2.OutPoint.Hash[:]) + } + return int(t1.OutPoint.Index) - int(t2.OutPoint.Index) + } + // sort by runeId + return t1.RuneId.Cmp(t2.RuneId) + }) + for _, balance := range newBalances { + sb.WriteString("outPoint:") + sb.WriteString("hash:") + sb.Write(balance.OutPoint.Hash[:]) + sb.WriteString("index:" + strconv.Itoa(int(balance.OutPoint.Index))) + sb.WriteString("pkScript:") + sb.Write(balance.PkScript) + sb.WriteString("runeId:" + balance.RuneId.String()) + sb.WriteString("amount:" + balance.Amount.String()) + sb.WriteString(";") + } + return []byte(sb.String()) +} + +func serializeSpendOutPoints(spendOutPoints []wire.OutPoint) []byte { + var sb strings.Builder + sb.WriteString("spendOutPoints:") + // sort outpoints to ensure order + slices.SortFunc(spendOutPoints, func(t1, t2 wire.OutPoint) int { + if t1.Hash != t2.Hash { + return bytes.Compare(t1.Hash[:], t2.Hash[:]) + } + return int(t1.Index) - int(t2.Index) + }) + for _, outPoint := range spendOutPoints { + sb.WriteString("hash:") + sb.Write(outPoint.Hash[:]) + sb.WriteString("index:" + strconv.Itoa(int(outPoint.Index))) + sb.WriteString(";") + } + return []byte(sb.String()) +} + +func serializeNewBalances(balances map[string]map[runes.RuneId]uint128.Uint128) ([]byte, error) { + var sb strings.Builder + sb.WriteString("newBalances:") + + pkScriptStrs := lo.Keys(balances) + // sort pkScripts to ensure order + slices.SortFunc(pkScriptStrs, func(t1, t2 string) int { + return strings.Compare(t1, t2) + }) + for _, pkScriptStr := range pkScriptStrs { + runeIds := lo.Keys(balances[pkScriptStr]) + // sort runeIds to ensure order + slices.SortFunc(runeIds, func(t1, t2 runes.RuneId) int { + return t1.Cmp(t2) + }) + pkScript, err := 
hex.DecodeString(pkScriptStr) + if err != nil { + return nil, errors.Wrap(err, "failed to decode pkScript") + } + for _, runeId := range runeIds { + sb.WriteString("pkScript:") + sb.Write(pkScript) + sb.WriteString("runeId:" + runeId.String()) + sb.WriteString("amount:" + balances[pkScriptStr][runeId].String()) + sb.WriteString(";") + } + } + return []byte(sb.String()), nil +} + +func serializeRuneTxs(txs []*entity.RuneTransaction) ([]byte, error) { + var sb strings.Builder + + slices.SortFunc(txs, func(t1, t2 *entity.RuneTransaction) int { + if t1.BlockHeight != t2.BlockHeight { + return int(t1.BlockHeight) - int(t2.BlockHeight) + } + return int(t1.Index) - int(t2.Index) + }) + + sb.WriteString("txs:") + for _, tx := range txs { + sb.WriteString("hash:") + sb.Write(tx.Hash[:]) + sb.WriteString("blockHeight:" + strconv.Itoa(int(tx.BlockHeight))) + sb.WriteString("index:" + strconv.Itoa(int(tx.Index))) + + writeOutPointBalance := func(ob *entity.TxInputOutput) { + sb.WriteString("pkScript:") + sb.Write(ob.PkScript) + sb.WriteString("runeId:" + ob.RuneId.String()) + sb.WriteString("amount:" + ob.Amount.String()) + sb.WriteString("index:" + strconv.Itoa(int(ob.Index))) + sb.WriteString("txHash:") + sb.Write(ob.TxHash[:]) + sb.WriteString("txOutIndex:" + strconv.Itoa(int(ob.TxOutIndex))) + sb.WriteString(";") + } + // sort inputs to ensure order + slices.SortFunc(tx.Inputs, func(t1, t2 *entity.TxInputOutput) int { + if t1.Index != t2.Index { + return int(t1.Index) - int(t2.Index) + } + return t1.RuneId.Cmp(t2.RuneId) + }) + + sb.WriteString("in:") + for _, in := range tx.Inputs { + writeOutPointBalance(in) + } + // sort outputs to ensure order + slices.SortFunc(tx.Outputs, func(t1, t2 *entity.TxInputOutput) int { + if t1.Index != t2.Index { + return int(t1.Index) - int(t2.Index) + } + return t1.RuneId.Cmp(t2.RuneId) + }) + sb.WriteString("out:") + for _, out := range tx.Outputs { + writeOutPointBalance(out) + } + + mintsKeys := lo.Keys(tx.Mints) + 
slices.SortFunc(mintsKeys, func(t1, t2 runes.RuneId) int { + return t1.Cmp(t2) + }) + sb.WriteString("mints:") + for _, runeId := range mintsKeys { + amount := tx.Mints[runeId] + sb.WriteString(runeId.String()) + sb.WriteString(amount.String()) + sb.WriteString(";") + } + + burnsKeys := lo.Keys(tx.Burns) + slices.SortFunc(burnsKeys, func(t1, t2 runes.RuneId) int { + return t1.Cmp(t2) + }) + sb.WriteString("burns:") + for _, runeId := range burnsKeys { + amount := tx.Burns[runeId] + sb.WriteString(runeId.String()) + sb.WriteString(amount.String()) + sb.WriteString(";") + } + sb.WriteString("runeEtched:" + strconv.FormatBool(tx.RuneEtched)) + + sb.Write(serializeRunestoneForEventHash(tx.Runestone)) + sb.WriteString(";") + } + return []byte(sb.String()), nil +} + +func serializeRunestoneForEventHash(r *runes.Runestone) []byte { + if r == nil { + return []byte("rune:nil") + } + var sb strings.Builder + sb.WriteString("rune:") + if r.Etching != nil { + etching := r.Etching + sb.WriteString("etching:") + if etching.Divisibility != nil { + sb.WriteString("divisibility:" + strconv.Itoa(int(*etching.Divisibility))) + } + if etching.Premine != nil { + sb.WriteString("premine:" + etching.Premine.String()) + } + if etching.Rune != nil { + sb.WriteString("rune:" + etching.Rune.String()) + } + if etching.Spacers != nil { + sb.WriteString("spacers:" + strconv.Itoa(int(*etching.Spacers))) + } + if etching.Symbol != nil { + sb.WriteString("symbol:" + string(*etching.Symbol)) + } + if etching.Terms != nil { + terms := etching.Terms + if terms.Amount != nil { + sb.WriteString("amount:" + terms.Amount.String()) + } + if terms.Cap != nil { + sb.WriteString("cap:" + terms.Cap.String()) + } + if terms.HeightStart != nil { + sb.WriteString("heightStart:" + strconv.Itoa(int(*terms.HeightStart))) + } + if terms.HeightEnd != nil { + sb.WriteString("heightEnd:" + strconv.Itoa(int(*terms.HeightEnd))) + } + if terms.OffsetStart != nil { + sb.WriteString("offsetStart:" + 
strconv.Itoa(int(*terms.OffsetStart))) + } + if terms.OffsetEnd != nil { + sb.WriteString("offsetEnd:" + strconv.Itoa(int(*terms.OffsetEnd))) + } + } + if etching.Turbo { + sb.WriteString("turbo:" + strconv.FormatBool(etching.Turbo)) + } + } + if len(r.Edicts) > 0 { + sb.WriteString("edicts:") + // don't sort edicts, order must be kept the same because of delta encoding + for _, edict := range r.Edicts { + sb.WriteString(edict.Id.String() + edict.Amount.String() + strconv.Itoa(edict.Output) + ";") + } + } + if r.Mint != nil { + sb.WriteString("mint:" + r.Mint.String()) + } + if r.Pointer != nil { + sb.WriteString("pointer:" + strconv.Itoa(int(*r.Pointer))) + } + sb.WriteString("cenotaph:" + strconv.FormatBool(r.Cenotaph)) + sb.WriteString("flaws:" + strconv.Itoa(int(r.Flaws))) + return []byte(sb.String()) +} diff --git a/modules/runes/internal/entity/balance.go b/modules/runes/internal/entity/balance.go new file mode 100644 index 0000000..683d4ea --- /dev/null +++ b/modules/runes/internal/entity/balance.go @@ -0,0 +1,14 @@ +package entity + +import ( + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" +) + +type Balance struct { + PkScript []byte + Amount uint128.Uint128 + RuneId runes.RuneId + // BlockHeight last updated block height + BlockHeight uint64 +} diff --git a/modules/runes/internal/entity/indexed_block.go b/modules/runes/internal/entity/indexed_block.go new file mode 100644 index 0000000..007e830 --- /dev/null +++ b/modules/runes/internal/entity/indexed_block.go @@ -0,0 +1,11 @@ +package entity + +import "github.com/btcsuite/btcd/chaincfg/chainhash" + +type IndexedBlock struct { + Height int64 + Hash chainhash.Hash + PrevHash chainhash.Hash + EventHash chainhash.Hash + CumulativeEventHash chainhash.Hash +} diff --git a/modules/runes/internal/entity/indexer_state.go b/modules/runes/internal/entity/indexer_state.go new file mode 100644 index 0000000..b7f9dfc --- /dev/null +++ 
b/modules/runes/internal/entity/indexer_state.go @@ -0,0 +1,9 @@ +package entity + +import "time" + +type IndexerState struct { + CreatedAt time.Time + DBVersion int32 + EventHashVersion int32 +} diff --git a/modules/runes/internal/entity/outpoint_balance.go b/modules/runes/internal/entity/outpoint_balance.go new file mode 100644 index 0000000..4422b7d --- /dev/null +++ b/modules/runes/internal/entity/outpoint_balance.go @@ -0,0 +1,16 @@ +package entity + +import ( + "github.com/btcsuite/btcd/wire" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" +) + +type OutPointBalance struct { + RuneId runes.RuneId + PkScript []byte + OutPoint wire.OutPoint + Amount uint128.Uint128 + BlockHeight uint64 + SpentHeight *uint64 +} diff --git a/modules/runes/internal/entity/rune_transaction.go b/modules/runes/internal/entity/rune_transaction.go new file mode 100644 index 0000000..901f684 --- /dev/null +++ b/modules/runes/internal/entity/rune_transaction.go @@ -0,0 +1,76 @@ +package entity + +import ( + "encoding/hex" + "encoding/json" + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" +) + +type TxInputOutput struct { + PkScript []byte + RuneId runes.RuneId + Amount uint128.Uint128 + Index uint32 + TxHash chainhash.Hash + TxOutIndex uint32 +} + +type txInputOutputJSON struct { + PkScript string `json:"pkScript"` + RuneId runes.RuneId `json:"runeId"` + Amount uint128.Uint128 `json:"amount"` + Index uint32 `json:"index"` + TxHash chainhash.Hash `json:"txHash"` + TxOutIndex uint32 `json:"txOutIndex"` +} + +func (o TxInputOutput) MarshalJSON() ([]byte, error) { + bytes, err := json.Marshal(txInputOutputJSON{ + PkScript: hex.EncodeToString(o.PkScript), + RuneId: o.RuneId, + Amount: o.Amount, + Index: o.Index, + TxHash: o.TxHash, + TxOutIndex: o.TxOutIndex, + }) + if err != nil { + return nil, 
errors.WithStack(err) + } + return bytes, nil +} + +func (o *TxInputOutput) UnmarshalJSON(data []byte) error { + var aux txInputOutputJSON + if err := json.Unmarshal(data, &aux); err != nil { + return errors.WithStack(err) + } + pkScript, err := hex.DecodeString(aux.PkScript) + if err != nil { + return errors.WithStack(err) + } + o.PkScript = pkScript + o.RuneId = aux.RuneId + o.Amount = aux.Amount + o.Index = aux.Index + o.TxHash = aux.TxHash + o.TxOutIndex = aux.TxOutIndex + return nil +} + +type RuneTransaction struct { + Hash chainhash.Hash + BlockHeight uint64 + Index uint32 + Timestamp time.Time + Inputs []*TxInputOutput + Outputs []*TxInputOutput + Mints map[runes.RuneId]uint128.Uint128 + Burns map[runes.RuneId]uint128.Uint128 + Runestone *runes.Runestone + RuneEtched bool +} diff --git a/modules/runes/internal/entity/rune_transaction_test.go b/modules/runes/internal/entity/rune_transaction_test.go new file mode 100644 index 0000000..573f44c --- /dev/null +++ b/modules/runes/internal/entity/rune_transaction_test.go @@ -0,0 +1,32 @@ +package entity + +import ( + "encoding/hex" + "encoding/json" + "testing" + + "github.com/Cleverse/go-utilities/utils" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" + "github.com/stretchr/testify/assert" +) + +func TestTxInputOutputJSON(t *testing.T) { + ob := TxInputOutput{ + PkScript: utils.Must(hex.DecodeString("51203daaca9b82a51aca960c1491588246029d7e0fc49e0abdbcc8fd17574be5c74b")), + RuneId: runes.RuneId{BlockHeight: 1, TxIndex: 2}, + Amount: uint128.From64(100), + Index: 1, + TxHash: *utils.Must(chainhash.NewHashFromStr("3ea1b497b25993adf3f2c8dae1470721316a45c82600798c14d0425039c410ad")), + TxOutIndex: 2, + } + bytes, err := json.Marshal(ob) + assert.NoError(t, err) + t.Log(string(bytes)) + + var parsedOB TxInputOutput + err = json.Unmarshal(bytes, &parsedOB) + assert.NoError(t, err) + assert.Equal(t, ob, parsedOB) +} diff 
--git a/modules/runes/processor.go b/modules/runes/processor.go new file mode 100644 index 0000000..89724dc --- /dev/null +++ b/modules/runes/processor.go @@ -0,0 +1,231 @@ +package runes + +import ( + "context" + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/indexers" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/modules/bitcoin/btcclient" + "github.com/gaze-network/indexer-network/modules/runes/datagateway" + "github.com/gaze-network/indexer-network/modules/runes/internal/entity" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/gaze-network/indexer-network/pkg/logger/slogx" + "github.com/gaze-network/indexer-network/pkg/reportingclient" + "github.com/gaze-network/uint128" + "github.com/samber/lo" +) + +var _ indexers.BitcoinProcessor = (*Processor)(nil) + +type Processor struct { + runesDg datagateway.RunesDataGateway + indexerInfoDg datagateway.IndexerInfoDataGateway + bitcoinClient btcclient.Contract + bitcoinDataSource indexers.BitcoinDatasource + network common.Network + reportingClient *reportingclient.ReportingClient + + newRuneEntries map[runes.RuneId]*runes.RuneEntry + newRuneEntryStates map[runes.RuneId]*runes.RuneEntry + newOutPointBalances map[wire.OutPoint][]*entity.OutPointBalance + newSpendOutPoints []wire.OutPoint + newBalances map[string]map[runes.RuneId]uint128.Uint128 // pkScript(hex) -> runeId -> amount + newRuneTxs []*entity.RuneTransaction +} + +func NewProcessor(runesDg datagateway.RunesDataGateway, indexerInfoDg datagateway.IndexerInfoDataGateway, bitcoinClient btcclient.Contract, bitcoinDataSource indexers.BitcoinDatasource, network common.Network, reportingClient 
*reportingclient.ReportingClient) *Processor { + return &Processor{ + runesDg: runesDg, + indexerInfoDg: indexerInfoDg, + bitcoinClient: bitcoinClient, + bitcoinDataSource: bitcoinDataSource, + network: network, + reportingClient: reportingClient, + newRuneEntries: make(map[runes.RuneId]*runes.RuneEntry), + newRuneEntryStates: make(map[runes.RuneId]*runes.RuneEntry), + newOutPointBalances: make(map[wire.OutPoint][]*entity.OutPointBalance), + newSpendOutPoints: make([]wire.OutPoint, 0), + newBalances: make(map[string]map[runes.RuneId]uint128.Uint128), + newRuneTxs: make([]*entity.RuneTransaction, 0), + } +} + +var ( + ErrDBVersionMismatch = errors.New("db version mismatch: please migrate db") + ErrEventHashVersionMismatch = errors.New("event hash version mismatch: please reset db and reindex") +) + +func (p *Processor) VerifyStates(ctx context.Context) error { + // TODO: ensure db is migrated + if err := p.ensureValidState(ctx); err != nil { + return errors.Wrap(err, "error during ensureValidState") + } + if p.network == common.NetworkMainnet { + if err := p.ensureGenesisRune(ctx); err != nil { + return errors.Wrap(err, "error during ensureGenesisRune") + } + } + if p.reportingClient != nil { + if err := p.reportingClient.SubmitNodeReport(ctx, "runes", p.network); err != nil { + return errors.Wrap(err, "failed to submit node report") + } + } + return nil +} + +func (p *Processor) ensureValidState(ctx context.Context) error { + indexerState, err := p.indexerInfoDg.GetLatestIndexerState(ctx) + if err != nil && !errors.Is(err, errs.NotFound) { + return errors.Wrap(err, "failed to get latest indexer state") + } + // if not found, set indexer state + if errors.Is(err, errs.NotFound) { + if err := p.indexerInfoDg.SetIndexerState(ctx, entity.IndexerState{ + DBVersion: DBVersion, + EventHashVersion: EventHashVersion, + }); err != nil { + return errors.Wrap(err, "failed to set indexer state") + } + } else { + if indexerState.DBVersion != DBVersion { + return 
errors.Wrapf(errs.ConflictSetting, "db version mismatch: current version is %d. Please upgrade to version %d", indexerState.DBVersion, DBVersion) + } + if indexerState.EventHashVersion != EventHashVersion { + return errors.Wrapf(errs.ConflictSetting, "event version mismatch: current version is %d, expected version is %d. Please reset rune's db first.", indexerState.EventHashVersion, EventHashVersion) + } + } + + _, network, err := p.indexerInfoDg.GetLatestIndexerStats(ctx) + if err != nil && !errors.Is(err, errs.NotFound) { + return errors.Wrap(err, "failed to get latest indexer stats") + } + // if found, verify indexer stats + if err == nil { + if network != p.network { + return errors.Wrapf(errs.ConflictSetting, "network mismatch: latest indexed network is %s, configured network is %s. If you want to change the network, please reset the database", network, p.network) + } + } + if err := p.indexerInfoDg.UpdateIndexerStats(ctx, p.network.String(), p.network); err != nil { + return errors.Wrap(err, "failed to update indexer stats") + } + return nil +} + +var genesisRuneId = runes.RuneId{BlockHeight: 1, TxIndex: 0} + +func (p *Processor) ensureGenesisRune(ctx context.Context) error { + _, err := p.runesDg.GetRuneEntryByRuneId(ctx, genesisRuneId) + if err != nil && !errors.Is(err, errs.NotFound) { + return errors.Wrap(err, "failed to get genesis rune entry") + } + if errors.Is(err, errs.NotFound) { + runeEntry := &runes.RuneEntry{ + RuneId: genesisRuneId, + Number: 0, + Divisibility: 0, + Premine: uint128.Zero, + SpacedRune: runes.NewSpacedRune(runes.NewRune(2055900680524219742), 0b10000000), + Symbol: '\u29c9', + Terms: &runes.Terms{ + Amount: lo.ToPtr(uint128.From64(1)), + Cap: &uint128.Max, + HeightStart: lo.ToPtr(uint64(common.HalvingInterval * 4)), + HeightEnd: lo.ToPtr(uint64(common.HalvingInterval * 5)), + OffsetStart: nil, + OffsetEnd: nil, + }, + Turbo: true, + Mints: uint128.Zero, + BurnedAmount: uint128.Zero, + CompletedAt: time.Time{}, + CompletedAtHeight: nil, + EtchingBlock: 1, 
+ EtchingTxHash: chainhash.Hash{}, + EtchedAt: time.Time{}, + } + if err := p.runesDg.CreateRuneEntry(ctx, runeEntry, genesisRuneId.BlockHeight); err != nil { + return errors.Wrap(err, "failed to create genesis rune entry") + } + } + return nil +} + +func (p *Processor) Name() string { + return "runes" +} + +func (p *Processor) CurrentBlock(ctx context.Context) (types.BlockHeader, error) { + blockHeader, err := p.runesDg.GetLatestBlock(ctx) + if err != nil { + if errors.Is(err, errs.NotFound) { + return startingBlockHeader[p.network], nil + } + return types.BlockHeader{}, errors.Wrap(err, "failed to get latest block") + } + return blockHeader, nil +} + +// warning: GetIndexedBlock currently returns a types.BlockHeader with only Height, Hash fields populated. +// This is because it is known that all usage of this function only requires these fields. In the future, we may want to populate all fields for type safety. +func (p *Processor) GetIndexedBlock(ctx context.Context, height int64) (types.BlockHeader, error) { + block, err := p.runesDg.GetIndexedBlockByHeight(ctx, height) + if err != nil { + return types.BlockHeader{}, errors.Wrap(err, "failed to get indexed block") + } + return types.BlockHeader{ + Height: block.Height, + Hash: block.Hash, + }, nil +} + +func (p *Processor) RevertData(ctx context.Context, from int64) error { + runesDgTx, err := p.runesDg.BeginRunesTx(ctx) + if err != nil { + return errors.Wrap(err, "failed to begin transaction") + } + defer func() { + if err := runesDgTx.Rollback(ctx); err != nil { + logger.WarnContext(ctx, "failed to rollback transaction", + slogx.Error(err), + slogx.String("event", "rollback_runes_revert"), + ) + } + }() + + if err := runesDgTx.DeleteIndexedBlockSinceHeight(ctx, uint64(from)); err != nil { + return errors.Wrap(err, "failed to delete indexed blocks") + } + if err := runesDgTx.DeleteRuneEntriesSinceHeight(ctx, uint64(from)); err != nil { + return errors.Wrap(err, "failed to delete rune entries") + } + if err := 
runesDgTx.DeleteRuneEntryStatesSinceHeight(ctx, uint64(from)); err != nil { + return errors.Wrap(err, "failed to delete rune entry states") + } + if err := runesDgTx.DeleteRuneTransactionsSinceHeight(ctx, uint64(from)); err != nil { + return errors.Wrap(err, "failed to delete rune transactions") + } + if err := runesDgTx.DeleteRunestonesSinceHeight(ctx, uint64(from)); err != nil { + return errors.Wrap(err, "failed to delete runestones") + } + if err := runesDgTx.DeleteOutPointBalancesSinceHeight(ctx, uint64(from)); err != nil { + return errors.Wrap(err, "failed to delete outpoint balances") + } + if err := runesDgTx.UnspendOutPointBalancesSinceHeight(ctx, uint64(from)); err != nil { + return errors.Wrap(err, "failed to unspend outpoint balances") + } + if err := runesDgTx.DeleteRuneBalancesSinceHeight(ctx, uint64(from)); err != nil { + return errors.Wrap(err, "failed to delete rune balances") + } + + if err := runesDgTx.Commit(ctx); err != nil { + return errors.Wrap(err, "failed to commit transaction") + } + return nil +} diff --git a/modules/runes/processor_process.go b/modules/runes/processor_process.go new file mode 100644 index 0000000..8d4baf9 --- /dev/null +++ b/modules/runes/processor_process.go @@ -0,0 +1,807 @@ +package runes + +import ( + "bytes" + "context" + "encoding/hex" + "log/slog" + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/modules/runes/datagateway" + "github.com/gaze-network/indexer-network/modules/runes/internal/entity" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/gaze-network/indexer-network/pkg/logger/slogx" + "github.com/gaze-network/indexer-network/pkg/reportingclient" + 
"github.com/gaze-network/uint128" + "github.com/samber/lo" +) + +func (p *Processor) Process(ctx context.Context, blocks []*types.Block) error { + for _, block := range blocks { + ctx := logger.WithContext(ctx, slog.Int64("height", block.Header.Height)) + logger.DebugContext(ctx, "Processing new block", slog.Int("txs", len(block.Transactions))) + + for _, tx := range block.Transactions { + if err := p.processTx(ctx, tx, block.Header); err != nil { + return errors.Wrap(err, "failed to process tx") + } + } + + if err := p.flushBlock(ctx, block.Header); err != nil { + return errors.Wrap(err, "failed to flush block") + } + + logger.DebugContext(ctx, "Inserted new block") + } + return nil +} + +func (p *Processor) processTx(ctx context.Context, tx *types.Transaction, blockHeader types.BlockHeader) error { + if tx.BlockHeight < int64(runes.FirstRuneHeight(p.network)) { + // prevent processing txs before the activation height + return nil + } + runestone, err := runes.DecipherRunestone(tx) + if err != nil { + return errors.Wrap(err, "failed to decipher runestone") + } + + inputBalances, err := p.getInputBalances(ctx, tx.TxIn) + if err != nil { + return errors.Wrap(err, "failed to get input balances") + } + + if runestone == nil && len(inputBalances) == 0 { + // no runes involved in this tx + return nil + } + + unallocated := make(map[runes.RuneId]uint128.Uint128) + allocated := make(map[int]map[runes.RuneId]uint128.Uint128) + for _, balances := range inputBalances { + for runeId, balance := range balances { + unallocated[runeId] = unallocated[runeId].Add(balance.Amount) + p.newSpendOutPoints = append(p.newSpendOutPoints, balance.OutPoint) + } + } + + allocate := func(output int, runeId runes.RuneId, amount uint128.Uint128) { + if _, ok := unallocated[runeId]; !ok { + return + } + // cap amount to unallocated amount + if amount.Cmp(unallocated[runeId]) > 0 { + amount = unallocated[runeId] + } + if amount.IsZero() { + return + } + if _, ok := allocated[output]; !ok { + 
allocated[output] = make(map[runes.RuneId]uint128.Uint128) + } + allocated[output][runeId] = allocated[output][runeId].Add(amount) + unallocated[runeId] = unallocated[runeId].Sub(amount) + } + + mints := make(map[runes.RuneId]uint128.Uint128) + var runeEtched bool + if runestone != nil { + if runestone.Mint != nil { + mintRuneId := *runestone.Mint + amount, err := p.mint(ctx, mintRuneId, blockHeader) + if err != nil { + return errors.Wrap(err, "error during mint") + } + if !amount.IsZero() { + unallocated[mintRuneId] = unallocated[mintRuneId].Add(amount) + mints[mintRuneId] = amount + } + } + + etching, etchedRuneId, etchedRune, err := p.getEtchedRune(ctx, tx, runestone) + if err != nil { + return errors.Wrap(err, "error during getting etched rune") + } + if etching != nil { + runeEtched = true + } + + if !runestone.Cenotaph { + // include premine in unallocated, if exists + if etching != nil { + premine := lo.FromPtr(etching.Premine) + if !premine.IsZero() { + unallocated[etchedRuneId] = unallocated[etchedRuneId].Add(premine) + mints[etchedRuneId] = mints[etchedRuneId].Add(premine) + } + } + + // allocate runes + for _, edict := range runestone.Edicts { + // sanity check, should not happen since it is already checked in runes.MessageFromIntegers + if edict.Output > len(tx.TxOut) { + return errors.New("edict output index is out of range") + } + + var emptyRuneId runes.RuneId + // if rune id is empty, then use etched rune id + if edict.Id == emptyRuneId { + // empty rune id is only allowed for runestones with etching + if etching == nil { + continue + } + edict.Id = etchedRuneId + } + + if edict.Output == len(tx.TxOut) { + // if output == len(tx.TxOut), then allocate the amount to all outputs + + // find all non-OP_RETURN outputs + var destinations []int + for i, txOut := range tx.TxOut { + if txOut.IsOpReturn() { + destinations = append(destinations, i) + } + } + + if len(destinations) > 0 { + if edict.Amount.IsZero() { + // if amount is zero, divide ALL 
unallocated amount to all destinations + amount, remainder := unallocated[edict.Id].QuoRem64(uint64(len(destinations))) + for i, dest := range destinations { + // if i < remainder, then add 1 to amount + allocate(dest, edict.Id, lo.Ternary(i < int(remainder), amount.Add64(1), amount)) + } + } else { + // if amount is not zero, allocate the amount to all destinations, sequentially. + // If there is no more amount to allocate the rest of outputs, then no more will be allocated. + for _, dest := range destinations { + allocate(dest, edict.Id, edict.Amount) + } + } + } + } else { + // allocate amount to specific output + var amount uint128.Uint128 + if edict.Amount.IsZero() { + // if amount is zero, allocate the whole unallocated amount + amount = unallocated[edict.Id] + } else { + amount = edict.Amount + } + + allocate(edict.Output, edict.Id, amount) + } + } + } + + if etching != nil { + if err := p.createRuneEntry(ctx, runestone, etchedRuneId, etchedRune, tx, blockHeader); err != nil { + return errors.Wrap(err, "failed to create rune entry") + } + } + } + + burns := make(map[runes.RuneId]uint128.Uint128) + if runestone != nil && runestone.Cenotaph { + // all input runes and minted runes in a tx with cenotaph are burned + for runeId, amount := range unallocated { + burns[runeId] = burns[runeId].Add(amount) + } + } else { + // assign all un-allocated runes to the default output (pointer), or the first non + // OP_RETURN output if there is no default, or if the default output exceeds the number of outputs + var pointer *uint64 + if runestone != nil && !runestone.Cenotaph && runestone.Pointer != nil && *runestone.Pointer < uint64(len(tx.TxOut)) { + pointer = runestone.Pointer + } + + // if no pointer is provided, use the first non-OP_RETURN output + if pointer == nil { + for i, txOut := range tx.TxOut { + if !txOut.IsOpReturn() { + pointer = lo.ToPtr(uint64(i)) + break + } + } + } + + if pointer != nil { + // allocate all unallocated runes to the pointer + output := 
int(*pointer) + for runeId, amount := range unallocated { + allocate(output, runeId, amount) + } + } else { + // if pointer is still nil, then no output is available. Burn all unallocated runes. + for runeId, amount := range unallocated { + burns[runeId] = burns[runeId].Add(amount) + } + } + } + + // update outpoint balances + for output, balances := range allocated { + if tx.TxOut[output].IsOpReturn() { + // burn all allocated runes to OP_RETURN outputs + for runeId, amount := range balances { + burns[runeId] = burns[runeId].Add(amount) + } + continue + } + + outPoint := wire.OutPoint{ + Hash: tx.TxHash, + Index: uint32(output), + } + for runeId, amount := range balances { + p.newOutPointBalances[outPoint] = append(p.newOutPointBalances[outPoint], &entity.OutPointBalance{ + RuneId: runeId, + PkScript: tx.TxOut[output].PkScript, + OutPoint: outPoint, + Amount: amount, + BlockHeight: uint64(tx.BlockHeight), + SpentHeight: nil, + }) + } + } + + if err := p.updateNewBalances(ctx, tx, inputBalances, allocated); err != nil { + return errors.Wrap(err, "failed to update new balances") + } + + // increment burned amounts in rune entries + if err := p.incrementBurnedAmount(ctx, burns); err != nil { + return errors.Wrap(err, "failed to update burned amount") + } + + // construct RuneTransaction + runeTx := entity.RuneTransaction{ + Hash: tx.TxHash, + BlockHeight: uint64(blockHeader.Height), + Index: tx.Index, + Timestamp: blockHeader.Timestamp, + Inputs: make([]*entity.TxInputOutput, 0), + Outputs: make([]*entity.TxInputOutput, 0), + Mints: mints, + Burns: burns, + Runestone: runestone, + RuneEtched: runeEtched, + } + for inputIndex, balances := range inputBalances { + for runeId, balance := range balances { + runeTx.Inputs = append(runeTx.Inputs, &entity.TxInputOutput{ + PkScript: balance.PkScript, + RuneId: runeId, + Amount: balance.Amount, + Index: uint32(inputIndex), + TxHash: tx.TxIn[inputIndex].PreviousOutTxHash, + TxOutIndex: tx.TxIn[inputIndex].PreviousOutIndex, + }) 
+ } + } + for outputIndex, balances := range allocated { + pkScript := tx.TxOut[outputIndex].PkScript + for runeId, amount := range balances { + runeTx.Outputs = append(runeTx.Outputs, &entity.TxInputOutput{ + PkScript: pkScript, + RuneId: runeId, + Amount: amount, + Index: uint32(outputIndex), + TxHash: tx.TxHash, + TxOutIndex: uint32(outputIndex), + }) + } + } + p.newRuneTxs = append(p.newRuneTxs, &runeTx) + return nil +} + +func (p *Processor) getInputBalances(ctx context.Context, txInputs []*types.TxIn) (map[int]map[runes.RuneId]*entity.OutPointBalance, error) { + inputBalances := make(map[int]map[runes.RuneId]*entity.OutPointBalance) + for i, txIn := range txInputs { + balances, err := p.getRunesBalancesAtOutPoint(ctx, wire.OutPoint{ + Hash: txIn.PreviousOutTxHash, + Index: txIn.PreviousOutIndex, + }) + if err != nil { + return nil, errors.Wrap(err, "failed to get runes balances at outpoint") + } + + if len(balances) > 0 { + inputBalances[i] = balances + } + } + return inputBalances, nil +} + +func (p *Processor) updateNewBalances(ctx context.Context, tx *types.Transaction, inputBalances map[int]map[runes.RuneId]*entity.OutPointBalance, allocated map[int]map[runes.RuneId]uint128.Uint128) error { + // getBalanceFromDg returns the current balance of the pkScript and runeId since last flush + getBalanceFromDg := func(ctx context.Context, pkScript []byte, runeId runes.RuneId) (uint128.Uint128, error) { + balance, err := p.runesDg.GetBalanceByPkScriptAndRuneId(ctx, pkScript, runeId, uint64(tx.BlockHeight-1)) + if err != nil { + if errors.Is(err, errs.NotFound) { + return uint128.Zero, nil + } + return uint128.Uint128{}, errors.Wrap(err, "failed to get balance by pk script and rune id") + } + return balance.Amount, nil + } + + // deduct balances used in inputs + for _, balances := range inputBalances { + for runeId, balance := range balances { + pkScript := balance.PkScript + pkScriptStr := hex.EncodeToString(pkScript) + if _, ok := p.newBalances[pkScriptStr]; !ok { 
+ p.newBalances[pkScriptStr] = make(map[runes.RuneId]uint128.Uint128) + } + if _, ok := p.newBalances[pkScriptStr][runeId]; !ok { + balance, err := getBalanceFromDg(ctx, pkScript, runeId) + if err != nil { + return errors.WithStack(err) + } + p.newBalances[pkScriptStr][runeId] = balance + } + if p.newBalances[pkScriptStr][runeId].Cmp(balance.Amount) < 0 { + // total pkScript's balance is less that balance in input. This is impossible. Something is wrong. + return errors.Errorf("current balance is less than balance in input: %s", runeId) + } + p.newBalances[pkScriptStr][runeId] = p.newBalances[pkScriptStr][runeId].Sub(balance.Amount) + } + } + + // add balances allocated in outputs + for outputIndex, balances := range allocated { + pkScript := tx.TxOut[outputIndex].PkScript + pkScriptStr := hex.EncodeToString(pkScript) + for runeId, amount := range balances { + if _, ok := p.newBalances[pkScriptStr]; !ok { + p.newBalances[pkScriptStr] = make(map[runes.RuneId]uint128.Uint128) + } + if _, ok := p.newBalances[pkScriptStr][runeId]; !ok { + balance, err := getBalanceFromDg(ctx, pkScript, runeId) + if err != nil { + return errors.WithStack(err) + } + p.newBalances[pkScriptStr][runeId] = balance + } + p.newBalances[pkScriptStr][runeId] = p.newBalances[pkScriptStr][runeId].Add(amount) + } + } + + return nil +} + +func (p *Processor) mint(ctx context.Context, runeId runes.RuneId, blockHeader types.BlockHeader) (uint128.Uint128, error) { + runeEntry, err := p.getRuneEntryByRuneId(ctx, runeId) + if err != nil { + if errors.Is(err, errs.NotFound) { + return uint128.Zero, nil + } + return uint128.Uint128{}, errors.Wrap(err, "failed to get rune entry by rune id") + } + + amount, err := runeEntry.GetMintableAmount(uint64(blockHeader.Height)) + if err != nil { + return uint128.Zero, nil + } + + if err := p.incrementMintCount(ctx, runeId, blockHeader); err != nil { + return uint128.Zero, errors.Wrap(err, "failed to increment mint count") + } + return amount, nil +} + +func (p 
*Processor) getEtchedRune(ctx context.Context, tx *types.Transaction, runestone *runes.Runestone) (*runes.Etching, runes.RuneId, runes.Rune, error) { + if runestone.Etching == nil { + return nil, runes.RuneId{}, runes.Rune{}, nil + } + rune := runestone.Etching.Rune + if rune != nil { + minimumRune := runes.MinimumRuneAtHeight(p.network, uint64(tx.BlockHeight)) + if rune.Cmp(minimumRune) < 0 { + return nil, runes.RuneId{}, runes.Rune{}, nil + } + if rune.IsReserved() { + return nil, runes.RuneId{}, runes.Rune{}, nil + } + + ok, err := p.isRuneExists(ctx, *rune) + if err != nil { + return nil, runes.RuneId{}, runes.Rune{}, errors.Wrap(err, "error during check rune existence") + } + if ok { + return nil, runes.RuneId{}, runes.Rune{}, nil + } + + // check if tx commits to the rune + commit, err := p.txCommitsToRune(ctx, tx, *rune) + if err != nil { + return nil, runes.RuneId{}, runes.Rune{}, errors.Wrap(err, "error during check tx commits to rune") + } + if !commit { + return nil, runes.RuneId{}, runes.Rune{}, nil + } + } else { + rune = lo.ToPtr(runes.GetReservedRune(uint64(tx.BlockHeight), tx.Index)) + } + + runeId, err := runes.NewRuneId(uint64(tx.BlockHeight), tx.Index) + if err != nil { + return nil, runes.RuneId{}, runes.Rune{}, errors.Wrap(err, "failed to create rune id") + } + return runestone.Etching, runeId, *rune, nil +} + +func (p *Processor) txCommitsToRune(ctx context.Context, tx *types.Transaction, rune runes.Rune) (bool, error) { + commitment := rune.Commitment() + for i, txIn := range tx.TxIn { + tapscript, ok := extractTapScript(txIn.Witness) + if !ok { + continue + } + for tapscript.Next() { + // ignore errors and continue to next input + if tapscript.Err() != nil { + break + } + // check opcode is valid + if !runes.IsDataPushOpCode(tapscript.Opcode()) { + continue + } + + // tapscript must contain commitment of the rune + if !bytes.Equal(tapscript.Data(), commitment) { + continue + } + + // It is impossible to verify that input utxo is a P2TR 
output with just the input. + // Need to verify with utxo's pk script. + + prevTx, err := p.bitcoinClient.GetTransactionByHash(ctx, txIn.PreviousOutTxHash) + if err != nil && errors.Is(err, errs.NotFound) { + continue + } + if err != nil { + return false, errors.Wrapf(err, "can't get previous txout for txin `%v:%v`", tx.TxHash.String(), i) + } + pkScript := prevTx.TxOut[txIn.PreviousOutIndex].PkScript + // input utxo must be P2TR + if !txscript.IsPayToTaproot(pkScript) { + break + } + // input must be mature enough + confirmations := tx.BlockHeight - prevTx.BlockHeight + 1 + if confirmations < runes.RUNE_COMMIT_BLOCKS { + continue + } + + return true, nil + } + } + return false, nil +} + +func extractTapScript(witness [][]byte) (txscript.ScriptTokenizer, bool) { + witness = removeAnnexFromWitness(witness) + if len(witness) < 2 { + return txscript.ScriptTokenizer{}, false + } + script := witness[len(witness)-2] + + return txscript.MakeScriptTokenizer(0, script), true +} + +func removeAnnexFromWitness(witness [][]byte) [][]byte { + if len(witness) >= 2 && len(witness[len(witness)-1]) > 0 && witness[len(witness)-1][0] == txscript.TaprootAnnexTag { + return witness[:len(witness)-1] + } + return witness +} + +func (p *Processor) createRuneEntry(ctx context.Context, runestone *runes.Runestone, runeId runes.RuneId, rune runes.Rune, tx *types.Transaction, blockHeader types.BlockHeader) error { + count, err := p.countRuneEntries(ctx) + if err != nil { + return errors.Wrap(err, "failed to count rune entries") + } + + var runeEntry *runes.RuneEntry + if runestone.Cenotaph { + runeEntry = &runes.RuneEntry{ + RuneId: runeId, + Number: count, + SpacedRune: runes.NewSpacedRune(rune, 0), + Mints: uint128.Zero, + BurnedAmount: uint128.Zero, + Premine: uint128.Zero, + Symbol: '¤', + Divisibility: 0, + Terms: nil, + Turbo: false, + CompletedAt: time.Time{}, + CompletedAtHeight: nil, + EtchingBlock: uint64(tx.BlockHeight), + EtchingTxHash: tx.TxHash, + EtchedAt: blockHeader.Timestamp, 
+ } + } else { + etching := runestone.Etching + runeEntry = &runes.RuneEntry{ + RuneId: runeId, + Number: count, + SpacedRune: runes.NewSpacedRune(rune, lo.FromPtr(etching.Spacers)), + Mints: uint128.Zero, + BurnedAmount: uint128.Zero, + Premine: lo.FromPtr(etching.Premine), + Symbol: lo.FromPtrOr(etching.Symbol, '¤'), + Divisibility: lo.FromPtr(etching.Divisibility), + Terms: etching.Terms, + Turbo: etching.Turbo, + CompletedAt: time.Time{}, + CompletedAtHeight: nil, + EtchingBlock: uint64(tx.BlockHeight), + EtchingTxHash: tx.TxHash, + EtchedAt: blockHeader.Timestamp, + } + } + p.newRuneEntries[runeId] = runeEntry + p.newRuneEntryStates[runeId] = runeEntry + return nil +} + +func (p *Processor) incrementMintCount(ctx context.Context, runeId runes.RuneId, blockHeader types.BlockHeader) (err error) { + runeEntry, err := p.getRuneEntryByRuneId(ctx, runeId) + if err != nil { + return errors.Wrap(err, "failed to get rune entry by rune id") + } + + runeEntry.Mints = runeEntry.Mints.Add64(1) + if runeEntry.Mints == lo.FromPtr(runeEntry.Terms.Cap) { + runeEntry.CompletedAt = blockHeader.Timestamp + runeEntry.CompletedAtHeight = lo.ToPtr(uint64(blockHeader.Height)) + } + p.newRuneEntryStates[runeId] = runeEntry + return nil +} + +func (p *Processor) incrementBurnedAmount(ctx context.Context, burned map[runes.RuneId]uint128.Uint128) (err error) { + runeEntries := make(map[runes.RuneId]*runes.RuneEntry) + runeIdsToFetch := make([]runes.RuneId, 0) + for runeId, amount := range burned { + if amount.IsZero() { + // ignore zero burn amount + continue + } + runeEntry, ok := p.newRuneEntryStates[runeId] + if !ok { + runeIdsToFetch = append(runeIdsToFetch, runeId) + } else { + runeEntries[runeId] = runeEntry + } + } + if len(runeIdsToFetch) > 0 { + for _, runeId := range runeIdsToFetch { + runeEntry, err := p.getRuneEntryByRuneId(ctx, runeId) + if err != nil { + if errors.Is(err, errs.NotFound) { + return errors.Wrap(err, "rune entry not found") + } + return errors.Wrap(err, 
"failed to get rune entry by rune id")
			}
			runeEntries[runeId] = runeEntry
		}
	}

	// update rune entries
	for runeId, amount := range burned {
		runeEntry, ok := runeEntries[runeId]
		if !ok {
			// zero burns (and not-found entries, filtered above) are skipped
			continue
		}
		runeEntry.BurnedAmount = runeEntry.BurnedAmount.Add(amount)
		p.newRuneEntryStates[runeId] = runeEntry
	}
	return nil
}

// countRuneEntries returns the total number of rune entries: persisted
// entries in the db plus pending (not yet flushed) entries.
func (p *Processor) countRuneEntries(ctx context.Context) (uint64, error) {
	runeCountInDB, err := p.runesDg.CountRuneEntries(ctx)
	if err != nil {
		return 0, errors.Wrap(err, "failed to count rune entries in db")
	}
	return runeCountInDB + uint64(len(p.newRuneEntries)), nil
}

// getRuneEntryByRuneId returns the rune entry for runeId, preferring the
// pending state cache over the data gateway.
func (p *Processor) getRuneEntryByRuneId(ctx context.Context, runeId runes.RuneId) (*runes.RuneEntry, error) {
	runeEntry, ok := p.newRuneEntryStates[runeId]
	if ok {
		return runeEntry, nil
	}
	// not checking from p.newRuneEntries since new rune entries add to p.newRuneEntryStates as well

	runeEntry, err := p.runesDg.GetRuneEntryByRuneId(ctx, runeId)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get rune entry by rune id")
	}
	return runeEntry, nil
}

// isRuneExists reports whether a rune with this name was already etched,
// checking pending entries first and then the data gateway.
func (p *Processor) isRuneExists(ctx context.Context, rune runes.Rune) (bool, error) {
	for _, runeEntry := range p.newRuneEntries {
		if runeEntry.SpacedRune.Rune == rune {
			return true, nil
		}
	}

	_, err := p.runesDg.GetRuneIdFromRune(ctx, rune)
	if err != nil {
		if errors.Is(err, errs.NotFound) {
			return false, nil
		}
		return false, errors.Wrap(err, "failed to get rune id from rune")
	}
	return true, nil
}

// getRunesBalancesAtOutPoint returns the rune balances attached to an
// outpoint, preferring pending (same-batch, not yet flushed) balances over
// the data gateway.
func (p *Processor) getRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error) {
	if outPointBalances, ok := p.newOutPointBalances[outPoint]; ok {
		balances := make(map[runes.RuneId]*entity.OutPointBalance)
		for _, outPointBalance := range outPointBalances {
			balances[outPointBalance.RuneId] = outPointBalance
		}
		return balances, nil
	}

	balances, err := p.runesDg.GetRunesBalancesAtOutPoint(ctx, outPoint)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get runes balances at outpoint")
	}
	return balances, nil
}

// flushBlock persists all pending state accumulated by processTx for this
// block in a single datagateway transaction (rolled back on any error), then
// resets the in-memory caches and optionally submits a block report.
// NOTE: the flush order below is significant — the event hash must be
// computed and the indexed block created before the pending maps are reset.
func (p *Processor) flushBlock(ctx context.Context, blockHeader types.BlockHeader) error {
	runesDgTx, err := p.runesDg.BeginRunesTx(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to begin runes tx")
	}
	// rollback is a no-op after a successful commit; the warning only fires
	// when an actual rollback fails
	defer func() {
		if err := runesDgTx.Rollback(ctx); err != nil {
			logger.WarnContext(ctx, "failed to rollback transaction",
				slogx.Error(err),
				slogx.String("event", "rollback_runes_insertion"),
			)
		}
	}()

	// CreateIndexedBlock must be performed before other flush methods to correctly calculate event hash
	eventHash, err := p.calculateEventHash(blockHeader)
	if err != nil {
		return errors.Wrap(err, "failed to calculate event hash")
	}
	prevIndexedBlock, err := runesDgTx.GetIndexedBlockByHeight(ctx, blockHeader.Height-1)
	// the very first indexed block has no predecessor row; synthesize one
	// from the network's configured starting block header
	if err != nil && errors.Is(err, errs.NotFound) && blockHeader.Height-1 == startingBlockHeader[p.network].Height {
		prevIndexedBlock = &entity.IndexedBlock{
			Height:              startingBlockHeader[p.network].Height,
			Hash:                startingBlockHeader[p.network].Hash,
			EventHash:           chainhash.Hash{},
			CumulativeEventHash: chainhash.Hash{},
		}
		err = nil
	}
	if err != nil {
		if errors.Is(err, errs.NotFound) {
			return errors.Errorf("indexed block not found for height %d. Indexed block must be created for every Bitcoin block", blockHeader.Height)
		}
		return errors.Wrap(err, "failed to get indexed block by height")
	}
	// chain this block's event hash onto the running cumulative hash
	cumulativeEventHash := chainhash.DoubleHashH(append(prevIndexedBlock.CumulativeEventHash[:], eventHash[:]...))

	if err := runesDgTx.CreateIndexedBlock(ctx, &entity.IndexedBlock{
		Height:              blockHeader.Height,
		Hash:                blockHeader.Hash,
		PrevHash:            blockHeader.PrevBlock,
		EventHash:           eventHash,
		CumulativeEventHash: cumulativeEventHash,
	}); err != nil {
		return errors.Wrap(err, "failed to create indexed block")
	}
	// flush new rune entries
	{
		for _, runeEntry := range p.newRuneEntries {
			if err := runesDgTx.CreateRuneEntry(ctx, runeEntry, uint64(blockHeader.Height)); err != nil {
				return errors.Wrap(err, "failed to create rune entry")
			}
		}
		p.newRuneEntries = make(map[runes.RuneId]*runes.RuneEntry)
	}
	// flush new rune entry states
	{
		for _, runeEntry := range p.newRuneEntryStates {
			if err := runesDgTx.CreateRuneEntryState(ctx, runeEntry, uint64(blockHeader.Height)); err != nil {
				return errors.Wrap(err, "failed to create rune entry state")
			}
		}
		p.newRuneEntryStates = make(map[runes.RuneId]*runes.RuneEntry)
	}
	// flush new outpoint balances
	{
		newBalances := make([]*entity.OutPointBalance, 0)
		for _, balances := range p.newOutPointBalances {
			newBalances = append(newBalances, balances...)
		}
		if err := runesDgTx.CreateOutPointBalances(ctx, newBalances); err != nil {
			return errors.Wrap(err, "failed to create outpoint balances")
		}
		p.newOutPointBalances = make(map[wire.OutPoint][]*entity.OutPointBalance)
	}
	// flush new spend outpoints
	{
		for _, outPoint := range p.newSpendOutPoints {
			if err := runesDgTx.SpendOutPointBalances(ctx, outPoint, uint64(blockHeader.Height)); err != nil {
				return errors.Wrap(err, "failed to create spend outpoint")
			}
		}
		p.newSpendOutPoints = make([]wire.OutPoint, 0)
	}
	// flush new balances
	{
		params := make([]datagateway.CreateRuneBalancesParams, 0)
		for pkScriptStr, balances := range p.newBalances {
			// pkScripts are cached hex-encoded (map keys); decode back to bytes
			pkScript, err := hex.DecodeString(pkScriptStr)
			if err != nil {
				return errors.Wrap(err, "failed to decode pk script")
			}
			for runeId, balance := range balances {
				params = append(params, datagateway.CreateRuneBalancesParams{
					PkScript:    pkScript,
					RuneId:      runeId,
					Balance:     balance,
					BlockHeight: uint64(blockHeader.Height),
				})
			}
		}
		if err := runesDgTx.CreateRuneBalances(ctx, params); err != nil {
			return errors.Wrap(err, "failed to create balances at block")
		}
		p.newBalances = make(map[string]map[runes.RuneId]uint128.Uint128)
	}
	// flush new rune transactions
	{
		for _, runeTx := range p.newRuneTxs {
			if err := runesDgTx.CreateRuneTransaction(ctx, runeTx); err != nil {
				return errors.Wrap(err, "failed to create rune transaction")
			}
		}
		p.newRuneTxs = make([]*entity.RuneTransaction, 0)
	}

	if err := runesDgTx.Commit(ctx); err != nil {
		return errors.Wrap(err, "failed to commit runes tx")
	}

	// submit event to reporting system
	if p.reportingClient != nil {
		if err := p.reportingClient.SubmitBlockReport(ctx, reportingclient.SubmitBlockReportPayload{
			Type:             "runes",
			ClientVersion:    Version,
			DBVersion:        DBVersion,
			EventHashVersion: EventHashVersion,
			Network:          p.network,
			BlockHeight:      uint64(blockHeader.Height),
			BlockHash:        blockHeader.Hash,
			EventHash:
eventHash, + CumulativeEventHash: cumulativeEventHash, + }); err != nil { + return errors.Wrap(err, "failed to submit block report") + } + } + return nil +} diff --git a/modules/runes/repository/postgres/gen/batch.go b/modules/runes/repository/postgres/gen/batch.go new file mode 100644 index 0000000..177306b --- /dev/null +++ b/modules/runes/repository/postgres/gen/batch.go @@ -0,0 +1,130 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.26.0 +// source: batch.go + +package gen + +import ( + "context" + "errors" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" +) + +var ( + ErrBatchAlreadyClosed = errors.New("batch already closed") +) + +const createOutPointBalances = `-- name: CreateOutPointBalances :batchexec +INSERT INTO runes_outpoint_balances (rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height) VALUES ($1, $2, $3, $4, $5, $6, $7) +` + +type CreateOutPointBalancesBatchResults struct { + br pgx.BatchResults + tot int + closed bool +} + +type CreateOutPointBalancesParams struct { + RuneID string + Pkscript string + TxHash string + TxIdx int32 + Amount pgtype.Numeric + BlockHeight int32 + SpentHeight pgtype.Int4 +} + +func (q *Queries) CreateOutPointBalances(ctx context.Context, arg []CreateOutPointBalancesParams) *CreateOutPointBalancesBatchResults { + batch := &pgx.Batch{} + for _, a := range arg { + vals := []interface{}{ + a.RuneID, + a.Pkscript, + a.TxHash, + a.TxIdx, + a.Amount, + a.BlockHeight, + a.SpentHeight, + } + batch.Queue(createOutPointBalances, vals...) 
+ } + br := q.db.SendBatch(ctx, batch) + return &CreateOutPointBalancesBatchResults{br, len(arg), false} +} + +func (b *CreateOutPointBalancesBatchResults) Exec(f func(int, error)) { + defer b.br.Close() + for t := 0; t < b.tot; t++ { + if b.closed { + if f != nil { + f(t, ErrBatchAlreadyClosed) + } + continue + } + _, err := b.br.Exec() + if f != nil { + f(t, err) + } + } +} + +func (b *CreateOutPointBalancesBatchResults) Close() error { + b.closed = true + return b.br.Close() +} + +const createRuneBalanceAtBlock = `-- name: CreateRuneBalanceAtBlock :batchexec +INSERT INTO runes_balances (pkscript, block_height, rune_id, amount) VALUES ($1, $2, $3, $4) +` + +type CreateRuneBalanceAtBlockBatchResults struct { + br pgx.BatchResults + tot int + closed bool +} + +type CreateRuneBalanceAtBlockParams struct { + Pkscript string + BlockHeight int32 + RuneID string + Amount pgtype.Numeric +} + +func (q *Queries) CreateRuneBalanceAtBlock(ctx context.Context, arg []CreateRuneBalanceAtBlockParams) *CreateRuneBalanceAtBlockBatchResults { + batch := &pgx.Batch{} + for _, a := range arg { + vals := []interface{}{ + a.Pkscript, + a.BlockHeight, + a.RuneID, + a.Amount, + } + batch.Queue(createRuneBalanceAtBlock, vals...) + } + br := q.db.SendBatch(ctx, batch) + return &CreateRuneBalanceAtBlockBatchResults{br, len(arg), false} +} + +func (b *CreateRuneBalanceAtBlockBatchResults) Exec(f func(int, error)) { + defer b.br.Close() + for t := 0; t < b.tot; t++ { + if b.closed { + if f != nil { + f(t, ErrBatchAlreadyClosed) + } + continue + } + _, err := b.br.Exec() + if f != nil { + f(t, err) + } + } +} + +func (b *CreateRuneBalanceAtBlockBatchResults) Close() error { + b.closed = true + return b.br.Close() +} diff --git a/modules/runes/repository/postgres/gen/data.sql.go b/modules/runes/repository/postgres/gen/data.sql.go new file mode 100644 index 0000000..0428bfd --- /dev/null +++ b/modules/runes/repository/postgres/gen/data.sql.go @@ -0,0 +1,816 @@ +// Code generated by sqlc. 
DO NOT EDIT. +// versions: +// sqlc v1.26.0 +// source: data.sql + +package gen + +import ( + "context" + + "github.com/jackc/pgx/v5/pgtype" +) + +const countRuneEntries = `-- name: CountRuneEntries :one +SELECT COUNT(*) FROM runes_entries +` + +func (q *Queries) CountRuneEntries(ctx context.Context) (int64, error) { + row := q.db.QueryRow(ctx, countRuneEntries) + var count int64 + err := row.Scan(&count) + return count, err +} + +const createIndexedBlock = `-- name: CreateIndexedBlock :exec +INSERT INTO runes_indexed_blocks (hash, height, prev_hash, event_hash, cumulative_event_hash) VALUES ($1, $2, $3, $4, $5) +` + +type CreateIndexedBlockParams struct { + Hash string + Height int32 + PrevHash string + EventHash string + CumulativeEventHash string +} + +func (q *Queries) CreateIndexedBlock(ctx context.Context, arg CreateIndexedBlockParams) error { + _, err := q.db.Exec(ctx, createIndexedBlock, + arg.Hash, + arg.Height, + arg.PrevHash, + arg.EventHash, + arg.CumulativeEventHash, + ) + return err +} + +const createRuneEntry = `-- name: CreateRuneEntry :exec +INSERT INTO runes_entries (rune_id, rune, number, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) +` + +type CreateRuneEntryParams struct { + RuneID string + Rune string + Number int64 + Spacers int32 + Premine pgtype.Numeric + Symbol int32 + Divisibility int16 + Terms bool + TermsAmount pgtype.Numeric + TermsCap pgtype.Numeric + TermsHeightStart pgtype.Int4 + TermsHeightEnd pgtype.Int4 + TermsOffsetStart pgtype.Int4 + TermsOffsetEnd pgtype.Int4 + Turbo bool + EtchingBlock int32 + EtchingTxHash string + EtchedAt pgtype.Timestamp +} + +func (q *Queries) CreateRuneEntry(ctx context.Context, arg CreateRuneEntryParams) error { + _, err := q.db.Exec(ctx, createRuneEntry, + 
arg.RuneID, + arg.Rune, + arg.Number, + arg.Spacers, + arg.Premine, + arg.Symbol, + arg.Divisibility, + arg.Terms, + arg.TermsAmount, + arg.TermsCap, + arg.TermsHeightStart, + arg.TermsHeightEnd, + arg.TermsOffsetStart, + arg.TermsOffsetEnd, + arg.Turbo, + arg.EtchingBlock, + arg.EtchingTxHash, + arg.EtchedAt, + ) + return err +} + +const createRuneEntryState = `-- name: CreateRuneEntryState :exec +INSERT INTO runes_entry_states (rune_id, block_height, mints, burned_amount, completed_at, completed_at_height) VALUES ($1, $2, $3, $4, $5, $6) +` + +type CreateRuneEntryStateParams struct { + RuneID string + BlockHeight int32 + Mints pgtype.Numeric + BurnedAmount pgtype.Numeric + CompletedAt pgtype.Timestamp + CompletedAtHeight pgtype.Int4 +} + +func (q *Queries) CreateRuneEntryState(ctx context.Context, arg CreateRuneEntryStateParams) error { + _, err := q.db.Exec(ctx, createRuneEntryState, + arg.RuneID, + arg.BlockHeight, + arg.Mints, + arg.BurnedAmount, + arg.CompletedAt, + arg.CompletedAtHeight, + ) + return err +} + +const createRuneTransaction = `-- name: CreateRuneTransaction :exec +INSERT INTO runes_transactions (hash, block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) +` + +type CreateRuneTransactionParams struct { + Hash string + BlockHeight int32 + Index int32 + Timestamp pgtype.Timestamp + Inputs []byte + Outputs []byte + Mints []byte + Burns []byte + RuneEtched bool +} + +func (q *Queries) CreateRuneTransaction(ctx context.Context, arg CreateRuneTransactionParams) error { + _, err := q.db.Exec(ctx, createRuneTransaction, + arg.Hash, + arg.BlockHeight, + arg.Index, + arg.Timestamp, + arg.Inputs, + arg.Outputs, + arg.Mints, + arg.Burns, + arg.RuneEtched, + ) + return err +} + +const createRunestone = `-- name: CreateRunestone :exec +INSERT INTO runes_runestones (tx_hash, block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, 
etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21) +` + +type CreateRunestoneParams struct { + TxHash string + BlockHeight int32 + Etching bool + EtchingDivisibility pgtype.Int2 + EtchingPremine pgtype.Numeric + EtchingRune pgtype.Text + EtchingSpacers pgtype.Int4 + EtchingSymbol pgtype.Int4 + EtchingTerms pgtype.Bool + EtchingTermsAmount pgtype.Numeric + EtchingTermsCap pgtype.Numeric + EtchingTermsHeightStart pgtype.Int4 + EtchingTermsHeightEnd pgtype.Int4 + EtchingTermsOffsetStart pgtype.Int4 + EtchingTermsOffsetEnd pgtype.Int4 + EtchingTurbo pgtype.Bool + Edicts []byte + Mint pgtype.Text + Pointer pgtype.Int4 + Cenotaph bool + Flaws int32 +} + +func (q *Queries) CreateRunestone(ctx context.Context, arg CreateRunestoneParams) error { + _, err := q.db.Exec(ctx, createRunestone, + arg.TxHash, + arg.BlockHeight, + arg.Etching, + arg.EtchingDivisibility, + arg.EtchingPremine, + arg.EtchingRune, + arg.EtchingSpacers, + arg.EtchingSymbol, + arg.EtchingTerms, + arg.EtchingTermsAmount, + arg.EtchingTermsCap, + arg.EtchingTermsHeightStart, + arg.EtchingTermsHeightEnd, + arg.EtchingTermsOffsetStart, + arg.EtchingTermsOffsetEnd, + arg.EtchingTurbo, + arg.Edicts, + arg.Mint, + arg.Pointer, + arg.Cenotaph, + arg.Flaws, + ) + return err +} + +const deleteIndexedBlockSinceHeight = `-- name: DeleteIndexedBlockSinceHeight :exec +DELETE FROM runes_indexed_blocks WHERE height >= $1 +` + +func (q *Queries) DeleteIndexedBlockSinceHeight(ctx context.Context, height int32) error { + _, err := q.db.Exec(ctx, deleteIndexedBlockSinceHeight, height) + return err +} + +const deleteOutPointBalancesSinceHeight = `-- name: DeleteOutPointBalancesSinceHeight :exec +DELETE FROM runes_outpoint_balances WHERE block_height >= $1 
+` + +func (q *Queries) DeleteOutPointBalancesSinceHeight(ctx context.Context, blockHeight int32) error { + _, err := q.db.Exec(ctx, deleteOutPointBalancesSinceHeight, blockHeight) + return err +} + +const deleteRuneBalancesSinceHeight = `-- name: DeleteRuneBalancesSinceHeight :exec +DELETE FROM runes_balances WHERE block_height >= $1 +` + +func (q *Queries) DeleteRuneBalancesSinceHeight(ctx context.Context, blockHeight int32) error { + _, err := q.db.Exec(ctx, deleteRuneBalancesSinceHeight, blockHeight) + return err +} + +const deleteRuneEntriesSinceHeight = `-- name: DeleteRuneEntriesSinceHeight :exec +DELETE FROM runes_entries WHERE etching_block >= $1 +` + +func (q *Queries) DeleteRuneEntriesSinceHeight(ctx context.Context, etchingBlock int32) error { + _, err := q.db.Exec(ctx, deleteRuneEntriesSinceHeight, etchingBlock) + return err +} + +const deleteRuneEntryStatesSinceHeight = `-- name: DeleteRuneEntryStatesSinceHeight :exec +DELETE FROM runes_entry_states WHERE block_height >= $1 +` + +func (q *Queries) DeleteRuneEntryStatesSinceHeight(ctx context.Context, blockHeight int32) error { + _, err := q.db.Exec(ctx, deleteRuneEntryStatesSinceHeight, blockHeight) + return err +} + +const deleteRuneTransactionsSinceHeight = `-- name: DeleteRuneTransactionsSinceHeight :exec +DELETE FROM runes_transactions WHERE block_height >= $1 +` + +func (q *Queries) DeleteRuneTransactionsSinceHeight(ctx context.Context, blockHeight int32) error { + _, err := q.db.Exec(ctx, deleteRuneTransactionsSinceHeight, blockHeight) + return err +} + +const deleteRunestonesSinceHeight = `-- name: DeleteRunestonesSinceHeight :exec +DELETE FROM runes_runestones WHERE block_height >= $1 +` + +func (q *Queries) DeleteRunestonesSinceHeight(ctx context.Context, blockHeight int32) error { + _, err := q.db.Exec(ctx, deleteRunestonesSinceHeight, blockHeight) + return err +} + +const getBalanceByPkScriptAndRuneId = `-- name: GetBalanceByPkScriptAndRuneId :one +SELECT pkscript, block_height, rune_id, 
amount FROM runes_balances WHERE pkscript = $1 AND rune_id = $2 AND block_height <= $3 ORDER BY block_height DESC LIMIT 1 +` + +type GetBalanceByPkScriptAndRuneIdParams struct { + Pkscript string + RuneID string + BlockHeight int32 +} + +func (q *Queries) GetBalanceByPkScriptAndRuneId(ctx context.Context, arg GetBalanceByPkScriptAndRuneIdParams) (RunesBalance, error) { + row := q.db.QueryRow(ctx, getBalanceByPkScriptAndRuneId, arg.Pkscript, arg.RuneID, arg.BlockHeight) + var i RunesBalance + err := row.Scan( + &i.Pkscript, + &i.BlockHeight, + &i.RuneID, + &i.Amount, + ) + return i, err +} + +const getBalancesByPkScript = `-- name: GetBalancesByPkScript :many +WITH balances AS ( + SELECT DISTINCT ON (rune_id) pkscript, block_height, rune_id, amount FROM runes_balances WHERE pkscript = $1 AND block_height <= $2 ORDER BY rune_id, block_height DESC +) +SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0 +` + +type GetBalancesByPkScriptParams struct { + Pkscript string + BlockHeight int32 +} + +type GetBalancesByPkScriptRow struct { + Pkscript string + BlockHeight int32 + RuneID string + Amount pgtype.Numeric +} + +func (q *Queries) GetBalancesByPkScript(ctx context.Context, arg GetBalancesByPkScriptParams) ([]GetBalancesByPkScriptRow, error) { + rows, err := q.db.Query(ctx, getBalancesByPkScript, arg.Pkscript, arg.BlockHeight) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetBalancesByPkScriptRow + for rows.Next() { + var i GetBalancesByPkScriptRow + if err := rows.Scan( + &i.Pkscript, + &i.BlockHeight, + &i.RuneID, + &i.Amount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const getBalancesByRuneId = `-- name: GetBalancesByRuneId :many +WITH balances AS ( + SELECT DISTINCT ON (pkscript) pkscript, block_height, rune_id, amount FROM runes_balances WHERE rune_id = $1 AND block_height <= $2 ORDER BY pkscript, 
block_height DESC +) +SELECT pkscript, block_height, rune_id, amount FROM balances WHERE amount > 0 +` + +type GetBalancesByRuneIdParams struct { + RuneID string + BlockHeight int32 +} + +type GetBalancesByRuneIdRow struct { + Pkscript string + BlockHeight int32 + RuneID string + Amount pgtype.Numeric +} + +func (q *Queries) GetBalancesByRuneId(ctx context.Context, arg GetBalancesByRuneIdParams) ([]GetBalancesByRuneIdRow, error) { + rows, err := q.db.Query(ctx, getBalancesByRuneId, arg.RuneID, arg.BlockHeight) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetBalancesByRuneIdRow + for rows.Next() { + var i GetBalancesByRuneIdRow + if err := rows.Scan( + &i.Pkscript, + &i.BlockHeight, + &i.RuneID, + &i.Amount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const getIndexedBlockByHeight = `-- name: GetIndexedBlockByHeight :one +SELECT height, hash, prev_hash, event_hash, cumulative_event_hash FROM runes_indexed_blocks WHERE height = $1 +` + +func (q *Queries) GetIndexedBlockByHeight(ctx context.Context, height int32) (RunesIndexedBlock, error) { + row := q.db.QueryRow(ctx, getIndexedBlockByHeight, height) + var i RunesIndexedBlock + err := row.Scan( + &i.Height, + &i.Hash, + &i.PrevHash, + &i.EventHash, + &i.CumulativeEventHash, + ) + return i, err +} + +const getLatestIndexedBlock = `-- name: GetLatestIndexedBlock :one +SELECT height, hash, prev_hash, event_hash, cumulative_event_hash FROM runes_indexed_blocks ORDER BY height DESC LIMIT 1 +` + +func (q *Queries) GetLatestIndexedBlock(ctx context.Context) (RunesIndexedBlock, error) { + row := q.db.QueryRow(ctx, getLatestIndexedBlock) + var i RunesIndexedBlock + err := row.Scan( + &i.Height, + &i.Hash, + &i.PrevHash, + &i.EventHash, + &i.CumulativeEventHash, + ) + return i, err +} + +const getOutPointBalancesAtOutPoint = `-- name: GetOutPointBalancesAtOutPoint :many +SELECT rune_id, 
pkscript, tx_hash, tx_idx, amount, block_height, spent_height FROM runes_outpoint_balances WHERE tx_hash = $1 AND tx_idx = $2 +` + +type GetOutPointBalancesAtOutPointParams struct { + TxHash string + TxIdx int32 +} + +func (q *Queries) GetOutPointBalancesAtOutPoint(ctx context.Context, arg GetOutPointBalancesAtOutPointParams) ([]RunesOutpointBalance, error) { + rows, err := q.db.Query(ctx, getOutPointBalancesAtOutPoint, arg.TxHash, arg.TxIdx) + if err != nil { + return nil, err + } + defer rows.Close() + var items []RunesOutpointBalance + for rows.Next() { + var i RunesOutpointBalance + if err := rows.Scan( + &i.RuneID, + &i.Pkscript, + &i.TxHash, + &i.TxIdx, + &i.Amount, + &i.BlockHeight, + &i.SpentHeight, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const getRuneEntriesByRuneIds = `-- name: GetRuneEntriesByRuneIds :many +WITH states AS ( + -- select latest state + SELECT DISTINCT ON (rune_id) rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entry_states WHERE rune_id = ANY($1::text[]) ORDER BY rune_id, block_height DESC +) +SELECT runes_entries.rune_id, number, rune, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at, states.rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entries + LEFT JOIN states ON runes_entries.rune_id = states.rune_id + WHERE runes_entries.rune_id = ANY($1::text[]) +` + +type GetRuneEntriesByRuneIdsRow struct { + RuneID string + Number int64 + Rune string + Spacers int32 + Premine pgtype.Numeric + Symbol int32 + Divisibility int16 + Terms bool + TermsAmount pgtype.Numeric + TermsCap pgtype.Numeric + TermsHeightStart pgtype.Int4 + TermsHeightEnd pgtype.Int4 + TermsOffsetStart pgtype.Int4 + TermsOffsetEnd 
pgtype.Int4 + Turbo bool + EtchingBlock int32 + EtchingTxHash string + EtchedAt pgtype.Timestamp + RuneID_2 pgtype.Text + BlockHeight pgtype.Int4 + Mints pgtype.Numeric + BurnedAmount pgtype.Numeric + CompletedAt pgtype.Timestamp + CompletedAtHeight pgtype.Int4 +} + +func (q *Queries) GetRuneEntriesByRuneIds(ctx context.Context, runeIds []string) ([]GetRuneEntriesByRuneIdsRow, error) { + rows, err := q.db.Query(ctx, getRuneEntriesByRuneIds, runeIds) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetRuneEntriesByRuneIdsRow + for rows.Next() { + var i GetRuneEntriesByRuneIdsRow + if err := rows.Scan( + &i.RuneID, + &i.Number, + &i.Rune, + &i.Spacers, + &i.Premine, + &i.Symbol, + &i.Divisibility, + &i.Terms, + &i.TermsAmount, + &i.TermsCap, + &i.TermsHeightStart, + &i.TermsHeightEnd, + &i.TermsOffsetStart, + &i.TermsOffsetEnd, + &i.Turbo, + &i.EtchingBlock, + &i.EtchingTxHash, + &i.EtchedAt, + &i.RuneID_2, + &i.BlockHeight, + &i.Mints, + &i.BurnedAmount, + &i.CompletedAt, + &i.CompletedAtHeight, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const getRuneEntriesByRuneIdsAndHeight = `-- name: GetRuneEntriesByRuneIdsAndHeight :many +WITH states AS ( + -- select latest state + SELECT DISTINCT ON (rune_id) rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entry_states WHERE rune_id = ANY($1::text[]) AND block_height <= $2 ORDER BY rune_id, block_height DESC +) +SELECT runes_entries.rune_id, number, rune, spacers, premine, symbol, divisibility, terms, terms_amount, terms_cap, terms_height_start, terms_height_end, terms_offset_start, terms_offset_end, turbo, etching_block, etching_tx_hash, etched_at, states.rune_id, block_height, mints, burned_amount, completed_at, completed_at_height FROM runes_entries + LEFT JOIN states ON runes_entries.rune_id = states.rune_id + WHERE runes_entries.rune_id = 
ANY($1::text[]) AND etching_block <= $2 +` + +type GetRuneEntriesByRuneIdsAndHeightParams struct { + RuneIds []string + Height int32 +} + +type GetRuneEntriesByRuneIdsAndHeightRow struct { + RuneID string + Number int64 + Rune string + Spacers int32 + Premine pgtype.Numeric + Symbol int32 + Divisibility int16 + Terms bool + TermsAmount pgtype.Numeric + TermsCap pgtype.Numeric + TermsHeightStart pgtype.Int4 + TermsHeightEnd pgtype.Int4 + TermsOffsetStart pgtype.Int4 + TermsOffsetEnd pgtype.Int4 + Turbo bool + EtchingBlock int32 + EtchingTxHash string + EtchedAt pgtype.Timestamp + RuneID_2 pgtype.Text + BlockHeight pgtype.Int4 + Mints pgtype.Numeric + BurnedAmount pgtype.Numeric + CompletedAt pgtype.Timestamp + CompletedAtHeight pgtype.Int4 +} + +func (q *Queries) GetRuneEntriesByRuneIdsAndHeight(ctx context.Context, arg GetRuneEntriesByRuneIdsAndHeightParams) ([]GetRuneEntriesByRuneIdsAndHeightRow, error) { + rows, err := q.db.Query(ctx, getRuneEntriesByRuneIdsAndHeight, arg.RuneIds, arg.Height) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetRuneEntriesByRuneIdsAndHeightRow + for rows.Next() { + var i GetRuneEntriesByRuneIdsAndHeightRow + if err := rows.Scan( + &i.RuneID, + &i.Number, + &i.Rune, + &i.Spacers, + &i.Premine, + &i.Symbol, + &i.Divisibility, + &i.Terms, + &i.TermsAmount, + &i.TermsCap, + &i.TermsHeightStart, + &i.TermsHeightEnd, + &i.TermsOffsetStart, + &i.TermsOffsetEnd, + &i.Turbo, + &i.EtchingBlock, + &i.EtchingTxHash, + &i.EtchedAt, + &i.RuneID_2, + &i.BlockHeight, + &i.Mints, + &i.BurnedAmount, + &i.CompletedAt, + &i.CompletedAtHeight, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const getRuneIdFromRune = `-- name: GetRuneIdFromRune :one +SELECT rune_id FROM runes_entries WHERE rune = $1 +` + +func (q *Queries) GetRuneIdFromRune(ctx context.Context, rune string) (string, error) { + row := q.db.QueryRow(ctx, 
getRuneIdFromRune, rune) + var rune_id string + err := row.Scan(&rune_id) + return rune_id, err +} + +const getRuneTransactions = `-- name: GetRuneTransactions :many +SELECT hash, runes_transactions.block_height, index, timestamp, inputs, outputs, mints, burns, rune_etched, tx_hash, runes_runestones.block_height, etching, etching_divisibility, etching_premine, etching_rune, etching_spacers, etching_symbol, etching_terms, etching_terms_amount, etching_terms_cap, etching_terms_height_start, etching_terms_height_end, etching_terms_offset_start, etching_terms_offset_end, etching_turbo, edicts, mint, pointer, cenotaph, flaws FROM runes_transactions + LEFT JOIN runes_runestones ON runes_transactions.hash = runes_runestones.tx_hash + WHERE ( + $1::BOOLEAN = FALSE -- if @filter_pk_script is TRUE, apply pk_script filter + OR runes_transactions.outputs @> $2::JSONB + OR runes_transactions.inputs @> $2::JSONB + ) AND ( + $3::BOOLEAN = FALSE -- if @filter_rune_id is TRUE, apply rune_id filter + OR runes_transactions.outputs @> $4::JSONB + OR runes_transactions.inputs @> $4::JSONB + OR runes_transactions.mints ? $5 + OR runes_transactions.burns ? 
$5 + OR (runes_transactions.rune_etched = TRUE AND runes_transactions.block_height = $6 AND runes_transactions.index = $7) + ) AND ( + $8::INT = 0 OR runes_transactions.block_height = $8::INT -- if @block_height > 0, apply block_height filter + ) +` + +type GetRuneTransactionsParams struct { + FilterPkScript bool + PkScriptParam []byte + FilterRuneID bool + RuneIDParam []byte + RuneID []byte + RuneIDBlockHeight int32 + RuneIDTxIndex int32 + BlockHeight int32 +} + +type GetRuneTransactionsRow struct { + Hash string + BlockHeight int32 + Index int32 + Timestamp pgtype.Timestamp + Inputs []byte + Outputs []byte + Mints []byte + Burns []byte + RuneEtched bool + TxHash pgtype.Text + BlockHeight_2 pgtype.Int4 + Etching pgtype.Bool + EtchingDivisibility pgtype.Int2 + EtchingPremine pgtype.Numeric + EtchingRune pgtype.Text + EtchingSpacers pgtype.Int4 + EtchingSymbol pgtype.Int4 + EtchingTerms pgtype.Bool + EtchingTermsAmount pgtype.Numeric + EtchingTermsCap pgtype.Numeric + EtchingTermsHeightStart pgtype.Int4 + EtchingTermsHeightEnd pgtype.Int4 + EtchingTermsOffsetStart pgtype.Int4 + EtchingTermsOffsetEnd pgtype.Int4 + EtchingTurbo pgtype.Bool + Edicts []byte + Mint pgtype.Text + Pointer pgtype.Int4 + Cenotaph pgtype.Bool + Flaws pgtype.Int4 +} + +func (q *Queries) GetRuneTransactions(ctx context.Context, arg GetRuneTransactionsParams) ([]GetRuneTransactionsRow, error) { + rows, err := q.db.Query(ctx, getRuneTransactions, + arg.FilterPkScript, + arg.PkScriptParam, + arg.FilterRuneID, + arg.RuneIDParam, + arg.RuneID, + arg.RuneIDBlockHeight, + arg.RuneIDTxIndex, + arg.BlockHeight, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetRuneTransactionsRow + for rows.Next() { + var i GetRuneTransactionsRow + if err := rows.Scan( + &i.Hash, + &i.BlockHeight, + &i.Index, + &i.Timestamp, + &i.Inputs, + &i.Outputs, + &i.Mints, + &i.Burns, + &i.RuneEtched, + &i.TxHash, + &i.BlockHeight_2, + &i.Etching, + &i.EtchingDivisibility, + &i.EtchingPremine, + 
&i.EtchingRune, + &i.EtchingSpacers, + &i.EtchingSymbol, + &i.EtchingTerms, + &i.EtchingTermsAmount, + &i.EtchingTermsCap, + &i.EtchingTermsHeightStart, + &i.EtchingTermsHeightEnd, + &i.EtchingTermsOffsetStart, + &i.EtchingTermsOffsetEnd, + &i.EtchingTurbo, + &i.Edicts, + &i.Mint, + &i.Pointer, + &i.Cenotaph, + &i.Flaws, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const getUnspentOutPointBalancesByPkScript = `-- name: GetUnspentOutPointBalancesByPkScript :many +SELECT rune_id, pkscript, tx_hash, tx_idx, amount, block_height, spent_height FROM runes_outpoint_balances WHERE pkscript = $1 AND block_height <= $2 AND (spent_height IS NULL OR spent_height > $2) +` + +type GetUnspentOutPointBalancesByPkScriptParams struct { + Pkscript string + BlockHeight int32 +} + +func (q *Queries) GetUnspentOutPointBalancesByPkScript(ctx context.Context, arg GetUnspentOutPointBalancesByPkScriptParams) ([]RunesOutpointBalance, error) { + rows, err := q.db.Query(ctx, getUnspentOutPointBalancesByPkScript, arg.Pkscript, arg.BlockHeight) + if err != nil { + return nil, err + } + defer rows.Close() + var items []RunesOutpointBalance + for rows.Next() { + var i RunesOutpointBalance + if err := rows.Scan( + &i.RuneID, + &i.Pkscript, + &i.TxHash, + &i.TxIdx, + &i.Amount, + &i.BlockHeight, + &i.SpentHeight, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const spendOutPointBalances = `-- name: SpendOutPointBalances :exec +UPDATE runes_outpoint_balances SET spent_height = $1 WHERE tx_hash = $2 AND tx_idx = $3 +` + +type SpendOutPointBalancesParams struct { + SpentHeight pgtype.Int4 + TxHash string + TxIdx int32 +} + +func (q *Queries) SpendOutPointBalances(ctx context.Context, arg SpendOutPointBalancesParams) error { + _, err := q.db.Exec(ctx, spendOutPointBalances, 
arg.SpentHeight, arg.TxHash, arg.TxIdx) + return err +} + +const unspendOutPointBalancesSinceHeight = `-- name: UnspendOutPointBalancesSinceHeight :exec +UPDATE runes_outpoint_balances SET spent_height = NULL WHERE spent_height >= $1 +` + +func (q *Queries) UnspendOutPointBalancesSinceHeight(ctx context.Context, spentHeight pgtype.Int4) error { + _, err := q.db.Exec(ctx, unspendOutPointBalancesSinceHeight, spentHeight) + return err +} diff --git a/modules/runes/repository/postgres/gen/db.go b/modules/runes/repository/postgres/gen/db.go new file mode 100644 index 0000000..150a59a --- /dev/null +++ b/modules/runes/repository/postgres/gen/db.go @@ -0,0 +1,33 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.26.0 + +package gen + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" +) + +type DBTX interface { + Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) + Query(context.Context, string, ...interface{}) (pgx.Rows, error) + QueryRow(context.Context, string, ...interface{}) pgx.Row + SendBatch(context.Context, *pgx.Batch) pgx.BatchResults +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx pgx.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/modules/runes/repository/postgres/gen/info.sql.go b/modules/runes/repository/postgres/gen/info.sql.go new file mode 100644 index 0000000..815f21d --- /dev/null +++ b/modules/runes/repository/postgres/gen/info.sql.go @@ -0,0 +1,70 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.26.0 +// source: info.sql + +package gen + +import ( + "context" +) + +const getLatestIndexerState = `-- name: GetLatestIndexerState :one +SELECT id, db_version, event_hash_version, created_at FROM runes_indexer_state ORDER BY created_at DESC LIMIT 1 +` + +func (q *Queries) GetLatestIndexerState(ctx context.Context) (RunesIndexerState, error) { + row := q.db.QueryRow(ctx, getLatestIndexerState) + var i RunesIndexerState + err := row.Scan( + &i.Id, + &i.DbVersion, + &i.EventHashVersion, + &i.CreatedAt, + ) + return i, err +} + +const getLatestIndexerStats = `-- name: GetLatestIndexerStats :one +SELECT "client_version", "network" FROM runes_indexer_stats ORDER BY id DESC LIMIT 1 +` + +type GetLatestIndexerStatsRow struct { + ClientVersion string + Network string +} + +func (q *Queries) GetLatestIndexerStats(ctx context.Context) (GetLatestIndexerStatsRow, error) { + row := q.db.QueryRow(ctx, getLatestIndexerStats) + var i GetLatestIndexerStatsRow + err := row.Scan(&i.ClientVersion, &i.Network) + return i, err +} + +const setIndexerState = `-- name: SetIndexerState :exec +INSERT INTO runes_indexer_state (db_version, event_hash_version) VALUES ($1, $2) +` + +type SetIndexerStateParams struct { + DbVersion int32 + EventHashVersion int32 +} + +func (q *Queries) SetIndexerState(ctx context.Context, arg SetIndexerStateParams) error { + _, err := q.db.Exec(ctx, setIndexerState, arg.DbVersion, arg.EventHashVersion) + return err +} + +const updateIndexerStats = `-- name: UpdateIndexerStats :exec +INSERT INTO runes_indexer_stats (client_version, network) VALUES ($1, $2) +` + +type UpdateIndexerStatsParams struct { + ClientVersion string + Network string +} + +func (q *Queries) UpdateIndexerStats(ctx context.Context, arg UpdateIndexerStatsParams) error { + _, err := q.db.Exec(ctx, updateIndexerStats, arg.ClientVersion, arg.Network) + return err +} diff --git a/modules/runes/repository/postgres/gen/models.go 
b/modules/runes/repository/postgres/gen/models.go new file mode 100644 index 0000000..2a85858 --- /dev/null +++ b/modules/runes/repository/postgres/gen/models.go @@ -0,0 +1,114 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.26.0 + +package gen + +import ( + "github.com/jackc/pgx/v5/pgtype" +) + +type RunesBalance struct { + Pkscript string + BlockHeight int32 + RuneID string + Amount pgtype.Numeric +} + +type RunesEntry struct { + RuneID string + Number int64 + Rune string + Spacers int32 + Premine pgtype.Numeric + Symbol int32 + Divisibility int16 + Terms bool + TermsAmount pgtype.Numeric + TermsCap pgtype.Numeric + TermsHeightStart pgtype.Int4 + TermsHeightEnd pgtype.Int4 + TermsOffsetStart pgtype.Int4 + TermsOffsetEnd pgtype.Int4 + Turbo bool + EtchingBlock int32 + EtchingTxHash string + EtchedAt pgtype.Timestamp +} + +type RunesEntryState struct { + RuneID string + BlockHeight int32 + Mints pgtype.Numeric + BurnedAmount pgtype.Numeric + CompletedAt pgtype.Timestamp + CompletedAtHeight pgtype.Int4 +} + +type RunesIndexedBlock struct { + Height int32 + Hash string + PrevHash string + EventHash string + CumulativeEventHash string +} + +type RunesIndexerStat struct { + Id int64 + ClientVersion string + Network string + CreatedAt pgtype.Timestamptz +} + +type RunesIndexerState struct { + Id int64 + DbVersion int32 + EventHashVersion int32 + CreatedAt pgtype.Timestamptz +} + +type RunesOutpointBalance struct { + RuneID string + Pkscript string + TxHash string + TxIdx int32 + Amount pgtype.Numeric + BlockHeight int32 + SpentHeight pgtype.Int4 +} + +type RunesRunestone struct { + TxHash string + BlockHeight int32 + Etching bool + EtchingDivisibility pgtype.Int2 + EtchingPremine pgtype.Numeric + EtchingRune pgtype.Text + EtchingSpacers pgtype.Int4 + EtchingSymbol pgtype.Int4 + EtchingTerms pgtype.Bool + EtchingTermsAmount pgtype.Numeric + EtchingTermsCap pgtype.Numeric + EtchingTermsHeightStart pgtype.Int4 + EtchingTermsHeightEnd pgtype.Int4 + 
EtchingTermsOffsetStart pgtype.Int4 + EtchingTermsOffsetEnd pgtype.Int4 + EtchingTurbo pgtype.Bool + Edicts []byte + Mint pgtype.Text + Pointer pgtype.Int4 + Cenotaph bool + Flaws int32 +} + +type RunesTransaction struct { + Hash string + BlockHeight int32 + Index int32 + Timestamp pgtype.Timestamp + Inputs []byte + Outputs []byte + Mints []byte + Burns []byte + RuneEtched bool +} diff --git a/modules/runes/repository/postgres/indexer_info.go b/modules/runes/repository/postgres/indexer_info.go new file mode 100644 index 0000000..258c96b --- /dev/null +++ b/modules/runes/repository/postgres/indexer_info.go @@ -0,0 +1,56 @@ +package postgres + +import ( + "context" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/modules/runes/datagateway" + "github.com/gaze-network/indexer-network/modules/runes/internal/entity" + "github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen" + "github.com/jackc/pgx/v5" +) + +var _ datagateway.IndexerInfoDataGateway = (*Repository)(nil) + +func (r *Repository) GetLatestIndexerState(ctx context.Context) (entity.IndexerState, error) { + indexerStateModel, err := r.queries.GetLatestIndexerState(ctx) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return entity.IndexerState{}, errors.WithStack(errs.NotFound) + } + return entity.IndexerState{}, errors.Wrap(err, "error during query") + } + indexerState := mapIndexerStateModelToType(indexerStateModel) + return indexerState, nil +} + +func (r *Repository) GetLatestIndexerStats(ctx context.Context) (string, common.Network, error) { + stats, err := r.queries.GetLatestIndexerStats(ctx) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return "", "", errors.WithStack(errs.NotFound) + } + return "", "", errors.Wrap(err, "error during query") + } + return stats.ClientVersion, common.Network(stats.Network), nil +} + +func (r 
*Repository) SetIndexerState(ctx context.Context, state entity.IndexerState) error { + params := mapIndexerStateTypeToParams(state) + if err := r.queries.SetIndexerState(ctx, params); err != nil { + return errors.Wrap(err, "error during exec") + } + return nil +} + +func (r *Repository) UpdateIndexerStats(ctx context.Context, clientVersion string, network common.Network) error { + if err := r.queries.UpdateIndexerStats(ctx, gen.UpdateIndexerStatsParams{ + ClientVersion: clientVersion, + Network: string(network), + }); err != nil { + return errors.Wrap(err, "error during exec") + } + return nil +} diff --git a/modules/runes/repository/postgres/mapper.go b/modules/runes/repository/postgres/mapper.go new file mode 100644 index 0000000..ef99273 --- /dev/null +++ b/modules/runes/repository/postgres/mapper.go @@ -0,0 +1,693 @@ +package postgres + +import ( + "encoding/hex" + "encoding/json" + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/modules/runes/internal/entity" + "github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen" + "github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/gaze-network/uint128" + "github.com/jackc/pgx/v5/pgtype" + "github.com/samber/lo" +) + +func uint128FromNumeric(src pgtype.Numeric) (*uint128.Uint128, error) { + if !src.Valid { + return nil, nil + } + bytes, err := src.MarshalJSON() + if err != nil { + return nil, errors.WithStack(err) + } + result, err := uint128.FromString(string(bytes)) + if err != nil { + return nil, errors.WithStack(err) + } + return &result, nil +} + +func numericFromUint128(src *uint128.Uint128) (pgtype.Numeric, error) { + if src == nil { + return pgtype.Numeric{}, nil + } + bytes := []byte(src.String()) + var result pgtype.Numeric + err := result.UnmarshalJSON(bytes) + if err != nil { + return pgtype.Numeric{}, errors.WithStack(err) + } + return result, 
nil +} + +func mapIndexerStateModelToType(src gen.RunesIndexerState) entity.IndexerState { + var createdAt time.Time + if src.CreatedAt.Valid { + createdAt = src.CreatedAt.Time + } + return entity.IndexerState{ + DBVersion: src.DbVersion, + EventHashVersion: src.EventHashVersion, + CreatedAt: createdAt, + } +} + +func mapIndexerStateTypeToParams(src entity.IndexerState) gen.SetIndexerStateParams { + return gen.SetIndexerStateParams{ + DbVersion: src.DBVersion, + EventHashVersion: src.EventHashVersion, + } +} + +func mapRuneEntryModelToType(src gen.GetRuneEntriesByRuneIdsRow) (runes.RuneEntry, error) { + runeId, err := runes.NewRuneIdFromString(src.RuneID) + if err != nil { + return runes.RuneEntry{}, errors.Wrap(err, "failed to parse rune id") + } + burnedAmount, err := uint128FromNumeric(src.BurnedAmount) + if err != nil { + return runes.RuneEntry{}, errors.Wrap(err, "failed to parse burned amount") + } + rune, err := runes.NewRuneFromString(src.Rune) + if err != nil { + return runes.RuneEntry{}, errors.Wrap(err, "failed to parse rune") + } + mints, err := uint128FromNumeric(src.Mints) + if err != nil { + return runes.RuneEntry{}, errors.Wrap(err, "failed to parse mints") + } + premine, err := uint128FromNumeric(src.Premine) + if err != nil { + return runes.RuneEntry{}, errors.Wrap(err, "failed to parse premine") + } + var completedAt time.Time + if src.CompletedAt.Valid { + completedAt = src.CompletedAt.Time + } + var completedAtHeight *uint64 + if src.CompletedAtHeight.Valid { + completedAtHeight = lo.ToPtr(uint64(src.CompletedAtHeight.Int32)) + } + var terms *runes.Terms + if src.Terms { + terms = &runes.Terms{} + if src.TermsAmount.Valid { + amount, err := uint128FromNumeric(src.TermsAmount) + if err != nil { + return runes.RuneEntry{}, errors.Wrap(err, "failed to parse terms amount") + } + terms.Amount = amount + } + if src.TermsCap.Valid { + cap, err := uint128FromNumeric(src.TermsCap) + if err != nil { + return runes.RuneEntry{}, errors.Wrap(err, "failed to 
parse terms cap") + } + terms.Cap = cap + } + if src.TermsHeightStart.Valid { + heightStart := uint64(src.TermsHeightStart.Int32) + terms.HeightStart = &heightStart + } + if src.TermsHeightEnd.Valid { + heightEnd := uint64(src.TermsHeightEnd.Int32) + terms.HeightEnd = &heightEnd + } + if src.TermsOffsetStart.Valid { + offsetStart := uint64(src.TermsOffsetStart.Int32) + terms.OffsetStart = &offsetStart + } + if src.TermsOffsetEnd.Valid { + offsetEnd := uint64(src.TermsOffsetEnd.Int32) + terms.OffsetEnd = &offsetEnd + } + } + etchingTxHash, err := chainhash.NewHashFromStr(src.EtchingTxHash) + if err != nil { + return runes.RuneEntry{}, errors.Wrap(err, "failed to parse etching tx hash") + } + var etchedAt time.Time + if src.EtchedAt.Valid { + etchedAt = src.EtchedAt.Time + } + return runes.RuneEntry{ + RuneId: runeId, + Number: uint64(src.Number), + Divisibility: uint8(src.Divisibility), + Premine: lo.FromPtr(premine), + SpacedRune: runes.NewSpacedRune(rune, uint32(src.Spacers)), + Symbol: src.Symbol, + Terms: terms, + Turbo: src.Turbo, + Mints: lo.FromPtr(mints), + BurnedAmount: lo.FromPtr(burnedAmount), + CompletedAt: completedAt, + CompletedAtHeight: completedAtHeight, + EtchingBlock: uint64(src.EtchingBlock), + EtchingTxHash: *etchingTxHash, + EtchedAt: etchedAt, + }, nil +} + +func mapRuneEntryTypeToParams(src runes.RuneEntry, blockHeight uint64) (gen.CreateRuneEntryParams, gen.CreateRuneEntryStateParams, error) { + runeId := src.RuneId.String() + rune := src.SpacedRune.Rune.String() + spacers := int32(src.SpacedRune.Spacers) + mints, err := numericFromUint128(&src.Mints) + if err != nil { + return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse mints") + } + burnedAmount, err := numericFromUint128(&src.BurnedAmount) + if err != nil { + return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse burned amount") + } + premine, err := numericFromUint128(&src.Premine) + if 
err != nil { + return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse premine") + } + var completedAt pgtype.Timestamp + if !src.CompletedAt.IsZero() { + completedAt.Time = src.CompletedAt + completedAt.Valid = true + } + var completedAtHeight pgtype.Int4 + if src.CompletedAtHeight != nil { + completedAtHeight.Int32 = int32(*src.CompletedAtHeight) + completedAtHeight.Valid = true + } + var terms bool + var termsAmount, termsCap pgtype.Numeric + var termsHeightStart, termsHeightEnd, termsOffsetStart, termsOffsetEnd pgtype.Int4 + if src.Terms != nil { + terms = true + if src.Terms.Amount != nil { + termsAmount, err = numericFromUint128(src.Terms.Amount) + if err != nil { + return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse terms amount") + } + } + if src.Terms.Cap != nil { + termsCap, err = numericFromUint128(src.Terms.Cap) + if err != nil { + return gen.CreateRuneEntryParams{}, gen.CreateRuneEntryStateParams{}, errors.Wrap(err, "failed to parse terms cap") + } + } + if src.Terms.HeightStart != nil { + termsHeightStart = pgtype.Int4{ + Int32: int32(*src.Terms.HeightStart), + Valid: true, + } + } + if src.Terms.HeightEnd != nil { + termsHeightEnd = pgtype.Int4{ + Int32: int32(*src.Terms.HeightEnd), + Valid: true, + } + } + if src.Terms.OffsetStart != nil { + termsOffsetStart = pgtype.Int4{ + Int32: int32(*src.Terms.OffsetStart), + Valid: true, + } + } + if src.Terms.OffsetEnd != nil { + termsOffsetEnd = pgtype.Int4{ + Int32: int32(*src.Terms.OffsetEnd), + Valid: true, + } + } + } + etchedAt := pgtype.Timestamp{Time: time.Time{}, Valid: true} + + return gen.CreateRuneEntryParams{ + RuneID: runeId, + Rune: rune, + Number: int64(src.Number), + Spacers: spacers, + Premine: premine, + Symbol: src.Symbol, + Divisibility: int16(src.Divisibility), + Terms: terms, + TermsAmount: termsAmount, + TermsCap: termsCap, + TermsHeightStart: termsHeightStart, + TermsHeightEnd: 
termsHeightEnd, + TermsOffsetStart: termsOffsetStart, + TermsOffsetEnd: termsOffsetEnd, + Turbo: src.Turbo, + EtchingBlock: int32(src.EtchingBlock), + EtchingTxHash: src.EtchingTxHash.String(), + EtchedAt: etchedAt, + }, gen.CreateRuneEntryStateParams{ + BlockHeight: int32(blockHeight), + RuneID: runeId, + Mints: mints, + BurnedAmount: burnedAmount, + CompletedAt: completedAt, + CompletedAtHeight: completedAtHeight, + }, nil +} + +// mapRuneTransactionModelToType returns params for creating a new rune transaction and (optionally) a runestone. +func mapRuneTransactionTypeToParams(src entity.RuneTransaction) (gen.CreateRuneTransactionParams, *gen.CreateRunestoneParams, error) { + var timestamp pgtype.Timestamp + if !src.Timestamp.IsZero() { + timestamp.Time = src.Timestamp + timestamp.Valid = true + } + inputsBytes, err := json.Marshal(src.Inputs) + if err != nil { + return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal inputs") + } + outputsBytes, err := json.Marshal(src.Outputs) + if err != nil { + return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal outputs") + } + mints := make(map[string]uint128.Uint128) + for key, value := range src.Mints { + mints[key.String()] = value + } + mintsBytes, err := json.Marshal(mints) + if err != nil { + return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal mints") + } + burns := make(map[string]uint128.Uint128) + for key, value := range src.Burns { + burns[key.String()] = value + } + burnsBytes, err := json.Marshal(burns) + if err != nil { + return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to marshal burns") + } + + var runestoneParams *gen.CreateRunestoneParams + if src.Runestone != nil { + params, err := mapRunestoneTypeToParams(*src.Runestone, src.Hash, src.BlockHeight) + if err != nil { + return gen.CreateRuneTransactionParams{}, nil, errors.Wrap(err, "failed to map runestone to params") + } + runestoneParams = ¶ms + 
} + + return gen.CreateRuneTransactionParams{ + Hash: src.Hash.String(), + BlockHeight: int32(src.BlockHeight), + Index: int32(src.Index), + Timestamp: timestamp, + Inputs: inputsBytes, + Outputs: outputsBytes, + Mints: mintsBytes, + Burns: burnsBytes, + RuneEtched: src.RuneEtched, + }, runestoneParams, nil +} + +func extractModelRuneTxAndRunestone(src gen.GetRuneTransactionsRow) (gen.RunesTransaction, *gen.RunesRunestone, error) { + var runestone *gen.RunesRunestone + if src.TxHash.Valid { + // these fields should never be null + if !src.Cenotaph.Valid { + return gen.RunesTransaction{}, nil, errors.New("runestone cenotaph is null") + } + if !src.Flaws.Valid { + return gen.RunesTransaction{}, nil, errors.New("runestone flaws is null") + } + runestone = &gen.RunesRunestone{ + TxHash: src.TxHash.String, + BlockHeight: src.BlockHeight, + Etching: src.Etching.Bool, + EtchingDivisibility: src.EtchingDivisibility, + EtchingPremine: src.EtchingPremine, + EtchingRune: src.EtchingRune, + EtchingSpacers: src.EtchingSpacers, + EtchingSymbol: src.EtchingSymbol, + EtchingTerms: src.EtchingTerms, + EtchingTermsAmount: src.EtchingTermsAmount, + EtchingTermsCap: src.EtchingTermsCap, + EtchingTermsHeightStart: src.EtchingTermsHeightStart, + EtchingTermsHeightEnd: src.EtchingTermsHeightEnd, + EtchingTermsOffsetStart: src.EtchingTermsOffsetStart, + EtchingTermsOffsetEnd: src.EtchingTermsOffsetEnd, + Edicts: src.Edicts, + Mint: src.Mint, + Pointer: src.Pointer, + Cenotaph: src.Cenotaph.Bool, + Flaws: src.Flaws.Int32, + } + } + return gen.RunesTransaction{ + Hash: src.Hash, + BlockHeight: src.BlockHeight, + Index: src.Index, + Timestamp: src.Timestamp, + Inputs: src.Inputs, + Outputs: src.Outputs, + Mints: src.Mints, + Burns: src.Burns, + RuneEtched: src.RuneEtched, + }, runestone, nil +} + +func mapRuneTransactionModelToType(src gen.RunesTransaction) (entity.RuneTransaction, error) { + hash, err := chainhash.NewHashFromStr(src.Hash) + if err != nil { + return entity.RuneTransaction{}, 
errors.Wrap(err, "failed to parse transaction hash") + } + var timestamp time.Time + if src.Timestamp.Valid { + timestamp = src.Timestamp.Time + } + + inputs := make([]*entity.TxInputOutput, 0) + if err := json.Unmarshal(src.Inputs, &inputs); err != nil { + return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal inputs") + } + outputs := make([]*entity.TxInputOutput, 0) + if err := json.Unmarshal(src.Outputs, &outputs); err != nil { + return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal outputs") + } + mintsRaw := make(map[string]uint128.Uint128) + if err := json.Unmarshal(src.Mints, &mintsRaw); err != nil { + return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal mints") + } + mints := make(map[runes.RuneId]uint128.Uint128) + for key, value := range mintsRaw { + runeId, err := runes.NewRuneIdFromString(key) + if err != nil { + return entity.RuneTransaction{}, errors.Wrap(err, "failed to parse rune id") + } + mints[runeId] = value + } + + burnsRaw := make(map[string]uint128.Uint128) + if err := json.Unmarshal(src.Burns, &burnsRaw); err != nil { + return entity.RuneTransaction{}, errors.Wrap(err, "failed to unmarshal burns") + } + burns := make(map[runes.RuneId]uint128.Uint128) + for key, value := range burnsRaw { + runeId, err := runes.NewRuneIdFromString(key) + if err != nil { + return entity.RuneTransaction{}, errors.Wrap(err, "failed to parse rune id") + } + burns[runeId] = value + } + + return entity.RuneTransaction{ + Hash: *hash, + BlockHeight: uint64(src.BlockHeight), + Index: uint32(src.Index), + Timestamp: timestamp, + Inputs: inputs, + Outputs: outputs, + Mints: mints, + Burns: burns, + RuneEtched: src.RuneEtched, + }, nil +} + +func mapRunestoneTypeToParams(src runes.Runestone, txHash chainhash.Hash, blockHeight uint64) (gen.CreateRunestoneParams, error) { + var runestoneParams gen.CreateRunestoneParams + + // TODO: optimize serialized edicts + edictsBytes, err := json.Marshal(src.Edicts) + if err != nil { 
+ return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to marshal runestone edicts") + } + runestoneParams = gen.CreateRunestoneParams{ + TxHash: txHash.String(), + BlockHeight: int32(blockHeight), + Edicts: edictsBytes, + Cenotaph: src.Cenotaph, + Flaws: int32(src.Flaws), + } + if src.Etching != nil { + runestoneParams.Etching = true + etching := *src.Etching + if etching.Divisibility != nil { + runestoneParams.EtchingDivisibility = pgtype.Int2{Int16: int16(*etching.Divisibility), Valid: true} + } + if etching.Premine != nil { + premine, err := numericFromUint128(etching.Premine) + if err != nil { + return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to parse etching premine") + } + runestoneParams.EtchingPremine = premine + } + if etching.Rune != nil { + runestoneParams.EtchingRune = pgtype.Text{String: etching.Rune.String(), Valid: true} + } + if etching.Spacers != nil { + runestoneParams.EtchingSpacers = pgtype.Int4{Int32: int32(*etching.Spacers), Valid: true} + } + if etching.Symbol != nil { + runestoneParams.EtchingSymbol = pgtype.Int4{Int32: *etching.Symbol, Valid: true} + } + if etching.Terms != nil { + runestoneParams.EtchingTerms = pgtype.Bool{Bool: true, Valid: true} + terms := *etching.Terms + if terms.Amount != nil { + amount, err := numericFromUint128(terms.Amount) + if err != nil { + return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to parse etching terms amount") + } + runestoneParams.EtchingTermsAmount = amount + } + if terms.Cap != nil { + cap, err := numericFromUint128(terms.Cap) + if err != nil { + return gen.CreateRunestoneParams{}, errors.Wrap(err, "failed to parse etching terms cap") + } + runestoneParams.EtchingTermsCap = cap + } + if terms.HeightStart != nil { + runestoneParams.EtchingTermsHeightStart = pgtype.Int4{Int32: int32(*terms.HeightStart), Valid: true} + } + if terms.HeightEnd != nil { + runestoneParams.EtchingTermsHeightEnd = pgtype.Int4{Int32: int32(*terms.HeightEnd), Valid: true} + } + if 
terms.OffsetStart != nil { + runestoneParams.EtchingTermsOffsetStart = pgtype.Int4{Int32: int32(*terms.OffsetStart), Valid: true} + } + if terms.OffsetEnd != nil { + runestoneParams.EtchingTermsOffsetEnd = pgtype.Int4{Int32: int32(*terms.OffsetEnd), Valid: true} + } + } + runestoneParams.EtchingTurbo = pgtype.Bool{Bool: etching.Turbo, Valid: true} + } + if src.Mint != nil { + runestoneParams.Mint = pgtype.Text{String: src.Mint.String(), Valid: true} + } + if src.Pointer != nil { + runestoneParams.Pointer = pgtype.Int4{Int32: int32(*src.Pointer), Valid: true} + } + + return runestoneParams, nil +} + +func mapRunestoneModelToType(src gen.RunesRunestone) (runes.Runestone, error) { + runestone := runes.Runestone{ + Cenotaph: src.Cenotaph, + Flaws: runes.Flaws(src.Flaws), + } + if src.Etching { + etching := runes.Etching{} + if src.EtchingDivisibility.Valid { + divisibility := uint8(src.EtchingDivisibility.Int16) + etching.Divisibility = &divisibility + } + if src.EtchingPremine.Valid { + premine, err := uint128FromNumeric(src.EtchingPremine) + if err != nil { + return runes.Runestone{}, errors.Wrap(err, "failed to parse etching premine") + } + etching.Premine = premine + } + if src.EtchingRune.Valid { + rune, err := runes.NewRuneFromString(src.EtchingRune.String) + if err != nil { + return runes.Runestone{}, errors.Wrap(err, "failed to parse etching rune") + } + etching.Rune = &rune + } + if src.EtchingSpacers.Valid { + spacers := uint32(src.EtchingSpacers.Int32) + etching.Spacers = &spacers + } + if src.EtchingSymbol.Valid { + var symbol rune = src.EtchingSymbol.Int32 + etching.Symbol = &symbol + } + if src.EtchingTerms.Valid && src.EtchingTerms.Bool { + terms := runes.Terms{} + if src.EtchingTermsAmount.Valid { + amount, err := uint128FromNumeric(src.EtchingTermsAmount) + if err != nil { + return runes.Runestone{}, errors.Wrap(err, "failed to parse etching terms amount") + } + terms.Amount = amount + } + if src.EtchingTermsCap.Valid { + cap, err := 
uint128FromNumeric(src.EtchingTermsCap) + if err != nil { + return runes.Runestone{}, errors.Wrap(err, "failed to parse etching terms cap") + } + terms.Cap = cap + } + if src.EtchingTermsHeightStart.Valid { + heightStart := uint64(src.EtchingTermsHeightStart.Int32) + terms.HeightStart = &heightStart + } + if src.EtchingTermsHeightEnd.Valid { + heightEnd := uint64(src.EtchingTermsHeightEnd.Int32) + terms.HeightEnd = &heightEnd + } + if src.EtchingTermsOffsetStart.Valid { + offsetStart := uint64(src.EtchingTermsOffsetStart.Int32) + terms.OffsetStart = &offsetStart + } + if src.EtchingTermsOffsetEnd.Valid { + offsetEnd := uint64(src.EtchingTermsOffsetEnd.Int32) + terms.OffsetEnd = &offsetEnd + } + etching.Terms = &terms + } + etching.Turbo = src.EtchingTurbo.Valid && src.EtchingTurbo.Bool + runestone.Etching = &etching + } + if src.Mint.Valid { + mint, err := runes.NewRuneIdFromString(src.Mint.String) + if err != nil { + return runes.Runestone{}, errors.Wrap(err, "failed to parse mint") + } + runestone.Mint = &mint + } + if src.Pointer.Valid { + pointer := uint64(src.Pointer.Int32) + runestone.Pointer = &pointer + } + // Edicts + { + if err := json.Unmarshal(src.Edicts, &runestone.Edicts); err != nil { + return runes.Runestone{}, errors.Wrap(err, "failed to unmarshal edicts") + } + if len(runestone.Edicts) == 0 { + runestone.Edicts = nil + } + } + return runestone, nil +} + +func mapBalanceModelToType(src gen.RunesBalance) (*entity.Balance, error) { + runeId, err := runes.NewRuneIdFromString(src.RuneID) + if err != nil { + return nil, errors.Wrap(err, "failed to parse rune id") + } + amount, err := uint128FromNumeric(src.Amount) + if err != nil { + return nil, errors.Wrap(err, "failed to parse balance") + } + pkScript, err := hex.DecodeString(src.Pkscript) + if err != nil { + return nil, errors.Wrap(err, "failed to parse pkscript") + } + return &entity.Balance{ + PkScript: pkScript, + RuneId: runeId, + Amount: lo.FromPtr(amount), + BlockHeight: 
uint64(src.BlockHeight), + }, nil +} + +func mapIndexedBlockModelToType(src gen.RunesIndexedBlock) (*entity.IndexedBlock, error) { + hash, err := chainhash.NewHashFromStr(src.Hash) + if err != nil { + return nil, errors.Wrap(err, "failed to parse block hash") + } + prevBlockHash, err := chainhash.NewHashFromStr(src.PrevHash) + if err != nil { + return nil, errors.Wrap(err, "failed to parse prev block hash") + } + eventHash, err := chainhash.NewHashFromStr(src.EventHash) + if err != nil { + return nil, errors.Wrap(err, "failed to parse event hash") + } + cumulativeEventHash, err := chainhash.NewHashFromStr(src.CumulativeEventHash) + if err != nil { + return nil, errors.Wrap(err, "failed to parse cumulative event hash") + } + return &entity.IndexedBlock{ + Height: int64(src.Height), + Hash: *hash, + PrevHash: *prevBlockHash, + EventHash: *eventHash, + CumulativeEventHash: *cumulativeEventHash, + }, nil +} + +func mapIndexedBlockTypeToParams(src entity.IndexedBlock) (gen.CreateIndexedBlockParams, error) { + return gen.CreateIndexedBlockParams{ + Height: int32(src.Height), + Hash: src.Hash.String(), + PrevHash: src.PrevHash.String(), + EventHash: src.EventHash.String(), + CumulativeEventHash: src.CumulativeEventHash.String(), + }, nil +} + +func mapOutPointBalanceModelToType(src gen.RunesOutpointBalance) (entity.OutPointBalance, error) { + runeId, err := runes.NewRuneIdFromString(src.RuneID) + if err != nil { + return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse rune id") + } + amount, err := uint128FromNumeric(src.Amount) + if err != nil { + return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse balance") + } + pkScript, err := hex.DecodeString(src.Pkscript) + if err != nil { + return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse pkscript") + } + txHash, err := chainhash.NewHashFromStr(src.TxHash) + if err != nil { + return entity.OutPointBalance{}, errors.Wrap(err, "failed to parse tx hash") + } + var spentHeight *uint64 + if 
src.SpentHeight.Valid { + spentHeight = lo.ToPtr(uint64(src.SpentHeight.Int32)) + } + return entity.OutPointBalance{ + PkScript: pkScript, + RuneId: runeId, + Amount: lo.FromPtr(amount), + OutPoint: wire.OutPoint{ + Hash: *txHash, + Index: uint32(src.TxIdx), + }, + BlockHeight: uint64(src.BlockHeight), + SpentHeight: spentHeight, + }, nil +} + +func mapOutPointBalanceTypeToParams(src entity.OutPointBalance) (gen.CreateOutPointBalancesParams, error) { + amount, err := numericFromUint128(&src.Amount) + if err != nil { + return gen.CreateOutPointBalancesParams{}, errors.Wrap(err, "failed to parse amount") + } + var spentHeight pgtype.Int4 + if src.SpentHeight != nil { + spentHeight = pgtype.Int4{Int32: int32(*src.SpentHeight), Valid: true} + } + return gen.CreateOutPointBalancesParams{ + TxHash: src.OutPoint.Hash.String(), + TxIdx: int32(src.OutPoint.Index), + Pkscript: hex.EncodeToString(src.PkScript), + RuneID: src.RuneId.String(), + Amount: amount, + BlockHeight: int32(src.BlockHeight), + SpentHeight: spentHeight, + }, nil +} diff --git a/modules/runes/repository/postgres/mapper_test.go b/modules/runes/repository/postgres/mapper_test.go new file mode 100644 index 0000000..4c7bc0e --- /dev/null +++ b/modules/runes/repository/postgres/mapper_test.go @@ -0,0 +1,61 @@ +package postgres + +import ( + "testing" + + "github.com/gaze-network/uint128" + "github.com/jackc/pgx/v5/pgtype" + "github.com/stretchr/testify/assert" +) + +func TestUint128FromNumeric(t *testing.T) { + t.Run("normal", func(t *testing.T) { + numeric := pgtype.Numeric{} + numeric.ScanInt64(pgtype.Int8{ + Int64: 1000, + Valid: true, + }) + + expected := uint128.From64(1000) + + result, err := uint128FromNumeric(numeric) + assert.NoError(t, err) + assert.Equal(t, &expected, result) + }) + t.Run("nil", func(t *testing.T) { + numeric := pgtype.Numeric{} + numeric.ScanInt64(pgtype.Int8{ + Valid: false, + }) + + result, err := uint128FromNumeric(numeric) + assert.NoError(t, err) + assert.Nil(t, result) + }) 
+} + +func TestNumericFromUint128(t *testing.T) { + t.Run("normal", func(t *testing.T) { + u128 := uint128.From64(1) + + expected := pgtype.Numeric{} + expected.ScanInt64(pgtype.Int8{ + Int64: 1, + Valid: true, + }) + + result, err := numericFromUint128(&u128) + assert.NoError(t, err) + assert.Equal(t, expected, result) + }) + t.Run("nil", func(t *testing.T) { + expected := pgtype.Numeric{} + expected.ScanInt64(pgtype.Int8{ + Valid: false, + }) + + result, err := numericFromUint128(nil) + assert.NoError(t, err) + assert.Equal(t, expected, result) + }) +} diff --git a/modules/runes/repository/postgres/postgres.go b/modules/runes/repository/postgres/postgres.go new file mode 100644 index 0000000..d2a841c --- /dev/null +++ b/modules/runes/repository/postgres/postgres.go @@ -0,0 +1,20 @@ +package postgres + +import ( + "github.com/gaze-network/indexer-network/internal/postgres" + "github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen" + "github.com/jackc/pgx/v5" +) + +type Repository struct { + db postgres.DB + queries *gen.Queries + tx pgx.Tx +} + +func NewRepository(db postgres.DB) *Repository { + return &Repository{ + db: db, + queries: gen.New(db), + } +} diff --git a/modules/runes/repository/postgres/runes.go b/modules/runes/repository/postgres/runes.go new file mode 100644 index 0000000..bf495a9 --- /dev/null +++ b/modules/runes/repository/postgres/runes.go @@ -0,0 +1,483 @@ +package postgres + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/modules/runes/datagateway" + "github.com/gaze-network/indexer-network/modules/runes/internal/entity" + "github.com/gaze-network/indexer-network/modules/runes/repository/postgres/gen" + 
"github.com/gaze-network/indexer-network/modules/runes/runes" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" + "github.com/samber/lo" +) + +var _ datagateway.RunesDataGateway = (*Repository)(nil) + +// warning: GetLatestBlock currently returns a types.BlockHeader with only Height, Hash, and PrevBlock fields populated. +// This is because it is known that all usage of this function only requires these fields. In the future, we may want to populate all fields for type safety. +func (r *Repository) GetLatestBlock(ctx context.Context) (types.BlockHeader, error) { + block, err := r.queries.GetLatestIndexedBlock(ctx) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return types.BlockHeader{}, errors.WithStack(errs.NotFound) + } + return types.BlockHeader{}, errors.Wrap(err, "error during query") + } + hash, err := chainhash.NewHashFromStr(block.Hash) + if err != nil { + return types.BlockHeader{}, errors.Wrap(err, "failed to parse block hash") + } + prevHash, err := chainhash.NewHashFromStr(block.PrevHash) + if err != nil { + return types.BlockHeader{}, errors.Wrap(err, "failed to parse prev block hash") + } + return types.BlockHeader{ + Height: int64(block.Height), + Hash: *hash, + PrevBlock: *prevHash, + }, nil +} + +func (r *Repository) GetIndexedBlockByHeight(ctx context.Context, height int64) (*entity.IndexedBlock, error) { + indexedBlockModel, err := r.queries.GetIndexedBlockByHeight(ctx, int32(height)) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errors.WithStack(errs.NotFound) + } + return nil, errors.Wrap(err, "error during query") + } + + indexedBlock, err := mapIndexedBlockModelToType(indexedBlockModel) + if err != nil { + return nil, errors.Wrap(err, "failed to parse indexed block model") + } + return indexedBlock, nil +} + +func (r *Repository) GetRuneTransactions(ctx context.Context, pkScript []byte, runeId runes.RuneId, height uint64) ([]*entity.RuneTransaction, error) { + pkScriptParam := 
[]byte(fmt.Sprintf(`[{"pkScript":"%s"}]`, hex.EncodeToString(pkScript))) + runeIdParam := []byte(fmt.Sprintf(`[{"runeId":"%s"}]`, runeId.String())) + rows, err := r.queries.GetRuneTransactions(ctx, gen.GetRuneTransactionsParams{ + FilterPkScript: pkScript != nil, + PkScriptParam: pkScriptParam, + + FilterRuneID: runeId != runes.RuneId{}, + RuneIDParam: runeIdParam, + RuneID: []byte(runeId.String()), + RuneIDBlockHeight: int32(runeId.BlockHeight), + RuneIDTxIndex: int32(runeId.TxIndex), + + BlockHeight: int32(height), + }) + if err != nil { + return nil, errors.Wrap(err, "error during query") + } + + runeTxs := make([]*entity.RuneTransaction, 0, len(rows)) + for _, row := range rows { + runeTxModel, runestoneModel, err := extractModelRuneTxAndRunestone(row) + if err != nil { + return nil, errors.Wrap(err, "failed to extract rune transaction and runestone from row") + } + + runeTx, err := mapRuneTransactionModelToType(runeTxModel) + if err != nil { + return nil, errors.Wrap(err, "failed to parse rune transaction model") + } + if runestoneModel != nil { + runestone, err := mapRunestoneModelToType(*runestoneModel) + if err != nil { + return nil, errors.Wrap(err, "failed to parse runestone model") + } + runeTx.Runestone = &runestone + } + runeTxs = append(runeTxs, &runeTx) + } + return runeTxs, nil +} + +func (r *Repository) GetRunesBalancesAtOutPoint(ctx context.Context, outPoint wire.OutPoint) (map[runes.RuneId]*entity.OutPointBalance, error) { + balances, err := r.queries.GetOutPointBalancesAtOutPoint(ctx, gen.GetOutPointBalancesAtOutPointParams{ + TxHash: outPoint.Hash.String(), + TxIdx: int32(outPoint.Index), + }) + if err != nil { + return nil, errors.Wrap(err, "error during query") + } + + result := make(map[runes.RuneId]*entity.OutPointBalance, len(balances)) + for _, balanceModel := range balances { + balance, err := mapOutPointBalanceModelToType(balanceModel) + if err != nil { + return nil, errors.Wrap(err, "failed to parse balance model") + } + 
result[balance.RuneId] = &balance + } + return result, nil +} + +func (r *Repository) GetUnspentOutPointBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) ([]*entity.OutPointBalance, error) { + balances, err := r.queries.GetUnspentOutPointBalancesByPkScript(ctx, gen.GetUnspentOutPointBalancesByPkScriptParams{ + Pkscript: hex.EncodeToString(pkScript), + BlockHeight: int32(blockHeight), + }) + if err != nil { + return nil, errors.Wrap(err, "error during query") + } + + result := make([]*entity.OutPointBalance, 0, len(balances)) + for _, balanceModel := range balances { + balance, err := mapOutPointBalanceModelToType(balanceModel) + if err != nil { + return nil, errors.Wrap(err, "failed to parse balance model") + } + result = append(result, &balance) + } + return result, nil +} + +func (r *Repository) GetRuneIdFromRune(ctx context.Context, rune runes.Rune) (runes.RuneId, error) { + runeIdStr, err := r.queries.GetRuneIdFromRune(ctx, rune.String()) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return runes.RuneId{}, errors.WithStack(errs.NotFound) + } + return runes.RuneId{}, errors.Wrap(err, "error during query") + } + runeId, err := runes.NewRuneIdFromString(runeIdStr) + if err != nil { + return runes.RuneId{}, errors.Wrap(err, "failed to parse RuneId") + } + return runeId, nil +} + +func (r *Repository) GetRuneEntryByRuneId(ctx context.Context, runeId runes.RuneId) (*runes.RuneEntry, error) { + runeEntries, err := r.GetRuneEntryByRuneIdBatch(ctx, []runes.RuneId{runeId}) + if err != nil { + return nil, errors.Wrap(err, "failed to get rune entries by rune id") + } + runeEntry, ok := runeEntries[runeId] + if !ok { + return nil, errors.WithStack(errs.NotFound) + } + return runeEntry, nil +} + +func (r *Repository) GetRuneEntryByRuneIdBatch(ctx context.Context, runeIds []runes.RuneId) (map[runes.RuneId]*runes.RuneEntry, error) { + rows, err := r.queries.GetRuneEntriesByRuneIds(ctx, lo.Map(runeIds, func(runeId runes.RuneId, _ int) 
string { + return runeId.String() + })) + if err != nil { + return nil, errors.Wrap(err, "error during query") + } + + runeEntries := make(map[runes.RuneId]*runes.RuneEntry, len(rows)) + var errs []error + for i, runeEntryModel := range rows { + runeEntry, err := mapRuneEntryModelToType(runeEntryModel) + if err != nil { + errs = append(errs, errors.Wrapf(err, "failed to parse rune entry model index %d", i)) + continue + } + runeEntries[runeEntry.RuneId] = &runeEntry + } + if len(errs) > 0 { + return nil, errors.Join(errs...) + } + + return runeEntries, nil +} + +func (r *Repository) GetRuneEntryByRuneIdAndHeight(ctx context.Context, runeId runes.RuneId, blockHeight uint64) (*runes.RuneEntry, error) { + runeEntries, err := r.GetRuneEntryByRuneIdBatch(ctx, []runes.RuneId{runeId}) + if err != nil { + return nil, errors.Wrap(err, "failed to get rune entries by rune id") + } + runeEntry, ok := runeEntries[runeId] + if !ok { + return nil, errors.WithStack(errs.NotFound) + } + return runeEntry, nil +} + +func (r *Repository) GetRuneEntryByRuneIdAndHeightBatch(ctx context.Context, runeIds []runes.RuneId, blockHeight uint64) (map[runes.RuneId]*runes.RuneEntry, error) { + rows, err := r.queries.GetRuneEntriesByRuneIdsAndHeight(ctx, gen.GetRuneEntriesByRuneIdsAndHeightParams{ + RuneIds: lo.Map(runeIds, func(runeId runes.RuneId, _ int) string { + return runeId.String() + }), + Height: int32(blockHeight), + }) + if err != nil { + return nil, errors.Wrap(err, "error during query") + } + + runeEntries := make(map[runes.RuneId]*runes.RuneEntry, len(rows)) + var errs []error + for i, runeEntryModel := range rows { + runeEntry, err := mapRuneEntryModelToType(gen.GetRuneEntriesByRuneIdsRow(runeEntryModel)) + if err != nil { + errs = append(errs, errors.Wrapf(err, "failed to parse rune entry model index %d", i)) + continue + } + runeEntries[runeEntry.RuneId] = &runeEntry + } + if len(errs) > 0 { + return nil, errors.Join(errs...) 
+ } + + return runeEntries, nil +} + +func (r *Repository) CountRuneEntries(ctx context.Context) (uint64, error) { + count, err := r.queries.CountRuneEntries(ctx) + if err != nil { + return 0, errors.Wrap(err, "error during query") + } + return uint64(count), nil +} + +func (r *Repository) GetBalancesByPkScript(ctx context.Context, pkScript []byte, blockHeight uint64) (map[runes.RuneId]*entity.Balance, error) { + balances, err := r.queries.GetBalancesByPkScript(ctx, gen.GetBalancesByPkScriptParams{ + Pkscript: hex.EncodeToString(pkScript), + BlockHeight: int32(blockHeight), + }) + if err != nil { + return nil, errors.Wrap(err, "error during query") + } + + result := make(map[runes.RuneId]*entity.Balance, len(balances)) + for _, balanceModel := range balances { + balance, err := mapBalanceModelToType(gen.RunesBalance(balanceModel)) + if err != nil { + return nil, errors.Wrap(err, "failed to parse balance model") + } + result[balance.RuneId] = balance + } + return result, nil +} + +func (r *Repository) GetBalancesByRuneId(ctx context.Context, runeId runes.RuneId, blockHeight uint64) ([]*entity.Balance, error) { + balances, err := r.queries.GetBalancesByRuneId(ctx, gen.GetBalancesByRuneIdParams{ + RuneID: runeId.String(), + BlockHeight: int32(blockHeight), + }) + if err != nil { + return nil, errors.Wrap(err, "error during query") + } + + result := make([]*entity.Balance, 0, len(balances)) + for _, balanceModel := range balances { + balance, err := mapBalanceModelToType(gen.RunesBalance(balanceModel)) + if err != nil { + return nil, errors.Wrap(err, "failed to parse balance model") + } + result = append(result, balance) + } + return result, nil +} + +func (r *Repository) GetBalanceByPkScriptAndRuneId(ctx context.Context, pkScript []byte, runeId runes.RuneId, blockHeight uint64) (*entity.Balance, error) { + balance, err := r.queries.GetBalanceByPkScriptAndRuneId(ctx, gen.GetBalanceByPkScriptAndRuneIdParams{ + Pkscript: hex.EncodeToString(pkScript), + RuneID: 
runeId.String(), + BlockHeight: int32(blockHeight), + }) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return nil, errors.WithStack(errs.NotFound) + } + return nil, errors.Wrap(err, "error during query") + } + + result, err := mapBalanceModelToType(balance) + if err != nil { + return nil, errors.Wrap(err, "failed to parse balance model") + } + return result, nil +} + +func (r *Repository) CreateRuneTransaction(ctx context.Context, tx *entity.RuneTransaction) error { + if tx == nil { + return nil + } + txParams, runestoneParams, err := mapRuneTransactionTypeToParams(*tx) + if err != nil { + return errors.Wrap(err, "failed to map rune transaction to params") + } + if err = r.queries.CreateRuneTransaction(ctx, txParams); err != nil { + return errors.Wrap(err, "error during exec CreateRuneTransaction") + } + if runestoneParams != nil { + if err = r.queries.CreateRunestone(ctx, *runestoneParams); err != nil { + return errors.Wrap(err, "error during exec CreateRunestone") + } + } + return nil +} + +func (r *Repository) CreateRuneEntry(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error { + if entry == nil { + return nil + } + createParams, _, err := mapRuneEntryTypeToParams(*entry, blockHeight) + if err != nil { + return errors.Wrap(err, "failed to map rune entry to params") + } + if err = r.queries.CreateRuneEntry(ctx, createParams); err != nil { + return errors.Wrap(err, "error during exec CreateRuneEntry") + } + return nil +} + +func (r *Repository) CreateRuneEntryState(ctx context.Context, entry *runes.RuneEntry, blockHeight uint64) error { + if entry == nil { + return nil + } + _, createStateParams, err := mapRuneEntryTypeToParams(*entry, blockHeight) + if err != nil { + return errors.Wrap(err, "failed to map rune entry to params") + } + if err = r.queries.CreateRuneEntryState(ctx, createStateParams); err != nil { + return errors.Wrap(err, "error during exec CreateRuneEntryState") + } + return nil +} + +func (r *Repository) 
CreateOutPointBalances(ctx context.Context, outPointBalances []*entity.OutPointBalance) error { + params := make([]gen.CreateOutPointBalancesParams, 0, len(outPointBalances)) + for _, balance := range outPointBalances { + param, err := mapOutPointBalanceTypeToParams(*balance) + if err != nil { + return errors.Wrap(err, "failed to map outpoint balance to params") + } + params = append(params, param) + } + result := r.queries.CreateOutPointBalances(ctx, params) + var execErrors []error + result.Exec(func(i int, err error) { + if err != nil { + execErrors = append(execErrors, err) + } + }) + if len(execErrors) > 0 { + return errors.Wrap(errors.Join(execErrors...), "error during exec") + } + return nil +} + +func (r *Repository) SpendOutPointBalances(ctx context.Context, outPoint wire.OutPoint, blockHeight uint64) error { + if err := r.queries.SpendOutPointBalances(ctx, gen.SpendOutPointBalancesParams{ + TxHash: outPoint.Hash.String(), + TxIdx: int32(outPoint.Index), + SpentHeight: pgtype.Int4{Int32: int32(blockHeight), Valid: true}, + }); err != nil { + return errors.Wrap(err, "error during exec") + } + return nil +} + +func (r *Repository) CreateRuneBalances(ctx context.Context, params []datagateway.CreateRuneBalancesParams) error { + insertParams := make([]gen.CreateRuneBalanceAtBlockParams, 0, len(params)) + for _, param := range params { + param := param + amount, err := numericFromUint128(¶m.Balance) + if err != nil { + return errors.Wrap(err, "failed to convert balance to numeric") + } + insertParams = append(insertParams, gen.CreateRuneBalanceAtBlockParams{ + Pkscript: hex.EncodeToString(param.PkScript), + BlockHeight: int32(param.BlockHeight), + RuneID: param.RuneId.String(), + Amount: amount, + }) + } + result := r.queries.CreateRuneBalanceAtBlock(ctx, insertParams) + var execErrors []error + result.Exec(func(i int, err error) { + if err != nil { + execErrors = append(execErrors, err) + } + }) + if len(execErrors) > 0 { + return 
errors.Wrap(errors.Join(execErrors...), "error during exec") + } + return nil +} + +func (r *Repository) CreateIndexedBlock(ctx context.Context, block *entity.IndexedBlock) error { + if block == nil { + return nil + } + params, err := mapIndexedBlockTypeToParams(*block) + if err != nil { + return errors.Wrap(err, "failed to map indexed block to params") + } + if err = r.queries.CreateIndexedBlock(ctx, params); err != nil { + return errors.Wrap(err, "error during exec") + } + return nil +} + +func (r *Repository) DeleteIndexedBlockSinceHeight(ctx context.Context, height uint64) error { + if err := r.queries.DeleteIndexedBlockSinceHeight(ctx, int32(height)); err != nil { + return errors.Wrap(err, "error during exec") + } + return nil +} + +func (r *Repository) DeleteRuneEntriesSinceHeight(ctx context.Context, height uint64) error { + if err := r.queries.DeleteRuneEntriesSinceHeight(ctx, int32(height)); err != nil { + return errors.Wrap(err, "error during exec") + } + return nil +} + +func (r *Repository) DeleteRuneEntryStatesSinceHeight(ctx context.Context, height uint64) error { + if err := r.queries.DeleteRuneEntryStatesSinceHeight(ctx, int32(height)); err != nil { + return errors.Wrap(err, "error during exec") + } + return nil +} + +func (r *Repository) DeleteRuneTransactionsSinceHeight(ctx context.Context, height uint64) error { + if err := r.queries.DeleteRuneTransactionsSinceHeight(ctx, int32(height)); err != nil { + return errors.Wrap(err, "error during exec") + } + return nil +} + +func (r *Repository) DeleteRunestonesSinceHeight(ctx context.Context, height uint64) error { + if err := r.queries.DeleteRunestonesSinceHeight(ctx, int32(height)); err != nil { + return errors.Wrap(err, "error during exec") + } + return nil +} + +func (r *Repository) DeleteOutPointBalancesSinceHeight(ctx context.Context, height uint64) error { + if err := r.queries.DeleteOutPointBalancesSinceHeight(ctx, int32(height)); err != nil { + return errors.Wrap(err, "error during exec") + } 
+ return nil +} + +func (r *Repository) UnspendOutPointBalancesSinceHeight(ctx context.Context, height uint64) error { + if err := r.queries.UnspendOutPointBalancesSinceHeight(ctx, pgtype.Int4{Int32: int32(height), Valid: true}); err != nil { + return errors.Wrap(err, "error during exec") + } + return nil +} + +func (r *Repository) DeleteRuneBalancesSinceHeight(ctx context.Context, height uint64) error { + if err := r.queries.DeleteRuneBalancesSinceHeight(ctx, int32(height)); err != nil { + return errors.Wrap(err, "error during exec") + } + return nil +} diff --git a/modules/runes/repository/postgres/tx.go b/modules/runes/repository/postgres/tx.go new file mode 100644 index 0000000..649e6c4 --- /dev/null +++ b/modules/runes/repository/postgres/tx.go @@ -0,0 +1,62 @@ +package postgres + +import ( + "context" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/modules/runes/datagateway" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/jackc/pgx" +) + +var ErrTxAlreadyExists = errors.New("Transaction already exists. 
Call Commit() or Rollback() first.") + +func (r *Repository) begin(ctx context.Context) (*Repository, error) { + if r.tx != nil { + return nil, errors.WithStack(ErrTxAlreadyExists) + } + tx, err := r.db.Begin(ctx) + if err != nil { + return nil, errors.Wrap(err, "failed to begin transaction") + } + return &Repository{ + db: r.db, + queries: r.queries.WithTx(tx), + tx: tx, + }, nil +} + +func (r *Repository) BeginRunesTx(ctx context.Context) (datagateway.RunesDataGatewayWithTx, error) { + repo, err := r.begin(ctx) + if err != nil { + return nil, errors.WithStack(err) + } + return repo, nil +} + +func (r *Repository) Commit(ctx context.Context) error { + if r.tx == nil { + return nil + } + err := r.tx.Commit(ctx) + if err != nil { + return errors.Wrap(err, "failed to commit transaction") + } + r.tx = nil + return nil +} + +func (r *Repository) Rollback(ctx context.Context) error { + if r.tx == nil { + return nil + } + err := r.tx.Rollback(ctx) + if err != nil && !errors.Is(err, pgx.ErrTxClosed) { + return errors.Wrap(err, "failed to rollback transaction") + } + if err == nil { + logger.DebugContext(ctx, "rolled back transaction") + } + r.tx = nil + return nil +} diff --git a/modules/runes/runes/edict.go b/modules/runes/runes/edict.go new file mode 100644 index 0000000..c37b4c1 --- /dev/null +++ b/modules/runes/runes/edict.go @@ -0,0 +1,11 @@ +package runes + +import ( + "github.com/gaze-network/uint128" +) + +type Edict struct { + Amount uint128.Uint128 + Id RuneId + Output int +} diff --git a/modules/runes/runes/etching.go b/modules/runes/runes/etching.go new file mode 100644 index 0000000..7337ba7 --- /dev/null +++ b/modules/runes/runes/etching.go @@ -0,0 +1,64 @@ +package runes + +import ( + "github.com/Cleverse/go-utilities/utils" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/uint128" + "github.com/samber/lo" +) + +type Terms struct { + // Amount of the rune to be minted per transaction + 
Amount *uint128.Uint128 + // Number of allowed mints + Cap *uint128.Uint128 + // Block height at which the rune can start being minted. If both HeightStart and OffsetStart are set, use the higher value. + HeightStart *uint64 + // Block height at which the rune can no longer be minted. If both HeightEnd and OffsetEnd are set, use the lower value. + HeightEnd *uint64 + // Offset from etched block at which the rune can start being minted. If both HeightStart and OffsetStart are set, use the higher value. + OffsetStart *uint64 + // Offset from etched block at which the rune can no longer be minted. If both HeightEnd and OffsetEnd are set, use the lower value. + OffsetEnd *uint64 +} + +type Etching struct { + // Number of decimals when displaying the rune + Divisibility *uint8 + // Number of runes to be minted during etching + Premine *uint128.Uint128 + // Rune name + Rune *Rune + // Bitmap of spacers to be displayed between each letter of the rune name + Spacers *uint32 + // Single Unicode codepoint to represent the rune + Symbol *rune + // Minting terms. If not provided, the rune is not mintable. 
+ Terms *Terms + // Whether to opt-in to future protocol changes, whatever they may be + Turbo bool +} + +const ( + maxDivisibility uint8 = 38 + maxSpacers uint32 = 0b00000111_11111111_11111111_11111111 +) + +func (e Etching) Supply() (uint128.Uint128, error) { + terms := utils.Default(e.Terms, &Terms{}) + + amount := lo.FromPtr(terms.Amount) + cap := lo.FromPtr(terms.Cap) + premine := lo.FromPtr(e.Premine) + + result, overflow := amount.MulOverflow(cap) + if overflow { + return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128) + } + result, overflow = result.AddOverflow(premine) + if overflow { + return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128) + } + return result, nil +} diff --git a/modules/runes/runes/etching_test.go b/modules/runes/runes/etching_test.go new file mode 100644 index 0000000..80ed64d --- /dev/null +++ b/modules/runes/runes/etching_test.go @@ -0,0 +1,123 @@ +package runes + +import ( + "fmt" + "strings" + "testing" + + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/uint128" + "github.com/samber/lo" + "github.com/stretchr/testify/assert" +) + +func TestMaxSpacers(t *testing.T) { + maxRune := Rune(uint128.Max) + var sb strings.Builder + for i, c := range maxRune.String() { + if i > 0 { + sb.WriteRune('•') + } + sb.WriteRune(c) + } + spacedRune, err := NewSpacedRuneFromString(sb.String()) + assert.NoError(t, err) + assert.Equal(t, maxSpacers, spacedRune.Spacers) +} + +func TestSupply(t *testing.T) { + testNumber := 0 + test := func(e Etching, expectedSupply uint128.Uint128) { + t.Run(fmt.Sprintf("case_%d", testNumber), func(t *testing.T) { + t.Parallel() + actualSupply, err := e.Supply() + assert.NoError(t, err) + assert.Equal(t, expectedSupply, actualSupply) + }) + testNumber++ + } + testError := func(e Etching, expectedError error) { + t.Run(fmt.Sprintf("case_%d", testNumber), func(t *testing.T) { + t.Parallel() + _, err := e.Supply() + assert.ErrorIs(t, err, expectedError) + }) + 
testNumber++ + } + + test(Etching{}, uint128.From64(0)) + + test(Etching{ + Premine: lo.ToPtr(uint128.From64(0)), + Terms: nil, + }, uint128.From64(0)) + + test(Etching{ + Premine: lo.ToPtr(uint128.From64(1)), + Terms: nil, + }, uint128.From64(1)) + + test(Etching{ + Premine: lo.ToPtr(uint128.From64(1)), + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(0)), + Cap: lo.ToPtr(uint128.From64(0)), + }, + }, uint128.From64(1)) + + test(Etching{ + Premine: lo.ToPtr(uint128.From64(1000)), + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(100)), + Cap: lo.ToPtr(uint128.From64(10)), + }, + }, uint128.From64(2000)) + + test(Etching{ + Premine: lo.ToPtr(uint128.From64(0)), + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(100)), + Cap: lo.ToPtr(uint128.From64(10)), + }, + }, uint128.From64(1000)) + + test(Etching{ + Premine: lo.ToPtr(uint128.From64(1000)), + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(100)), + Cap: lo.ToPtr(uint128.From64(0)), + }, + }, uint128.From64(1000)) + + test(Etching{ + Premine: lo.ToPtr(uint128.From64(1000)), + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(0)), + Cap: lo.ToPtr(uint128.From64(10)), + }, + }, uint128.From64(1000)) + + test(Etching{ + Premine: lo.ToPtr(uint128.Max.Div64(2).Add64(1)), + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(1)), + Cap: lo.ToPtr(uint128.Max.Div64(2)), + }, + }, uint128.Max) + + test(Etching{ + Premine: lo.ToPtr(uint128.From64(0)), + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(1)), + Cap: lo.ToPtr(uint128.Max), + }, + }, uint128.Max) + + testError(Etching{ + Premine: lo.ToPtr(uint128.Max), + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(1)), + Cap: lo.ToPtr(uint128.From64(1)), + }, + }, errs.OverflowUint128) +} diff --git a/modules/runes/runes/flag.go b/modules/runes/runes/flag.go new file mode 100644 index 0000000..e45df1c --- /dev/null +++ b/modules/runes/runes/flag.go @@ -0,0 +1,77 @@ +package runes + +import ( + "math/big" + + "github.com/cockroachdb/errors" + 
"github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/uint128" +) + +// Flag represents a single flag that can be set on a runestone. +type Flag uint8 + +const ( + FlagEtching = Flag(0) + FlagTerms = Flag(1) + FlagTurbo = Flag(2) + FlagCenotaph = Flag(127) +) + +func (f Flag) Mask() Flags { + return Flags(uint128.From64(1).Lsh(uint(f))) +} + +// Flags is a bitmask of flags that can be set on a runestone. +type Flags uint128.Uint128 + +func (f Flags) Uint128() uint128.Uint128 { + return uint128.Uint128(f) +} + +func (f Flags) And(other Flags) Flags { + return Flags(f.Uint128().And(other.Uint128())) +} + +func (f Flags) Or(other Flags) Flags { + return Flags(f.Uint128().Or(other.Uint128())) +} + +func ParseFlags(input interface{}) (Flags, error) { + switch input := input.(type) { + case Flags: + return input, nil + case uint128.Uint128: + return Flags(input), nil + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return Flags(uint128.From64(input.(uint64))), nil + case big.Int: + u128, err := uint128.FromBig(&input) + if err != nil { + return Flags{}, errors.Join(err, errs.OverflowUint128) + } + return Flags(u128), nil + case *big.Int: + u128, err := uint128.FromBig(input) + if err != nil { + return Flags{}, errors.Join(err, errs.OverflowUint128) + } + return Flags(u128), nil + default: + panic("invalid flags input type") + } +} + +func (f *Flags) Take(flag Flag) bool { + found := !f.And(flag.Mask()).Uint128().Equals64(0) + if found { + // f = f - (1 << flag) + *f = Flags(f.Uint128().Sub(flag.Mask().Uint128())) + } + return found +} + +func (f *Flags) Set(flag Flag) { + // f = f | (1 << flag) + *f = Flags(f.Uint128().Or(flag.Mask().Uint128())) +} diff --git a/modules/runes/runes/flaw.go b/modules/runes/runes/flaw.go new file mode 100644 index 0000000..a2c4013 --- /dev/null +++ b/modules/runes/runes/flaw.go @@ -0,0 +1,60 @@ +package runes + +type FlawFlag int + +const ( + FlawFlagEdictOutput FlawFlag = iota + 
FlawFlagEdictRuneId + FlawFlagInvalidScript + FlawFlagOpCode + FlawFlagSupplyOverflow + FlawFlagTrailingIntegers + FlawFlagTruncatedField + FlawFlagUnrecognizedEvenTag + FlawFlagUnrecognizedFlag + FlawFlagVarInt +) + +func (f FlawFlag) Mask() Flaws { + return 1 << f +} + +var flawMessages = map[FlawFlag]string{ + FlawFlagEdictOutput: "edict output greater than transaction output count", + FlawFlagEdictRuneId: "invalid runeId in edict", + FlawFlagInvalidScript: "invalid script in OP_RETURN", + FlawFlagOpCode: "non-pushdata opcode in OP_RETURN", + FlawFlagSupplyOverflow: "supply overflows uint128", + FlawFlagTrailingIntegers: "trailing integers in body", + FlawFlagTruncatedField: "field with missing value", + FlawFlagUnrecognizedEvenTag: "unrecognized even tag", + FlawFlagUnrecognizedFlag: "unrecognized field", + FlawFlagVarInt: "invalid varint", +} + +func (f FlawFlag) String() string { + return flawMessages[f] +} + +// Flaws is a bitmask of flaws that caused a runestone to be a cenotaph. 
+type Flaws uint32 + +func (f Flaws) Collect() []FlawFlag { + var flags []FlawFlag + // collect from list of all flags + for flag := range flawMessages { + if f&flag.Mask() != 0 { + flags = append(flags, flag) + } + } + return flags +} + +func (f Flaws) CollectAsString() []string { + flawFlags := f.Collect() + flawMsgs := make([]string, 0, len(flawFlags)) + for _, flag := range flawFlags { + flawMsgs = append(flawMsgs, flag.String()) + } + return flawMsgs +} diff --git a/modules/runes/runes/message.go b/modules/runes/runes/message.go new file mode 100644 index 0000000..7b63995 --- /dev/null +++ b/modules/runes/runes/message.go @@ -0,0 +1,94 @@ +package runes + +import ( + "math" + + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/uint128" + "github.com/samber/lo" +) + +type Message struct { + Fields Fields + Edicts []Edict + Flaws Flaws +} + +type Fields map[Tag][]uint128.Uint128 + +func (fields Fields) Take(tag Tag) *uint128.Uint128 { + values, ok := fields[tag] + if !ok { + return nil + } + first := values[0] + values = values[1:] + if len(values) == 0 { + delete(fields, tag) + } else { + fields[tag] = values + } + return &first +} + +func MessageFromIntegers(tx *types.Transaction, payload []uint128.Uint128) Message { + flaws := Flaws(0) + var edicts []Edict + fields := make(map[Tag][]uint128.Uint128) + + for i := 0; i < len(payload); i += 2 { + tag, err := ParseTag(payload[i]) + if err != nil { + continue + } + + // If tag is Body, treat all remaining integers are edicts + if tag == TagBody { + runeId := RuneId{} + for _, chunk := range lo.Chunk(payload[i+1:], 4) { + if len(chunk) != 4 { + flaws |= FlawFlagTrailingIntegers.Mask() + break + } + blockDelta, txIndexDelta, amount, output := chunk[0], chunk[1], chunk[2], chunk[3] + if blockDelta.Cmp64(math.MaxUint64) > 0 || txIndexDelta.Cmp64(math.MaxUint32) > 0 { + flaws |= FlawFlagEdictRuneId.Mask() + break + } + if output.Cmp64(uint64(len(tx.TxOut))) > 0 { + flaws |= 
FlawFlagEdictOutput.Mask() + break + } + runeId, err = runeId.Next(blockDelta.Uint64(), txIndexDelta.Uint32()) // safe to cast as uint32 because we checked + if err != nil { + flaws |= FlawFlagEdictRuneId.Mask() + break + } + edict := Edict{ + Id: runeId, + Amount: amount, + Output: int(output.Uint64()), + } + edicts = append(edicts, edict) + } + break + } + + // append tag value to fields + if i+1 >= len(payload) { + flaws |= FlawFlagTruncatedField.Mask() + break + } + value := payload[i+1] + if _, ok := fields[tag]; !ok { + fields[tag] = make([]uint128.Uint128, 0) + } + fields[tag] = append(fields[tag], value) + } + + return Message{ + Flaws: flaws, + Edicts: edicts, + Fields: fields, + } +} diff --git a/modules/runes/runes/rune.go b/modules/runes/runes/rune.go new file mode 100644 index 0000000..5ccea7b --- /dev/null +++ b/modules/runes/runes/rune.go @@ -0,0 +1,175 @@ +package runes + +import ( + "slices" + + "github.com/Cleverse/go-utilities/utils" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/uint128" +) + +type Rune uint128.Uint128 + +func (r Rune) Uint128() uint128.Uint128 { + return uint128.Uint128(r) +} + +func NewRune(value uint64) Rune { + return Rune(uint128.From64(value)) +} + +func NewRuneFromUint128(value uint128.Uint128) Rune { + return Rune(value) +} + +var ErrInvalidBase26 = errors.New("invalid base-26 character: must be in the range [A-Z]") + +// NewRuneFromString creates a new Rune from a string of modified base-26 integer +func NewRuneFromString(value string) (Rune, error) { + n := uint128.From64(0) + for i, char := range value { + if i > 0 { + n = n.Add(uint128.From64(1)) + } + n = n.Mul(uint128.From64(26)) + if char < 'A' || char > 'Z' { + return Rune{}, ErrInvalidBase26 + } + n = n.Add(uint128.From64(uint64(char - 'A'))) + } + return Rune(n), nil +} + +var firstReservedRune = NewRuneFromUint128(utils.Must(uint128.FromString("6402364363415443603228541259936211926"))) + +var 
unlockSteps = []uint128.Uint128{ + utils.Must(uint128.FromString("0")), // A + utils.Must(uint128.FromString("26")), // AA + utils.Must(uint128.FromString("702")), // AAA + utils.Must(uint128.FromString("18278")), // AAAA + utils.Must(uint128.FromString("475254")), // AAAAA + utils.Must(uint128.FromString("12356630")), // AAAAAA + utils.Must(uint128.FromString("321272406")), // AAAAAAA + utils.Must(uint128.FromString("8353082582")), // AAAAAAAA + utils.Must(uint128.FromString("217180147158")), // AAAAAAAAA + utils.Must(uint128.FromString("5646683826134")), // AAAAAAAAAA + utils.Must(uint128.FromString("146813779479510")), // AAAAAAAAAAA + utils.Must(uint128.FromString("3817158266467286")), // AAAAAAAAAAAA + utils.Must(uint128.FromString("99246114928149462")), // AAAAAAAAAAAAA + utils.Must(uint128.FromString("2580398988131886038")), // AAAAAAAAAAAAAA + utils.Must(uint128.FromString("67090373691429037014")), // AAAAAAAAAAAAAAA + utils.Must(uint128.FromString("1744349715977154962390")), // AAAAAAAAAAAAAAAA + utils.Must(uint128.FromString("45353092615406029022166")), // AAAAAAAAAAAAAAAAA + utils.Must(uint128.FromString("1179180408000556754576342")), // AAAAAAAAAAAAAAAAAA + utils.Must(uint128.FromString("30658690608014475618984918")), // AAAAAAAAAAAAAAAAAAA + utils.Must(uint128.FromString("797125955808376366093607894")), // AAAAAAAAAAAAAAAAAAAA + utils.Must(uint128.FromString("20725274851017785518433805270")), // AAAAAAAAAAAAAAAAAAAAA + utils.Must(uint128.FromString("538857146126462423479278937046")), // AAAAAAAAAAAAAAAAAAAAAA + utils.Must(uint128.FromString("14010285799288023010461252363222")), // AAAAAAAAAAAAAAAAAAAAAAA + utils.Must(uint128.FromString("364267430781488598271992561443798")), // AAAAAAAAAAAAAAAAAAAAAAAA + utils.Must(uint128.FromString("9470953200318703555071806597538774")), // AAAAAAAAAAAAAAAAAAAAAAAAA + utils.Must(uint128.FromString("246244783208286292431866971536008150")), // AAAAAAAAAAAAAAAAAAAAAAAAAA + 
utils.Must(uint128.FromString("6402364363415443603228541259936211926")), // AAAAAAAAAAAAAAAAAAAAAAAAAAA + utils.Must(uint128.FromString("166461473448801533683942072758341510102")), // AAAAAAAAAAAAAAAAAAAAAAAAAAAA +} + +func (r Rune) IsReserved() bool { + return r.Uint128().Cmp(firstReservedRune.Uint128()) >= 0 +} + +// Commitment returns the commitment of the rune. The commitment is the little-endian encoding of the rune. +func (r Rune) Commitment() []byte { + bytes := make([]byte, 16) + r.Uint128().PutBytes(bytes) + end := len(bytes) + for end > 0 && bytes[end-1] == 0 { + end-- + } + return bytes[:end] +} + +// String returns the string representation of the rune in modified base-26 integer +func (r Rune) String() string { + if r.Uint128() == uint128.Max { + return "BCGDENLQRQWDSLRUGSNLBTMFIJAV" + } + + chars := "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + + value := r.Uint128().Add64(1) + var encoded []byte + for !value.IsZero() { + idx := value.Sub64(1).Mod64(26) + encoded = append(encoded, chars[idx]) + value = value.Sub64(1).Div64(26) + } + slices.Reverse(encoded) + return string(encoded) +} + +func (r Rune) Cmp(other Rune) int { + return r.Uint128().Cmp(other.Uint128()) +} + +func FirstRuneHeight(network common.Network) uint64 { + switch network { + case common.NetworkMainnet: + return common.HalvingInterval * 4 + case common.NetworkTestnet: + return common.HalvingInterval * 12 + } + panic("invalid network") +} + +func MinimumRuneAtHeight(network common.Network, height uint64) Rune { + offset := height + 1 + interval := common.HalvingInterval / 12 + + // runes are gradually unlocked from rune activation height until the next halving + start := FirstRuneHeight(network) + end := start + common.HalvingInterval + + if offset < start { + return (Rune)(unlockSteps[12]) + } + if offset >= end { + return (Rune)(unlockSteps[0]) + } + progress := offset - start + length := 12 - progress/uint64(interval) + + startRune := unlockSteps[length] + endRune := unlockSteps[length-1] // 
length cannot be 0 because we checked that offset < end + remainder := progress % uint64(interval) + + // result = startRune - ((startRune - endRune) * remainder / interval) + result := startRune.Sub(startRune.Sub(endRune).Mul64(remainder).Div64(uint64(interval))) + return Rune(result) +} + +func GetReservedRune(blockHeight uint64, txIndex uint32) Rune { + // firstReservedRune + ((blockHeight << 32) | txIndex) + delta := uint128.From64(blockHeight).Lsh(32).Or64(uint64(txIndex)) + return Rune(firstReservedRune.Uint128().Add(delta)) +} + +// MarshalJSON implements json.Marshaler +func (r Rune) MarshalJSON() ([]byte, error) { + return []byte(`"` + r.String() + `"`), nil +} + +// UnmarshalJSON implements json.Unmarshaler +func (r *Rune) UnmarshalJSON(data []byte) error { + // data must be quoted + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return errors.New("must be string") + } + data = data[1 : len(data)-1] + parsed, err := NewRuneFromString(string(data)) + if err != nil { + return errors.WithStack(err) + } + *r = parsed + return nil +} diff --git a/modules/runes/runes/rune_entry.go b/modules/runes/runes/rune_entry.go new file mode 100644 index 0000000..df0ffb8 --- /dev/null +++ b/modules/runes/runes/rune_entry.go @@ -0,0 +1,130 @@ +package runes + +import ( + "math" + "time" + + "github.com/Cleverse/go-utilities/utils" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/uint128" + "github.com/samber/lo" +) + +type RuneEntry struct { + RuneId RuneId + Number uint64 + Divisibility uint8 + // Premine is the amount of the rune that was premined. + Premine uint128.Uint128 + SpacedRune SpacedRune + Symbol rune + // Terms is the minting terms of the rune. + Terms *Terms + Turbo bool + // Mints is the number of times that this rune has been minted. 
+ Mints uint128.Uint128 + BurnedAmount uint128.Uint128 + // CompletedAt is the time when the rune was fully minted. + CompletedAt time.Time + // CompletedAtHeight is the block height when the rune was fully minted. + CompletedAtHeight *uint64 + EtchingBlock uint64 + EtchingTxHash chainhash.Hash + EtchedAt time.Time +} + +var ( + ErrUnmintable = errors.New("rune is not mintable") + ErrMintCapReached = errors.New("rune mint cap reached") + ErrMintBeforeStart = errors.New("rune minting has not started") + ErrMintAfterEnd = errors.New("rune minting has ended") +) + +func (e *RuneEntry) GetMintableAmount(height uint64) (uint128.Uint128, error) { + if e.Terms == nil { + return uint128.Uint128{}, ErrUnmintable + } + if !e.IsMintStarted(height) { + return uint128.Uint128{}, ErrMintBeforeStart + } + if e.IsMintEnded(height) { + return uint128.Uint128{}, ErrMintAfterEnd + } + var cap uint128.Uint128 + if e.Terms.Cap != nil { + cap = *e.Terms.Cap + } + if e.Mints.Cmp(cap) >= 0 { + return uint128.Uint128{}, ErrMintCapReached + } + var amount uint128.Uint128 + if e.Terms.Amount != nil { + amount = *e.Terms.Amount + } + return amount, nil +} + +func (e *RuneEntry) IsMintStarted(height uint64) bool { + if e.Terms == nil { + return false + } + + var relative, absolute uint64 + if e.Terms.OffsetStart != nil { + relative = e.RuneId.BlockHeight + *e.Terms.OffsetStart + } + if e.Terms.HeightStart != nil { + absolute = *e.Terms.HeightStart + } + + return height >= max(relative, absolute) +} + +func (e *RuneEntry) IsMintEnded(height uint64) bool { + if e.Terms == nil { + return false + } + + var relative, absolute uint64 = math.MaxUint64, math.MaxUint64 + if e.Terms.OffsetEnd != nil { + relative = e.RuneId.BlockHeight + *e.Terms.OffsetEnd + } + if e.Terms.HeightEnd != nil { + absolute = *e.Terms.HeightEnd + } + + return height >= min(relative, absolute) +} + +func (e RuneEntry) Supply() (uint128.Uint128, error) { + terms := utils.Default(e.Terms, &Terms{}) + + amount := 
lo.FromPtr(terms.Amount) + cap := lo.FromPtr(terms.Cap) + premine := e.Premine + + result, overflow := amount.MulOverflow(cap) + if overflow { + return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128) + } + result, overflow = result.AddOverflow(premine) + if overflow { + return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128) + } + return result, nil +} + +func (e RuneEntry) MintedAmount() (uint128.Uint128, error) { + terms := lo.FromPtr(e.Terms) + amount, overflow := e.Mints.MulOverflow(lo.FromPtr(terms.Amount)) + if overflow { + return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128) + } + amount, overflow = amount.AddOverflow(e.Premine) + if overflow { + return uint128.Uint128{}, errors.WithStack(errs.OverflowUint128) + } + return amount, nil +} diff --git a/modules/runes/runes/rune_id.go b/modules/runes/runes/rune_id.go new file mode 100644 index 0000000..c793357 --- /dev/null +++ b/modules/runes/runes/rune_id.go @@ -0,0 +1,119 @@ +package runes + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" +) + +type RuneId struct { + BlockHeight uint64 + TxIndex uint32 +} + +var ErrRuneIdZeroBlockNonZeroTxIndex = errors.New("rune id cannot be zero block height and non-zero tx index") + +func NewRuneId(blockHeight uint64, txIndex uint32) (RuneId, error) { + if blockHeight == 0 && txIndex != 0 { + return RuneId{}, errors.WithStack(ErrRuneIdZeroBlockNonZeroTxIndex) + } + return RuneId{ + BlockHeight: blockHeight, + TxIndex: txIndex, + }, nil +} + +var ( + ErrInvalidSeparator = errors.New("invalid rune id: must contain exactly one separator") + ErrCannotParseBlockHeight = errors.New("invalid rune id: cannot parse block height") + ErrCannotParseTxIndex = errors.New("invalid rune id: cannot parse tx index") +) + +func NewRuneIdFromString(str string) (RuneId, error) { + strs := strings.Split(str, ":") + if len(strs) != 2 { + return RuneId{}, 
ErrInvalidSeparator + } + blockHeightStr, txIndexStr := strs[0], strs[1] + blockHeight, err := strconv.ParseUint(blockHeightStr, 10, 64) + if err != nil { + return RuneId{}, errors.WithStack(errors.Join(err, ErrCannotParseBlockHeight)) + } + txIndex, err := strconv.ParseUint(txIndexStr, 10, 32) + if err != nil { + return RuneId{}, errors.WithStack(errors.Join(err, ErrCannotParseTxIndex)) + } + return RuneId{ + BlockHeight: blockHeight, + TxIndex: uint32(txIndex), + }, nil +} + +func (r RuneId) String() string { + return fmt.Sprintf("%d:%d", r.BlockHeight, r.TxIndex) +} + +// Cmp compares two RuneIds. It returns -1 if r is less than other, 0 if r is equal to other, and 1 if r is greater than other. +// RuneIds are compared first by block height and then by tx index in ascending order. +func (r RuneId) Cmp(other RuneId) int { + if r.BlockHeight != other.BlockHeight { + return int(r.BlockHeight - other.BlockHeight) + } + return int(r.TxIndex - other.TxIndex) +} + +// Delta calculates the delta encoding between two RuneIds. If the two RuneIds are in the same block, then the block delta is 0 and the tx index delta is the difference between the two tx indices. +// If the two RuneIds are in different blocks, then the block delta is the difference between the two block indices and the tx index delta is the tx index in the other block. +func (r RuneId) Delta(next RuneId) (uint64, uint32) { + blockDelta := next.BlockHeight - r.BlockHeight + // if the block is the same, then tx index is the difference between the two + if blockDelta == 0 { + return 0, next.TxIndex - r.TxIndex + } + // otherwise, tx index is the tx index in the next block + return blockDelta, next.TxIndex +} + +// Next calculates the next RuneId given a block delta and tx index delta. 
+func (r RuneId) Next(blockDelta uint64, txIndexDelta uint32) (RuneId, error) { + if blockDelta == 0 { + if math.MaxUint32-r.TxIndex < txIndexDelta { + return RuneId{}, errs.OverflowUint32 + } + return NewRuneId( + r.BlockHeight, + r.TxIndex+txIndexDelta, + ) + } + if math.MaxUint64-r.BlockHeight < blockDelta { + return RuneId{}, errs.OverflowUint64 + } + return NewRuneId( + r.BlockHeight+blockDelta, + txIndexDelta, + ) +} + +// MarshalJSON implements json.Marshaler +func (r RuneId) MarshalJSON() ([]byte, error) { + return []byte(`"` + r.String() + `"`), nil +} + +// UnmarshalJSON implements json.Unmarshaler +func (r *RuneId) UnmarshalJSON(data []byte) error { + // data must be quoted + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return errors.New("must be string") + } + data = data[1 : len(data)-1] + parsed, err := NewRuneIdFromString(string(data)) + if err != nil { + return errors.WithStack(err) + } + *r = parsed + return nil +} diff --git a/modules/runes/runes/rune_id_test.go b/modules/runes/runes/rune_id_test.go new file mode 100644 index 0000000..9bf47c9 --- /dev/null +++ b/modules/runes/runes/rune_id_test.go @@ -0,0 +1,108 @@ +package runes + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewRuneIdFromString(t *testing.T) { + type testcase struct { + name string + input string + expectedOutput RuneId + shouldError bool + } + // TODO: test error instance match expected errors + testcases := []testcase{ + { + name: "valid rune id", + input: "1:2", + expectedOutput: RuneId{ + BlockHeight: 1, + TxIndex: 2, + }, + shouldError: false, + }, + { + name: "too many separators", + input: "1:2:3", + expectedOutput: RuneId{}, + shouldError: true, + }, + { + name: "too few separators", + input: "1", + expectedOutput: RuneId{}, + shouldError: true, + }, + { + name: "invalid tx index", + input: "1:a", + expectedOutput: RuneId{}, + shouldError: true, + }, + { + name: "invalid block", + input: "a:1", + expectedOutput: 
RuneId{}, + shouldError: true, + }, + { + name: "empty tx index", + input: "1:", + expectedOutput: RuneId{}, + shouldError: true, + }, + { + name: "empty block", + input: ":1", + expectedOutput: RuneId{}, + shouldError: true, + }, + { + name: "empty block and tx index", + input: ":", + expectedOutput: RuneId{}, + shouldError: true, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + runeId, err := NewRuneIdFromString(tc.input) + if tc.shouldError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedOutput, runeId) + } + }) + } +} + +func TestRuneIdMarshal(t *testing.T) { + runeId := RuneId{ + BlockHeight: 1, + TxIndex: 2, + } + bytes, err := runeId.MarshalJSON() + assert.NoError(t, err) + assert.Equal(t, []byte(`"1:2"`), bytes) +} + +func TestRuneIdUnmarshal(t *testing.T) { + str := `"1:2"` + var runeId RuneId + err := runeId.UnmarshalJSON([]byte(str)) + assert.NoError(t, err) + assert.Equal(t, RuneId{ + BlockHeight: 1, + TxIndex: 2, + }, runeId) + + str = `1` + err = runeId.UnmarshalJSON([]byte(str)) + assert.Error(t, err) +} diff --git a/modules/runes/runes/rune_test.go b/modules/runes/runes/rune_test.go new file mode 100644 index 0000000..64fb3b6 --- /dev/null +++ b/modules/runes/runes/rune_test.go @@ -0,0 +1,272 @@ +package runes + +import ( + "fmt" + "math" + "strings" + "testing" + + "github.com/Cleverse/go-utilities/utils" + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/uint128" + "github.com/stretchr/testify/assert" +) + +func TestRuneString(t *testing.T) { + test := func(rune Rune, encoded string) { + t.Run(encoded, func(t *testing.T) { + t.Parallel() + actualEncoded := rune.String() + assert.Equal(t, encoded, actualEncoded) + + actualRune, err := NewRuneFromString(encoded) + assert.NoError(t, err) + assert.Equal(t, rune, actualRune) + }) + } + + test(NewRune(0), "A") + test(NewRune(1), "B") + test(NewRune(2), "C") + test(NewRune(3), "D") + 
test(NewRune(4), "E") + test(NewRune(5), "F") + test(NewRune(6), "G") + test(NewRune(7), "H") + test(NewRune(8), "I") + test(NewRune(9), "J") + test(NewRune(10), "K") + test(NewRune(11), "L") + test(NewRune(12), "M") + test(NewRune(13), "N") + test(NewRune(14), "O") + test(NewRune(15), "P") + test(NewRune(16), "Q") + test(NewRune(17), "R") + test(NewRune(18), "S") + test(NewRune(19), "T") + test(NewRune(20), "U") + test(NewRune(21), "V") + test(NewRune(22), "W") + test(NewRune(23), "X") + test(NewRune(24), "Y") + test(NewRune(25), "Z") + test(NewRune(26), "AA") + test(NewRune(27), "AB") + test(NewRune(51), "AZ") + test(NewRune(52), "BA") + test(NewRune(53), "BB") + test(NewRuneFromUint128(utils.Must(uint128.FromString("2055900680524219742"))), "UNCOMMONGOODS") + test(NewRuneFromUint128(uint128.Max.Sub64(2)), "BCGDENLQRQWDSLRUGSNLBTMFIJAT") + test(NewRuneFromUint128(uint128.Max.Sub64(1)), "BCGDENLQRQWDSLRUGSNLBTMFIJAU") + test(NewRuneFromUint128(uint128.Max), "BCGDENLQRQWDSLRUGSNLBTMFIJAV") +} + +func TestNewRuneFromBase26Error(t *testing.T) { + _, err := NewRuneFromString("?") + assert.ErrorIs(t, err, ErrInvalidBase26) +} + +func TestFirstRuneHeight(t *testing.T) { + test := func(network common.Network, expected uint64) { + t.Run(network.String(), func(t *testing.T) { + t.Parallel() + actual := FirstRuneHeight(network) + assert.Equal(t, expected, actual) + }) + } + + test(common.NetworkMainnet, 840_000) + test(common.NetworkTestnet, 2_520_000) +} + +func TestMinimumRuneAtHeightMainnet(t *testing.T) { + test := func(height uint64, encoded string) { + t.Run(fmt.Sprintf("%d", height), func(t *testing.T) { + t.Parallel() + rune, err := NewRuneFromString(encoded) + assert.NoError(t, err) + actual := MinimumRuneAtHeight(common.NetworkMainnet, height) + assert.Equal(t, rune, actual) + }) + } + + start := FirstRuneHeight(common.NetworkMainnet) + end := start + common.HalvingInterval + interval := uint64(common.HalvingInterval / 12) + + test(0, "AAAAAAAAAAAAA") + 
test(start/2, "AAAAAAAAAAAAA") + test(start, "ZZYZXBRKWXVA") + test(start+1, "ZZXZUDIVTVQA") + test(end-1, "A") + test(end, "A") + test(end+1, "A") + test(math.MaxUint32, "A") + + test(start+interval*0-1, "AAAAAAAAAAAAA") + test(start+interval*0, "ZZYZXBRKWXVA") + test(start+interval*0+1, "ZZXZUDIVTVQA") + + test(start+interval*1-1, "AAAAAAAAAAAA") + test(start+interval*1, "ZZYZXBRKWXV") + test(start+interval*1+1, "ZZXZUDIVTVQ") + + test(start+interval*2-1, "AAAAAAAAAAA") + test(start+interval*2, "ZZYZXBRKWY") + test(start+interval*2+1, "ZZXZUDIVTW") + + test(start+interval*3-1, "AAAAAAAAAA") + test(start+interval*3, "ZZYZXBRKX") + test(start+interval*3+1, "ZZXZUDIVU") + + test(start+interval*4-1, "AAAAAAAAA") + test(start+interval*4, "ZZYZXBRL") + test(start+interval*4+1, "ZZXZUDIW") + + test(start+interval*5-1, "AAAAAAAA") + test(start+interval*5, "ZZYZXBS") + test(start+interval*5+1, "ZZXZUDJ") + + test(start+interval*6-1, "AAAAAAA") + test(start+interval*6, "ZZYZXC") + test(start+interval*6+1, "ZZXZUE") + + test(start+interval*7-1, "AAAAAA") + test(start+interval*7, "ZZYZY") + test(start+interval*7+1, "ZZXZV") + + test(start+interval*8-1, "AAAAA") + test(start+interval*8, "ZZZA") + test(start+interval*8+1, "ZZYA") + + test(start+interval*9-1, "AAAA") + test(start+interval*9, "ZZZ") + test(start+interval*9+1, "ZZY") + + test(start+interval*10-2, "AAC") + test(start+interval*10-1, "AAA") + test(start+interval*10, "AAA") + test(start+interval*10+1, "AAA") + + test(start+interval*10+interval/2, "NA") + + test(start+interval*11-2, "AB") + test(start+interval*11-1, "AA") + test(start+interval*11, "AA") + test(start+interval*11+1, "AA") + + test(start+interval*11+interval/2, "N") + + test(start+interval*12-2, "B") + test(start+interval*12-1, "A") + test(start+interval*12, "A") + test(start+interval*12+1, "A") +} + +func TestMinimumRuneAtHeightTestnet(t *testing.T) { + test := func(height uint64, runeStr string) { + t.Run(fmt.Sprintf("%d", height), func(t *testing.T) { 
+ t.Parallel() + rune, err := NewRuneFromString(runeStr) + assert.NoError(t, err) + actual := MinimumRuneAtHeight(common.NetworkTestnet, height) + assert.Equal(t, rune, actual) + }) + } + + start := FirstRuneHeight(common.NetworkTestnet) + + test(start-1, "AAAAAAAAAAAAA") + test(start, "ZZYZXBRKWXVA") + test(start+1, "ZZXZUDIVTVQA") +} + +func TestIsReserved(t *testing.T) { + test := func(runeStr string, expected bool) { + t.Run(runeStr, func(t *testing.T) { + t.Parallel() + rune, err := NewRuneFromString(runeStr) + assert.NoError(t, err) + actual := rune.IsReserved() + assert.Equal(t, expected, actual) + }) + } + + test("A", false) + test("B", false) + test("ZZZZZZZZZZZZZZZZZZZZZZZZZZ", false) + test("AAAAAAAAAAAAAAAAAAAAAAAAAAA", true) + test("AAAAAAAAAAAAAAAAAAAAAAAAAAB", true) + test("BCGDENLQRQWDSLRUGSNLBTMFIJAV", true) +} + +func TestGetReservedRune(t *testing.T) { + test := func(blockHeight uint64, txIndex uint32, expected Rune) { + t.Run(fmt.Sprintf("blockHeight_%d_txIndex_%d", blockHeight, txIndex), func(t *testing.T) { + t.Parallel() + rune := GetReservedRune(blockHeight, txIndex) + assert.Equal(t, expected.String(), rune.String()) + }) + } + + test(0, 0, firstReservedRune) + test(0, 1, Rune(firstReservedRune.Uint128().Add(uint128.From64(1)))) + test(0, 2, Rune(firstReservedRune.Uint128().Add(uint128.From64(2)))) + test(1, 0, Rune(firstReservedRune.Uint128().Add(uint128.From64(1).Lsh(32)))) + test(1, 1, Rune(firstReservedRune.Uint128().Add(uint128.From64(1).Lsh(32).Add(uint128.From64(1))))) + test(1, 2, Rune(firstReservedRune.Uint128().Add(uint128.From64(1).Lsh(32).Add(uint128.From64(2))))) + test(2, 0, Rune(firstReservedRune.Uint128().Add(uint128.From64(2).Lsh(32)))) + test(2, 1, Rune(firstReservedRune.Uint128().Add(uint128.From64(2).Lsh(32).Add(uint128.From64(1))))) + test(2, 2, Rune(firstReservedRune.Uint128().Add(uint128.From64(2).Lsh(32).Add(uint128.From64(2))))) + test(math.MaxUint64, math.MaxUint32, 
Rune(firstReservedRune.Uint128().Add(uint128.From64(math.MaxUint64).Lsh(32).Add(uint128.From64(math.MaxUint32))))) +} + +func TestUnlockSteps(t *testing.T) { + for i := 0; i < len(unlockSteps); i++ { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + t.Parallel() + encoded := Rune(unlockSteps[i]).String() + expected := strings.Repeat("A", i+1) + assert.Equal(t, expected, encoded) + }) + } +} + +func TestCommitment(t *testing.T) { + test := func(rune Rune, expected []byte) { + t.Run(rune.String(), func(t *testing.T) { + t.Parallel() + actual := rune.Commitment() + assert.Equal(t, expected, actual) + }) + } + + test(NewRune(0), []byte{}) + test(NewRune(1), []byte{1}) + test(NewRune(2), []byte{2}) + test(NewRune(255), []byte{255}) + test(NewRune(256), []byte{0, 1}) + test(NewRune(257), []byte{1, 1}) + test(NewRune(65535), []byte{255, 255}) + test(NewRune(65536), []byte{0, 0, 1}) +} + +func TestRuneMarshal(t *testing.T) { + rune := NewRune(5) + bytes, err := rune.MarshalJSON() + assert.NoError(t, err) + assert.Equal(t, []byte(`"F"`), bytes) +} + +func TestRuneUnmarshal(t *testing.T) { + str := `"F"` + var rune Rune + err := rune.UnmarshalJSON([]byte(str)) + assert.NoError(t, err) + assert.Equal(t, NewRune(5), rune) + + str = `1` + err = rune.UnmarshalJSON([]byte(str)) + assert.Error(t, err) +} diff --git a/modules/runes/runes/runestone.go b/modules/runes/runes/runestone.go new file mode 100644 index 0000000..dafbcf0 --- /dev/null +++ b/modules/runes/runes/runestone.go @@ -0,0 +1,389 @@ +package runes + +import ( + "fmt" + "log" + "slices" + "unicode/utf8" + + "github.com/btcsuite/btcd/txscript" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/pkg/leb128" + "github.com/gaze-network/uint128" + "github.com/samber/lo" +) + +const ( + RUNESTONE_PAYLOAD_MAGIC_NUMBER = txscript.OP_13 + RUNE_COMMIT_BLOCKS = 6 +) + +type Runestone struct { 
+ // Rune to etch in this transaction + Etching *Etching + // The rune ID of the runestone to mint in this transaction + Mint *RuneId + // Denotes the transaction output to allocate leftover runes to. If nil, use the first non-OP_RETURN output. If target output is OP_RETURN, those runes are burned. + Pointer *uint64 + // List of edicts to execute in this transaction + Edicts []Edict + // If true, the runestone is a cenotaph. All minted runes in a cenotaph are burned. Runes etched in a cenotaph are not mintable. + Cenotaph bool + // Bitmask of flaws that caused the runestone to be a cenotaph + Flaws Flaws +} + +// Encipher encodes a runestone into a scriptPubKey, ready to be put into a transaction output. +func (r Runestone) Encipher() ([]byte, error) { + var payload []byte + + encodeUint128 := func(value uint128.Uint128) { + payload = append(payload, leb128.EncodeUint128(value)...) + } + encodeTagValues := func(tag Tag, values ...uint128.Uint128) { + for _, value := range values { + // encode tag key + encodeUint128(tag.Uint128()) + // encode tag value + encodeUint128(value) + } + } + encodeEdict := func(previousRuneId RuneId, edict Edict) { + blockHeight, txIndex := previousRuneId.Delta(edict.Id) + encodeUint128(uint128.From64(blockHeight)) + encodeUint128(uint128.From64(uint64(txIndex))) + encodeUint128(edict.Amount) + encodeUint128(uint128.From64(uint64(edict.Output))) + } + + if r.Etching != nil { + etching := r.Etching + flags := Flags(uint128.Zero) + flags.Set(FlagEtching) + if etching.Terms != nil { + flags.Set(FlagTerms) + } + if etching.Turbo { + flags.Set(FlagTurbo) + } + encodeTagValues(TagFlags, flags.Uint128()) + + if etching.Rune != nil { + encodeTagValues(TagRune, etching.Rune.Uint128()) + } + if etching.Divisibility != nil { + encodeTagValues(TagDivisibility, uint128.From64(uint64(*etching.Divisibility))) + } + if etching.Spacers != nil { + encodeTagValues(TagSpacers, uint128.From64(uint64(*etching.Spacers))) + } + if etching.Symbol != nil { + 
encodeTagValues(TagSymbol, uint128.From64(uint64(*etching.Symbol))) + } + if etching.Premine != nil { + encodeTagValues(TagPremine, *etching.Premine) + } + if etching.Terms != nil { + terms := etching.Terms + if terms.Amount != nil { + encodeTagValues(TagAmount, *terms.Amount) + } + if terms.Cap != nil { + encodeTagValues(TagCap, *terms.Cap) + } + if terms.HeightStart != nil { + encodeTagValues(TagHeightStart, uint128.From64(*terms.HeightStart)) + } + if terms.HeightEnd != nil { + encodeTagValues(TagHeightEnd, uint128.From64(*terms.HeightEnd)) + } + if terms.OffsetStart != nil { + encodeTagValues(TagOffsetStart, uint128.From64(*terms.OffsetStart)) + } + if terms.OffsetEnd != nil { + encodeTagValues(TagOffsetEnd, uint128.From64(*terms.OffsetEnd)) + } + } + } + + if r.Mint != nil { + encodeTagValues(TagMint, uint128.From64(r.Mint.BlockHeight), uint128.From64(uint64(r.Mint.TxIndex))) + } + if r.Pointer != nil { + encodeTagValues(TagPointer, uint128.From64(*r.Pointer)) + } + if len(r.Edicts) > 0 { + encodeUint128(TagBody.Uint128()) + edicts := make([]Edict, len(r.Edicts)) + copy(edicts, r.Edicts) + // sort by block height first, then by tx index + slices.SortFunc(edicts, func(i, j Edict) int { + if i.Id.BlockHeight != j.Id.BlockHeight { + return int(i.Id.BlockHeight) - int(j.Id.BlockHeight) + } + return int(i.Id.TxIndex) - int(j.Id.TxIndex) + }) + var previousRuneId RuneId + for _, edict := range edicts { + encodeEdict(previousRuneId, edict) + previousRuneId = edict.Id + } + } + + sb := txscript.NewScriptBuilder(). + AddOp(txscript.OP_RETURN). + AddOp(RUNESTONE_PAYLOAD_MAGIC_NUMBER) + + // chunk payload to MaxScriptElementSize + for _, chunk := range lo.Chunk(payload, txscript.MaxScriptElementSize) { + sb.AddData(chunk) + } + + scriptPubKey, err := sb.Script() + if err != nil { + return nil, errors.Wrap(err, "cannot build scriptPubKey") + } + return scriptPubKey, nil +} + +// DecipherRunestone deciphers a runestone from a transaction. 
If the runestone is a cenotaph, the runestone is returned with Cenotaph set to true and Flaws set to the bitmask of flaws that caused the runestone to be a cenotaph. +// If no runestone is found, nil is returned. +func DecipherRunestone(tx *types.Transaction) (*Runestone, error) { + payload, flaws := runestonePayloadFromTx(tx) + if flaws != 0 { + return &Runestone{ + Cenotaph: true, + Flaws: flaws, + }, nil + } + if payload == nil { + return nil, nil + } + + integers, err := decodeLEB128VarIntsFromPayload(payload) + if err != nil { + log.Printf("warning: %v\n", err) + flaws |= FlawFlagVarInt.Mask() + return &Runestone{ + Cenotaph: true, + Flaws: flaws, + }, nil + } + message := MessageFromIntegers(tx, integers) + edicts, fields := message.Edicts, message.Fields + flaws |= message.Flaws + + flags, err := ParseFlags(lo.FromPtr(fields.Take(TagFlags))) + if err != nil { + return nil, errors.Wrap(err, "cannot parse flags") + } + + var etching *Etching + if flags.Take(FlagEtching) { + divisibilityU128 := fields.Take(TagDivisibility) + if divisibilityU128 != nil && divisibilityU128.Cmp64(uint64(maxDivisibility)) > 0 { + divisibilityU128 = nil + } + spacersU128 := fields.Take(TagSpacers) + if spacersU128 != nil && spacersU128.Cmp64(uint64(maxSpacers)) > 0 { + spacersU128 = nil + } + symbolU128 := fields.Take(TagSymbol) + if symbolU128 != nil && symbolU128.Cmp64(utf8.MaxRune) > 0 { + symbolU128 = nil + } + + var terms *Terms + if flags.Take(FlagTerms) { + var heightStart, heightEnd, offsetStart, offsetEnd *uint64 + if value := fields.Take(TagHeightStart); value != nil && value.IsUint64() { + heightStart = lo.ToPtr(value.Uint64()) + } + if value := fields.Take(TagHeightEnd); value != nil && value.IsUint64() { + heightEnd = lo.ToPtr(value.Uint64()) + } + if value := fields.Take(TagOffsetStart); value != nil && value.IsUint64() { + offsetStart = lo.ToPtr(value.Uint64()) + } + if value := fields.Take(TagOffsetEnd); value != nil && value.IsUint64() { + offsetEnd = 
lo.ToPtr(value.Uint64()) + } + terms = &Terms{ + Amount: fields.Take(TagAmount), + Cap: fields.Take(TagCap), + HeightStart: heightStart, + HeightEnd: heightEnd, + OffsetStart: offsetStart, + OffsetEnd: offsetEnd, + } + } + + var divisibility *uint8 + if divisibilityU128 != nil { + divisibility = lo.ToPtr(divisibilityU128.Uint8()) + } + var spacers *uint32 + if spacersU128 != nil { + spacers = lo.ToPtr(spacersU128.Uint32()) + } + var symbol *rune + if symbolU128 != nil { + symbol = lo.ToPtr(rune(symbolU128.Uint32())) + } + + etching = &Etching{ + Divisibility: divisibility, + Premine: fields.Take(TagPremine), + Rune: (*Rune)(fields.Take(TagRune)), + Spacers: spacers, + Symbol: symbol, + Terms: terms, + Turbo: flags.Take(FlagTurbo), + } + } + + var mint *RuneId + mintValues := fields[TagMint] + if len(mintValues) >= 2 { + mintRuneIdBlock := lo.FromPtr(fields.Take(TagMint)) + mintRuneIdTx := lo.FromPtr(fields.Take(TagMint)) + if mintRuneIdBlock.IsUint64() && mintRuneIdTx.IsUint32() { + runeId, err := NewRuneId(mintRuneIdBlock.Uint64(), mintRuneIdTx.Uint32()) + if err != nil { + // invalid mint + flaws |= FlawFlagUnrecognizedEvenTag.Mask() + } else { + mint = &runeId + } + } + } + var pointer *uint64 + pointerU128 := fields.Take(TagPointer) + if pointerU128 != nil { + if pointerU128.Cmp64(uint64(len(tx.TxOut))) < 0 { + pointer = lo.ToPtr(pointerU128.Uint64()) + } else { + // invalid pointer + flaws |= FlawFlagUnrecognizedEvenTag.Mask() + } + } + + if etching != nil { + _, err = etching.Supply() + if err != nil { + if errors.Is(err, errs.OverflowUint128) { + flaws |= FlawFlagSupplyOverflow.Mask() + } else { + return nil, errors.Wrap(err, "cannot calculate supply") + } + } + } + + if !flags.Uint128().IsZero() { + flaws |= FlawFlagUnrecognizedFlag.Mask() + } + leftoverEvenTags := lo.Filter(lo.Keys(fields), func(tag Tag, _ int) bool { + return tag.Uint128().Mod64(2) == 0 + }) + if len(leftoverEvenTags) != 0 { + flaws |= FlawFlagUnrecognizedEvenTag.Mask() + } + if flaws != 
0 { + var cenotaphEtching *Etching + if etching != nil && etching.Rune != nil { + cenotaphEtching = &Etching{ + Rune: etching.Rune, + } + } + return &Runestone{ + Cenotaph: true, + Flaws: flaws, + Mint: mint, + Etching: cenotaphEtching, // return etching with only Rune field if runestone is cenotaph + }, nil + } + + return &Runestone{ + Etching: etching, + Mint: mint, + Edicts: edicts, + Pointer: pointer, + }, nil +} + +func runestonePayloadFromTx(tx *types.Transaction) ([]byte, Flaws) { + for _, output := range tx.TxOut { + tokenizer := txscript.MakeScriptTokenizer(0, output.PkScript) + + // payload must start with OP_RETURN + if ok := tokenizer.Next(); !ok { + // script ended + continue + } + if err := tokenizer.Err(); err != nil { + continue + } + if opCode := tokenizer.Opcode(); opCode != txscript.OP_RETURN { + continue + } + + // next opcode must be the magic number + if ok := tokenizer.Next(); !ok { + // script ended + continue + } + if err := tokenizer.Err(); err != nil { + fmt.Println(err.Error()) + continue + } + if opCode := tokenizer.Opcode(); opCode != RUNESTONE_PAYLOAD_MAGIC_NUMBER { + continue + } + + // this output is now selected to be the runestone output. Any errors from now on will be considered a flaw. + + // construct the payload by concatenating the remaining data pushes + payload := make([]byte, 0) + for tokenizer.Next() { + if tokenizer.Err() != nil { + return nil, FlawFlagInvalidScript.Mask() + } + if !IsDataPushOpCode(tokenizer.Opcode()) { + return nil, FlawFlagOpCode.Mask() + } + payload = append(payload, tokenizer.Data()...) 
+ } + if tokenizer.Err() != nil { + return nil, FlawFlagInvalidScript.Mask() + } + + return payload, Flaws(0) + } + + // if not found, return nil + return nil, 0 +} + +func decodeLEB128VarIntsFromPayload(payload []byte) ([]uint128.Uint128, error) { + integers := make([]uint128.Uint128, 0) + i := 0 + + for i < len(payload) { + n, length, err := leb128.DecodeUint128(payload[i:]) + if err != nil { + return nil, errors.Wrap(err, "cannot decode LEB128 varint") + } + + integers = append(integers, n) + i += length + } + + return integers, nil +} + +func IsDataPushOpCode(opCode byte) bool { + // includes OP_0, OP_DATA_1 to OP_DATA_75, OP_PUSHDATA1, OP_PUSHDATA2, OP_PUSHDATA4 + return opCode <= txscript.OP_PUSHDATA4 +} diff --git a/modules/runes/runes/runestone_test.go b/modules/runes/runes/runestone_test.go new file mode 100644 index 0000000..7b38ec5 --- /dev/null +++ b/modules/runes/runes/runestone_test.go @@ -0,0 +1,1671 @@ +package runes + +import ( + "math" + "slices" + "testing" + "unicode/utf8" + + "github.com/Cleverse/go-utilities/utils" + "github.com/btcsuite/btcd/txscript" + "github.com/gaze-network/indexer-network/core/types" + "github.com/gaze-network/indexer-network/pkg/leb128" + "github.com/gaze-network/uint128" + "github.com/samber/lo" + "github.com/stretchr/testify/assert" +) + +func encodeLEB128VarIntsToPayload(integers []uint128.Uint128) []byte { + payload := make([]byte, 0) + for _, integer := range integers { + payload = append(payload, leb128.EncodeUint128(integer)...) + } + return payload +} + +func TestDecipherRunestone(t *testing.T) { + testDecipherTx := func(t *testing.T, tx *types.Transaction, expected *Runestone) { + t.Helper() + runestone, err := DecipherRunestone(tx) + assert.NoError(t, err) + assert.Equal(t, expected, runestone) + } + + testDecipherInteger := func(t *testing.T, integers []uint128.Uint128, expected *Runestone) { + t.Helper() + payload := encodeLEB128VarIntsToPayload(integers) + pkScript, err := txscript.NewScriptBuilder(). 
+ AddOp(txscript.OP_RETURN). + AddOp(RUNESTONE_PAYLOAD_MAGIC_NUMBER). + AddData(payload). + Script() + assert.NoError(t, err) + tx := &types.Transaction{ + Version: 2, + LockTime: 0, + TxIn: []*types.TxIn{}, + TxOut: []*types.TxOut{ + { + PkScript: pkScript, + Value: 0, + }, + }, + } + testDecipherTx(t, tx, expected) + } + + testDecipherPkScript := func(t *testing.T, pkScript []byte, expected *Runestone) { + t.Helper() + tx := &types.Transaction{ + Version: 2, + LockTime: 0, + TxIn: []*types.TxIn{}, + TxOut: []*types.TxOut{ + { + PkScript: pkScript, + Value: 0, + }, + }, + } + testDecipherTx(t, tx, expected) + } + + t.Run("decipher_returns_none_if_first_opcode_is_malformed", func(t *testing.T) { + testDecipherPkScript( + t, + utils.Must(txscript.NewScriptBuilder().AddOp(txscript.OP_DATA_4).Script()), + nil, + ) + }) + t.Run("deciphering_transaction_with_non_op_return_output_returns_none", func(t *testing.T) { + testDecipherTx( + t, + &types.Transaction{ + Version: 2, + LockTime: 0, + TxIn: []*types.TxIn{}, + TxOut: []*types.TxOut{}, + }, + nil, + ) + }) + t.Run("deciphering_transaction_with_bare_op_return_returns_none", func(t *testing.T) { + testDecipherPkScript( + t, + utils.Must(txscript.NewScriptBuilder().AddOp(txscript.OP_RETURN).Script()), + nil, + ) + }) + t.Run("deciphering_transaction_with_non_matching_op_return_returns_none", func(t *testing.T) { + testDecipherPkScript( + t, + utils.Must(txscript.NewScriptBuilder().AddOp(txscript.OP_RETURN).AddOp(txscript.OP_1).Script()), + nil, + ) + }) + t.Run("deciphering_valid_runestone_with_invalid_script_postfix_returns_invalid_payload", func(t *testing.T) { + testDecipherPkScript( + t, + utils.Must(txscript.NewScriptBuilder(). + AddOp(txscript.OP_RETURN). + AddOp(RUNESTONE_PAYLOAD_MAGIC_NUMBER). + AddOp(txscript.OP_DATA_4). 
+ Script()), + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagInvalidScript.Mask(), + }, + ) + }) + t.Run("deciphering_runestone_with_truncated_varint_is_cenotaph", func(t *testing.T) { + testDecipherPkScript( + t, + utils.Must(txscript.NewScriptBuilder(). + AddOp(txscript.OP_RETURN). + AddOp(RUNESTONE_PAYLOAD_MAGIC_NUMBER). + AddData([]byte{128}). + Script()), + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagVarInt.Mask(), + }, + ) + }) + t.Run("outputs_with_non_pushdata_opcodes_are_cenotaph_1", func(t *testing.T) { + testDecipherPkScript( + t, + utils.Must(txscript.NewScriptBuilder(). + AddOp(txscript.OP_RETURN). + AddOp(RUNESTONE_PAYLOAD_MAGIC_NUMBER). + AddOp(txscript.OP_VERIFY). + AddData([]byte{0}). + AddData(leb128.EncodeUint128(uint128.From64(1))). + AddData(leb128.EncodeUint128(uint128.From64(1))). + AddData([]byte{2, 0}). + Script()), + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagOpCode.Mask(), + }, + ) + }) + t.Run("outputs_with_non_pushdata_opcodes_are_cenotaph_2", func(t *testing.T) { + testDecipherPkScript( + t, + utils.Must(txscript.NewScriptBuilder(). + AddOp(txscript.OP_RETURN). + AddOp(RUNESTONE_PAYLOAD_MAGIC_NUMBER). + AddData([]byte{0}). + AddData(leb128.EncodeUint128(uint128.From64(1))). + AddData(leb128.EncodeUint128(uint128.From64(2))). + AddData([]byte{3, 0}). + Script()), + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagOpCode.Mask(), + }, + ) + }) + t.Run("pushnum_opcodes_in_runestone_produce_cenotaph", func(t *testing.T) { + testDecipherPkScript( + t, + utils.Must(txscript.NewScriptBuilder(). + AddOp(txscript.OP_RETURN). + AddOp(RUNESTONE_PAYLOAD_MAGIC_NUMBER). + AddOp(txscript.OP_1). + Script()), + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagOpCode.Mask(), + }, + ) + }) + t.Run("deciphering_empty_runestone_is_successful", func(t *testing.T) { + testDecipherPkScript( + t, + utils.Must(txscript.NewScriptBuilder(). + AddOp(txscript.OP_RETURN). + AddOp(RUNESTONE_PAYLOAD_MAGIC_NUMBER). 
+ Script()), + &Runestone{}, + ) + }) + t.Run("invalid_input_scripts_are_skipped_when_searching_for_runestone", func(t *testing.T) { + testDecipherTx( + t, + &types.Transaction{ + Version: 2, + LockTime: 0, + TxIn: []*types.TxIn{}, + TxOut: []*types.TxOut{ + { + PkScript: utils.Must(txscript.NewScriptBuilder(). + AddOp(txscript.OP_RETURN). + AddOp(txscript.OP_DATA_9). + AddOp(RUNESTONE_PAYLOAD_MAGIC_NUMBER). + AddOp(txscript.OP_DATA_4). + Script()), + }, + { + PkScript: utils.Must(txscript.NewScriptBuilder(). + AddOp(txscript.OP_RETURN). + AddOp(RUNESTONE_PAYLOAD_MAGIC_NUMBER). + AddData(encodeLEB128VarIntsToPayload([]uint128.Uint128{ + TagMint.Uint128(), + uint128.From64(1), + TagMint.Uint128(), + uint128.From64(1), + })). + Script()), + }, + }, + }, + &Runestone{ + Mint: lo.ToPtr(RuneId{1, 1}), + }, + ) + }) + t.Run("deciphering_non_empty_runestone_is_successful", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{TagBody.Uint128(), uint128.From64(1), uint128.From64(1), uint128.From64(2), uint128.From64(0)}, + &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + }, + ) + }) + t.Run("decipher_etching", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Etching: &Etching{}, + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + }, + ) + }) + t.Run("decipher_etching_with_rune", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagRune.Uint128(), + uint128.From64(4), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Etching: &Etching{ + Rune: lo.ToPtr(NewRune(4)), + }, + Edicts: []Edict{ + { + Id: RuneId{1, 
1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + }, + ) + }) + t.Run("terms_flag_without_etching_flag_produces_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagTerms.Mask().Uint128(), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagUnrecognizedFlag.Mask(), + }, + ) + }) + t.Run("recognized_fields_without_flag_produces_cenotaph", func(t *testing.T) { + testcase := func(integers []uint128.Uint128) { + testDecipherInteger( + t, + integers, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagUnrecognizedEvenTag.Mask(), + }, + ) + } + + testcase([]uint128.Uint128{TagPremine.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagRune.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagCap.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagAmount.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagOffsetStart.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagOffsetEnd.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagHeightStart.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagHeightEnd.Uint128(), uint128.Zero}) + + testcase([]uint128.Uint128{TagFlags.Uint128(), FlagEtching.Mask().Uint128(), TagCap.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagFlags.Uint128(), FlagEtching.Mask().Uint128(), TagAmount.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagFlags.Uint128(), FlagEtching.Mask().Uint128(), TagOffsetStart.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagFlags.Uint128(), FlagEtching.Mask().Uint128(), TagOffsetEnd.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagFlags.Uint128(), FlagEtching.Mask().Uint128(), TagHeightStart.Uint128(), uint128.Zero}) + testcase([]uint128.Uint128{TagFlags.Uint128(), FlagEtching.Mask().Uint128(), TagHeightEnd.Uint128(), uint128.Zero}) + }) + t.Run("decipher_etching_with_term", func(t 
*testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128().Or(FlagTerms.Mask().Uint128()), + TagOffsetEnd.Uint128(), + uint128.From64(4), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Etching: &Etching{ + Terms: &Terms{ + OffsetEnd: lo.ToPtr(uint64(4)), + }, + }, + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + }, + ) + }) + t.Run("decipher_etching_with_amount", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128().Or(FlagTerms.Mask().Uint128()), + TagAmount.Uint128(), + uint128.From64(4), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Etching: &Etching{ + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(4)), + }, + }, + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + }, + ) + }) + t.Run("duplicate_even_tags_produce_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagRune.Uint128(), + uint128.From64(4), + TagRune.Uint128(), + uint128.From64(5), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Cenotaph: true, + Etching: &Etching{ + Rune: lo.ToPtr(NewRune(4)), + }, + Flaws: FlawFlagUnrecognizedEvenTag.Mask(), + }, + ) + }) + t.Run("duplicate_odd_tags_are_ignored", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagDivisibility.Uint128(), + uint128.From64(4), + TagDivisibility.Uint128(), + uint128.From64(5), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Etching: &Etching{ + 
Divisibility: lo.ToPtr(uint8(4)), + }, + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + }, + ) + }) + t.Run("unrecognized_odd_tag_is_ignored", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagNop.Uint128(), + uint128.From64(5), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + }, + ) + }) + t.Run("runestone_with_unrecognized_even_tag_is_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagCenotaph.Uint128(), + uint128.From64(5), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagUnrecognizedEvenTag.Mask(), + }, + ) + }) + t.Run("runestone_with_unrecognized_flag_is_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagCenotaph.Mask().Uint128(), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagUnrecognizedFlag.Mask(), + }, + ) + }) + t.Run("runestone_with_edict_id_with_zero_block_and_nonzero_tx_is_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagBody.Uint128(), + uint128.From64(0), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagEdictRuneId.Mask(), + }, + ) + }) + t.Run("runestone_with_overflowing_edict_id_delta_is_cenotaph_1", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(0), + uint128.From64(0), + uint128.From64(0), + uint128.From64(math.MaxUint64), + uint128.From64(0), + uint128.From64(0), + uint128.From64(0), + }, + &Runestone{ + 
Cenotaph: true, + Flaws: FlawFlagEdictRuneId.Mask(), + }, + ) + }) + t.Run("runestone_with_overflowing_edict_id_delta_is_cenotaph_2", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(0), + uint128.From64(0), + uint128.From64(0), + uint128.From64(math.MaxUint64), + uint128.From64(0), + uint128.From64(0), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagEdictRuneId.Mask(), + }, + ) + }) + t.Run("runestone_with_output_over_max_is_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(2), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagEdictOutput.Mask(), + }, + ) + }) + t.Run("tag_with_no_value_is_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + uint128.From64(1), + TagFlags.Uint128(), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagTruncatedField.Mask(), + }, + ) + }) + t.Run("trailing_integers_in_body_is_cenotaph", func(t *testing.T) { + integers := []uint128.Uint128{ + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + } + for i := 0; i < 4; i++ { + if i == 0 { + testDecipherInteger(t, integers, &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + }) + } else { + testDecipherInteger(t, integers, &Runestone{ + Cenotaph: true, + Flaws: FlawFlagTrailingIntegers.Mask(), + }) + } + integers = append(integers, uint128.Zero) + } + }) + t.Run("decipher_etching_with_divisibility", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagRune.Uint128(), + uint128.From64(4), + TagDivisibility.Uint128(), + uint128.From64(5), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + 
uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + Etching: &Etching{ + Rune: lo.ToPtr(NewRune(4)), + Divisibility: lo.ToPtr(uint8(5)), + }, + }, + ) + }) + t.Run("divisibility_above_max_is_ignored", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagRune.Uint128(), + uint128.From64(4), + TagDivisibility.Uint128(), + uint128.From64(uint64(maxDivisibility + 1)), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + Etching: &Etching{ + Rune: lo.ToPtr(NewRune(4)), + }, + }, + ) + }) + t.Run("symbol_above_max_is_ignored", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagSymbol.Uint128(), + uint128.From64(utf8.MaxRune + 1), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + Etching: &Etching{}, + }, + ) + }) + t.Run("decipher_etching_with_symbol", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagRune.Uint128(), + uint128.From64(4), + TagSymbol.Uint128(), + uint128.From64('a'), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + Etching: &Etching{ + Rune: lo.ToPtr(NewRune(4)), + Symbol: lo.ToPtr('a'), + }, + }, + ) + }) + t.Run("decipher_etching_with_all_etching_tags", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ 
+ TagFlags.Uint128(), + FlagEtching.Mask().Or(FlagTerms.Mask()).Or(FlagTurbo.Mask()).Uint128(), + TagRune.Uint128(), + uint128.From64(4), + TagDivisibility.Uint128(), + uint128.From64(1), + TagSpacers.Uint128(), + uint128.From64(5), + TagSymbol.Uint128(), + uint128.From64('a'), + TagOffsetEnd.Uint128(), + uint128.From64(2), + TagAmount.Uint128(), + uint128.From64(3), + TagPremine.Uint128(), + uint128.From64(8), + TagCap.Uint128(), + uint128.From64(9), + TagPointer.Uint128(), + uint128.From64(0), + TagMint.Uint128(), + uint128.From64(1), + TagMint.Uint128(), + uint128.From64(1), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + Etching: &Etching{ + Divisibility: lo.ToPtr(uint8(1)), + Premine: lo.ToPtr(uint128.From64(8)), + Rune: lo.ToPtr(NewRune(4)), + Spacers: lo.ToPtr(uint32(5)), + Symbol: lo.ToPtr('a'), + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(3)), + Cap: lo.ToPtr(uint128.From64(9)), + OffsetEnd: lo.ToPtr(uint64(2)), + }, + Turbo: true, + }, + Pointer: lo.ToPtr(uint64(0)), + Mint: lo.ToPtr(RuneId{1, 1}), + }, + ) + }) + t.Run("recognized_even_etching_fields_produce_cenotaph_if_etching_flag_is_not_set", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagRune.Uint128(), + uint128.From64(4), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagUnrecognizedEvenTag.Mask(), + }, + ) + }) + t.Run("decipher_etching_with_divisibility_and_symbol", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagRune.Uint128(), + uint128.From64(4), + TagDivisibility.Uint128(), + uint128.From64(1), + TagSymbol.Uint128(), + uint128.From64('a'), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + 
Amount: uint128.From64(2), + Output: 0, + }, + }, + Etching: &Etching{ + Rune: lo.ToPtr(NewRune(4)), + Divisibility: lo.ToPtr(uint8(1)), + Symbol: lo.ToPtr('a'), + }, + }, + ) + }) + t.Run("tag_values_are_not_parsed_as_tags", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagDivisibility.Uint128(), + TagBody.Uint128(), + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + }, + &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + Etching: &Etching{ + Divisibility: lo.ToPtr(uint8(0)), + }, + }, + ) + }) + t.Run("runestone_may_contain_multiple_edicts", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + uint128.From64(0), + uint128.From64(3), + uint128.From64(5), + uint128.From64(0), + }, + &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + { + Id: RuneId{1, 4}, + Amount: uint128.From64(5), + Output: 0, + }, + }, + }, + ) + }) + t.Run("runestones_with_invalid_rune_id_blocks_are_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + uint128.Max, + uint128.From64(1), + uint128.From64(0), + uint128.From64(0), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagEdictRuneId.Mask(), + }, + ) + }) + t.Run("runestones_with_invalid_rune_id_txs_are_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(2), + uint128.From64(0), + uint128.From64(1), + uint128.Max, + uint128.From64(0), + uint128.From64(0), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagEdictRuneId.Mask(), + }, + 
) + }) + t.Run("payload_pushes_are_concatenated", func(t *testing.T) { + // cannot use txscript.ScriptBuilder because ScriptBuilder.AddData transforms data with low value into small integer opcodes + pkScript := []byte{ + txscript.OP_RETURN, + RUNESTONE_PAYLOAD_MAGIC_NUMBER, + } + addData := func(data []byte) { + pkScript = append(pkScript, txscript.OP_DATA_1-1+byte(len(data))) + pkScript = append(pkScript, data...) + } + addData(leb128.EncodeUint128(TagFlags.Uint128())) + addData(leb128.EncodeUint128(FlagEtching.Mask().Uint128())) + addData(leb128.EncodeUint128(TagDivisibility.Uint128())) + addData(leb128.EncodeUint128(uint128.From64(5))) + addData(leb128.EncodeUint128(TagBody.Uint128())) + addData(leb128.EncodeUint128(uint128.From64(1))) + addData(leb128.EncodeUint128(uint128.From64(1))) + addData(leb128.EncodeUint128(uint128.From64(2))) + addData(leb128.EncodeUint128(uint128.From64(0))) + testDecipherPkScript( + t, + pkScript, + &Runestone{ + Edicts: []Edict{ + { + Id: RuneId{1, 1}, + Amount: uint128.From64(2), + Output: 0, + }, + }, + Etching: &Etching{ + Divisibility: lo.ToPtr(uint8(5)), + }, + }, + ) + }) + t.Run("runestone_size", func(t *testing.T) { + testcase := func(edicts []Edict, etching *Etching, expectedSize int) { + bytes, err := Runestone{ + Edicts: edicts, + Etching: etching, + }.Encipher() + assert.NoError(t, err) + assert.Equal(t, expectedSize, len(bytes)) + } + + testcase(nil, nil, 2) + testcase( + nil, + &Etching{ + Divisibility: lo.ToPtr(maxDivisibility), + Rune: lo.ToPtr(NewRune(0)), + }, + 9, + ) + testcase( + nil, + &Etching{ + Divisibility: lo.ToPtr(maxDivisibility), + Premine: lo.ToPtr(uint128.From64(math.MaxUint64)), + Rune: lo.ToPtr(NewRuneFromUint128(uint128.Max)), + Spacers: lo.ToPtr(maxSpacers), + Symbol: lo.ToPtr(utf8.MaxRune), + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(math.MaxUint64)), + Cap: lo.ToPtr(uint128.From64(math.MaxUint32)), + HeightStart: lo.ToPtr(uint64(math.MaxUint32)), + HeightEnd: 
lo.ToPtr(uint64(math.MaxUint32)), + OffsetStart: lo.ToPtr(uint64(math.MaxUint32)), + OffsetEnd: lo.ToPtr(uint64(math.MaxUint32)), + }, + Turbo: true, + }, + 89, + ) + testcase( + []Edict{ + { + Id: RuneId{0, 0}, + Amount: uint128.From64(0), + Output: 0, + }, + }, + &Etching{ + Divisibility: lo.ToPtr(maxDivisibility), + Rune: lo.ToPtr(NewRuneFromUint128(uint128.Max)), + }, + 32, + ) + testcase( + []Edict{ + { + Id: RuneId{0, 0}, + Amount: uint128.Max, + Output: 0, + }, + }, + &Etching{ + Divisibility: lo.ToPtr(maxDivisibility), + Rune: lo.ToPtr(NewRuneFromUint128(uint128.Max)), + }, + 50, + ) + testcase( + []Edict{ + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.From64(0), + Output: 0, + }, + }, + nil, + 14, + ) + testcase( + []Edict{ + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.Max, + Output: 0, + }, + }, + nil, + 32, + ) + testcase( + []Edict{ + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.Max, + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.Max, + Output: 0, + }, + }, + nil, + 54, + ) + testcase( + []Edict{ + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.Max, + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.Max, + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.Max, + Output: 0, + }, + }, + nil, + 76, + ) + testcase( + []Edict{ + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + }, + nil, + 62, + ) + testcase( + []Edict{ + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + { + Id: 
RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + }, + nil, + 75, + ) + testcase( + []Edict{ + { + Id: RuneId{BlockHeight: 0, TxIndex: math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + { + Id: RuneId{0, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + { + Id: RuneId{0, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + { + Id: RuneId{0, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + { + Id: RuneId{0, math.MaxUint32}, + Amount: uint128.From64(math.MaxUint64), + Output: 0, + }, + }, + nil, + 73, + ) + testcase( + []Edict{ + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: utils.Must(uint128.FromString("1000000000000000000")), + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: utils.Must(uint128.FromString("1000000000000000000")), + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: utils.Must(uint128.FromString("1000000000000000000")), + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: utils.Must(uint128.FromString("1000000000000000000")), + Output: 0, + }, + { + Id: RuneId{1_000_000, math.MaxUint32}, + Amount: utils.Must(uint128.FromString("1000000000000000000")), + Output: 0, + }, + }, + nil, + 70, + ) + }) + t.Run("etching_with_term_greater_than_maximum_is_still_an_etching", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagOffsetEnd.Uint128(), + uint128.From64(math.MaxUint64).Add64(1), + }, + &Runestone{ + Cenotaph: true, + Flaws: 
FlawFlagUnrecognizedEvenTag.Mask(), + }, + ) + }) + t.Run("encipher", func(t *testing.T) { + testcase := func(runestone Runestone, expected []uint128.Uint128) { + pkScript, err := runestone.Encipher() + assert.NoError(t, err) + + tx := &types.Transaction{ + Version: 2, + LockTime: 0, + TxIn: []*types.TxIn{}, + TxOut: []*types.TxOut{ + { + PkScript: pkScript, + Value: 0, + }, + }, + } + + payload, flaws := runestonePayloadFromTx(tx) + assert.NoError(t, err) + assert.Equal(t, Flaws(0), flaws) + + integers, err := decodeLEB128VarIntsFromPayload(payload) + assert.NoError(t, err) + assert.Equal(t, expected, integers) + + slices.SortFunc(runestone.Edicts, func(i, j Edict) int { + return i.Id.Cmp(j.Id) + }) + decipheredRunestone, err := DecipherRunestone(tx) + assert.NoError(t, err) + assert.Equal(t, runestone, *decipheredRunestone) + } + + testcase(Runestone{}, []uint128.Uint128{}) + testcase( + Runestone{ + Edicts: []Edict{ + { + Id: RuneId{2, 3}, + Amount: uint128.From64(1), + Output: 0, + }, + { + Id: RuneId{5, 6}, + Amount: uint128.From64(4), + Output: 1, + }, + }, + Etching: &Etching{ + Divisibility: lo.ToPtr(uint8(7)), + Premine: lo.ToPtr(uint128.From64(8)), + Rune: lo.ToPtr(NewRune(9)), + Spacers: lo.ToPtr(uint32(10)), + Symbol: lo.ToPtr('@'), + Terms: &Terms{ + Amount: lo.ToPtr(uint128.From64(14)), + Cap: lo.ToPtr(uint128.From64(11)), + HeightStart: lo.ToPtr(uint64(12)), + HeightEnd: lo.ToPtr(uint64(13)), + OffsetStart: lo.ToPtr(uint64(15)), + OffsetEnd: lo.ToPtr(uint64(16)), + }, + Turbo: true, + }, + Mint: lo.ToPtr(RuneId{17, 18}), + Pointer: lo.ToPtr(uint64(0)), + }, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Or(FlagTerms.Mask()).Or(FlagTurbo.Mask()).Uint128(), + TagRune.Uint128(), + uint128.From64(9), + TagDivisibility.Uint128(), + uint128.From64(7), + TagSpacers.Uint128(), + uint128.From64(10), + TagSymbol.Uint128(), + uint128.From64('@'), + TagPremine.Uint128(), + uint128.From64(8), + TagAmount.Uint128(), + uint128.From64(14), + 
TagCap.Uint128(), + uint128.From64(11), + TagHeightStart.Uint128(), + uint128.From64(12), + TagHeightEnd.Uint128(), + uint128.From64(13), + TagOffsetStart.Uint128(), + uint128.From64(15), + TagOffsetEnd.Uint128(), + uint128.From64(16), + TagMint.Uint128(), + uint128.From64(17), + TagMint.Uint128(), + uint128.From64(18), + TagPointer.Uint128(), + uint128.From64(0), + TagBody.Uint128(), + uint128.From64(2), + uint128.From64(3), + uint128.From64(1), + uint128.From64(0), + uint128.From64(3), + uint128.From64(6), + uint128.From64(4), + uint128.From64(1), + }, + ) + testcase( + Runestone{ + Etching: &Etching{}, + }, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + }, + ) + }) + t.Run("runestone_payload_is_chunked", func(t *testing.T) { + checkScriptInstructionCount := func(pkScript []byte, expectedCount int) { + tokenizer := txscript.MakeScriptTokenizer(0, pkScript) + actualCount := 0 + for tokenizer.Next() { + actualCount++ + } + assert.Equal(t, expectedCount, actualCount) + } + + edicts := make([]Edict, 0) + for i := 0; i < 129; i++ { + edicts = append(edicts, Edict{ + Id: RuneId{}, + Amount: uint128.From64(0), + Output: 0, + }) + } + pkScript, err := Runestone{ + Edicts: edicts, + }.Encipher() + assert.NoError(t, err) + checkScriptInstructionCount(pkScript, 3) + + edicts = make([]Edict, 0) + for i := 0; i < 130; i++ { + edicts = append(edicts, Edict{ + Id: RuneId{}, + Amount: uint128.From64(0), + Output: 0, + }) + } + pkScript, err = Runestone{ + Edicts: edicts, + }.Encipher() + assert.NoError(t, err) + checkScriptInstructionCount(pkScript, 4) + }) + t.Run("edict_output_greater_than_32_max_produces_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagBody.Uint128(), + uint128.From64(1), + uint128.From64(1), + uint128.From64(1), + uint128.From64(math.MaxUint32).Add64(1), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagEdictOutput.Mask(), + }, + ) + }) + t.Run("partial_mint_produces_cenotaph", func(t 
*testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagMint.Uint128(), + uint128.From64(1), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagUnrecognizedEvenTag.Mask(), + }, + ) + }) + t.Run("invalid_mint_produces_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagMint.Uint128(), + uint128.From64(0), + TagMint.Uint128(), + uint128.From64(1), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagUnrecognizedEvenTag.Mask(), + }, + ) + }) + t.Run("invalid_deadline_produces_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagOffsetEnd.Uint128(), + uint128.Max, + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagUnrecognizedEvenTag.Mask(), + }, + ) + }) + t.Run("invalid_default_output_produces_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagPointer.Uint128(), + uint128.From64(1), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagUnrecognizedEvenTag.Mask(), + }, + ) + testDecipherInteger( + t, + []uint128.Uint128{ + TagPointer.Uint128(), + uint128.Max, + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagUnrecognizedEvenTag.Mask(), + }, + ) + }) + t.Run("invalid_divisibility_does_not_produce_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagDivisibility.Uint128(), + uint128.Max, + }, + &Runestone{}, + ) + }) + t.Run("min_and_max_runes_are_not_cenotaphs", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagRune.Uint128(), + uint128.From64(0), + }, + &Runestone{ + Etching: &Etching{ + Rune: lo.ToPtr(NewRune(0)), + }, + }, + ) + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Uint128(), + TagRune.Uint128(), + uint128.Max, + }, + &Runestone{ + Etching: &Etching{ + Rune: lo.ToPtr(NewRuneFromUint128(uint128.Max)), + }, + }, + ) + }) + t.Run("invalid_spacers_does_not_produce_cenotaph", func(t 
*testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagSpacers.Uint128(), + uint128.Max, + }, + &Runestone{}, + ) + }) + t.Run("invalid_symbol_does_not_produce_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagSymbol.Uint128(), + uint128.Max, + }, + &Runestone{}, + ) + }) + t.Run("invalid_term_produces_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagOffsetEnd.Uint128(), + uint128.Max, + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagUnrecognizedEvenTag.Mask(), + }, + ) + }) + t.Run("invalid_supply_produces_cenotaph", func(t *testing.T) { + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Or(FlagTerms.Mask()).Uint128(), + TagCap.Uint128(), + uint128.From64(1), + TagAmount.Uint128(), + uint128.Max, + }, + &Runestone{ + Etching: &Etching{ + Terms: &Terms{ + Amount: lo.ToPtr(uint128.Max), + Cap: lo.ToPtr(uint128.From64(1)), + }, + }, + }, + ) + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Or(FlagTerms.Mask()).Uint128(), + TagCap.Uint128(), + uint128.From64(2), + TagAmount.Uint128(), + uint128.Max, + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagSupplyOverflow.Mask(), + }, + ) + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Or(FlagTerms.Mask()).Uint128(), + TagCap.Uint128(), + uint128.From64(2), + TagAmount.Uint128(), + uint128.Max.Div64(2).Add64(1), + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagSupplyOverflow.Mask(), + }, + ) + testDecipherInteger( + t, + []uint128.Uint128{ + TagFlags.Uint128(), + FlagEtching.Mask().Or(FlagTerms.Mask()).Uint128(), + TagPremine.Uint128(), + uint128.From64(1), + TagCap.Uint128(), + uint128.From64(1), + TagAmount.Uint128(), + uint128.Max, + }, + &Runestone{ + Cenotaph: true, + Flaws: FlawFlagSupplyOverflow.Mask(), + }, + ) + }) + t.Run("all_pushdata_opcodes_are_valid", func(t *testing.T) { + // PushData 
opcodes include (per ord's spec): + // 1. OP_0 + // 2. OP_DATA_1 - OP_DATA_76 + // 3. OP_PUSHDATA1, OP_PUSHDATA2, OP_PUSHDATA4 + for i := 0; i < 79; i++ { + pkScript := make([]byte, 0) + + pkScript = append(pkScript, txscript.OP_RETURN) + pkScript = append(pkScript, RUNESTONE_PAYLOAD_MAGIC_NUMBER) + pkScript = append(pkScript, byte(i)) + + if i <= 75 { + for j := 0; j < i; j++ { + pkScript = append(pkScript, lo.Ternary(j%2 == 0, byte(1), byte(0))) + } + if i%2 == 1 { + pkScript = append(pkScript, byte(1), byte(1)) + } + } else if i == 76 { + pkScript = append(pkScript, byte(0)) + } else if i == 77 { + pkScript = append(pkScript, byte(0), byte(0)) + } else { + pkScript = append(pkScript, byte(0), byte(0), byte(0), byte(0)) + } + + testDecipherPkScript(t, pkScript, &Runestone{}) + } + }) + t.Run("all_non_pushdata_opcodes_are_invalid", func(t *testing.T) { + for i := 79; i <= math.MaxUint8; i++ { + pkScript := []byte{ + txscript.OP_RETURN, + RUNESTONE_PAYLOAD_MAGIC_NUMBER, + byte(i), + } + testDecipherPkScript(t, pkScript, &Runestone{ + Cenotaph: true, + Flaws: FlawFlagOpCode.Mask(), + }) + } + }) +} diff --git a/modules/runes/runes/spaced_rune.go b/modules/runes/runes/spaced_rune.go new file mode 100644 index 0000000..38b61f1 --- /dev/null +++ b/modules/runes/runes/spaced_rune.go @@ -0,0 +1,92 @@ +package runes + +import ( + "math/bits" + "strings" + + "github.com/cockroachdb/errors" +) + +type SpacedRune struct { + Rune Rune + Spacers uint32 +} + +func NewSpacedRune(rune Rune, spacers uint32) SpacedRune { + return SpacedRune{ + Rune: rune, + Spacers: spacers, + } +} + +var ( + ErrLeadingSpacer = errors.New("runes cannot start with a spacer") + ErrTrailingSpacer = errors.New("runes cannot end with a spacer") + ErrDoubleSpacer = errors.New("runes cannot have more than one spacer between characters") + ErrInvalidSpacedRuneCharacter = errors.New("invalid spaced rune character: must satisfy regex [A-Z•.]") +) + +func NewSpacedRuneFromString(input string) (SpacedRune, 
error) { + var sb strings.Builder + var spacers uint32 + + for _, c := range input { + if c >= 'A' && c <= 'Z' { + sb.WriteRune(c) + continue + } + if c == '•' || c == '.' { + if sb.Len() == 0 { + return SpacedRune{}, errors.WithStack(ErrLeadingSpacer) + } + flag := 1 << (sb.Len() - 1) + if spacers&uint32(flag) != 0 { + return SpacedRune{}, errors.WithStack(ErrDoubleSpacer) + } + spacers |= 1 << (sb.Len() - 1) + continue + } + return SpacedRune{}, errors.WithStack(ErrInvalidSpacedRuneCharacter) + } + + if 32-bits.LeadingZeros32(spacers) >= sb.Len() { + return SpacedRune{}, errors.WithStack(ErrTrailingSpacer) + } + rune, err := NewRuneFromString(sb.String()) + if err != nil { + return SpacedRune{}, errors.Wrap(err, "failed to parse rune from string") + } + return NewSpacedRune(rune, spacers), nil +} + +func (r SpacedRune) String() string { + runeStr := r.Rune.String() + var sb strings.Builder + for i, c := range runeStr { + sb.WriteRune(c) + if i < len(runeStr)-1 && r.Spacers&(1<= 0 { + if b.Bytes()[i] == '\n' { + b.Truncate(i) + } + } +} + +func Get() *Buffer { + buf := pool.Get().(*Buffer) + buf.Reset() + buf.pool = pool + return buf +} + +func Put(buf *Buffer) { + pool.Put(buf) +} diff --git a/pkg/errorhandler/http.go b/pkg/errorhandler/http.go new file mode 100644 index 0000000..d3ea6e0 --- /dev/null +++ b/pkg/errorhandler/http.go @@ -0,0 +1,33 @@ +package errorhandler + +import ( + "net/http" + + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/gaze-network/indexer-network/pkg/logger/slogx" + "github.com/gofiber/fiber/v2" +) + +func NewHTTPErrorHandler() func(ctx *fiber.Ctx, err error) error { + return func(ctx *fiber.Ctx, err error) error { + if e := new(errs.PublicError); errors.As(err, &e) { + return errors.WithStack(ctx.Status(http.StatusBadRequest).JSON(map[string]any{ + "error": e.Message(), + })) + } + if e := new(fiber.Error); errors.As(err, &e) { 
+ return errors.WithStack(ctx.Status(e.Code).SendString(e.Error())) + } + + logger.ErrorContext(ctx.UserContext(), "Something went wrong, unhandled api error", + slogx.String("event", "api_unhandled_error"), + slogx.Error(err), + ) + + return errors.WithStack(ctx.Status(http.StatusInternalServerError).JSON(map[string]any{ + "error": "Internal Server Error", + })) + } +} diff --git a/pkg/httpclient/httpclient.go b/pkg/httpclient/httpclient.go new file mode 100644 index 0000000..f6b03ff --- /dev/null +++ b/pkg/httpclient/httpclient.go @@ -0,0 +1,171 @@ +package httpclient + +import ( + "context" + "encoding/json" + "log/slog" + "net/url" + "strings" + "time" + + "github.com/Cleverse/go-utilities/utils" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/pkg/logger" + "github.com/valyala/fasthttp" +) + +type Config struct { + // Enable debug mode + Debug bool + + // Default headers + Headers map[string]string +} + +type Client struct { + baseURL string + Config +} + +func New(baseURL string, config ...Config) (*Client, error) { + if _, err := url.Parse(baseURL); err != nil { + return nil, errors.Join(errs.InvalidArgument, errors.Wrap(err, "can't parse base url")) + } + var cf Config + if len(config) > 0 { + cf = config[0] + } + if len(cf.Headers) == 0 { + cf.Headers = make(map[string]string) + } + return &Client{ + baseURL: baseURL, + Config: cf, + }, nil +} + +type RequestOptions struct { + path string + method string + Body []byte + Query url.Values + Header map[string]string + FormData url.Values +} + +type HttpResponse struct { + URL string + fasthttp.Response +} + +func (r *HttpResponse) UnmarshalBody(out any) error { + err := json.Unmarshal(r.Body(), out) + if err != nil { + return errors.Wrapf(err, "can't unmarshal json body from %v, %v", r.URL, string(r.Body())) + } + return nil +} + +func (h *Client) request(ctx context.Context, reqOptions RequestOptions) (*HttpResponse, error) { 
+ start := time.Now() + req := fasthttp.AcquireRequest() + req.Header.SetMethod(reqOptions.method) + for k, v := range h.Headers { + req.Header.Set(k, v) + } + for k, v := range reqOptions.Header { + req.Header.Set(k, v) + } + parsedUrl := utils.Must(url.Parse(h.baseURL)) // checked in httpclient.New + parsedUrl.Path = reqOptions.path + parsedUrl.RawQuery = reqOptions.Query.Encode() + + // remove %20 from url (empty space) + url := strings.TrimSuffix(parsedUrl.String(), "%20") + url = strings.Replace(url, "%20?", "?", 1) + req.SetRequestURI(url) + if reqOptions.Body != nil { + req.Header.SetContentType("application/json") + req.SetBody(reqOptions.Body) + } else if reqOptions.FormData != nil { + req.Header.SetContentType("application/x-www-form-urlencoded") + req.SetBodyString(reqOptions.FormData.Encode()) + } + + resp := fasthttp.AcquireResponse() + startDo := time.Now() + + defer func() { + if h.Debug { + logger := logger.With( + slog.String("method", reqOptions.method), + slog.String("url", url), + slog.Duration("duration", time.Since(start)), + slog.Duration("latency", time.Since(startDo)), + slog.Int("req_header_size", len(req.Header.Header())), + slog.Int("req_content_length", req.Header.ContentLength()), + ) + + if resp.StatusCode() >= 0 { + logger = logger.With( + slog.Int("status_code", resp.StatusCode()), + slog.String("resp_content_type", string(resp.Header.ContentType())), + slog.Int("resp_content_length", len(resp.Body())), + ) + } + + logger.InfoContext(ctx, "Finished make request", slog.String("package", "httpclient")) + } + + fasthttp.ReleaseResponse(resp) + fasthttp.ReleaseRequest(req) + }() + + if err := fasthttp.Do(req, resp); err != nil { + return nil, errors.Wrapf(err, "url: %s", url) + } + + httpResponse := HttpResponse{ + URL: url, + } + resp.CopyTo(&httpResponse.Response) + + return &httpResponse, nil +} + +func (h *Client) Do(ctx context.Context, method, path string, reqOptions RequestOptions) (*HttpResponse, error) { + reqOptions.path = 
path + reqOptions.method = method + return h.request(ctx, reqOptions) +} + +func (h *Client) Get(ctx context.Context, path string, reqOptions RequestOptions) (*HttpResponse, error) { + reqOptions.path = path + reqOptions.method = fasthttp.MethodGet + return h.request(ctx, reqOptions) +} + +func (h *Client) Post(ctx context.Context, path string, reqOptions RequestOptions) (*HttpResponse, error) { + reqOptions.path = path + reqOptions.method = fasthttp.MethodPost + return h.request(ctx, reqOptions) +} + +func (h *Client) Put(ctx context.Context, path string, reqOptions RequestOptions) (*HttpResponse, error) { + reqOptions.path = path + reqOptions.method = fasthttp.MethodPut + return h.request(ctx, reqOptions) +} + +func (h *Client) Patch(ctx context.Context, path string, reqOptions RequestOptions) (*HttpResponse, error) { + reqOptions.path = path + reqOptions.method = fasthttp.MethodPatch + return h.request(ctx, reqOptions) +} + +func (h *Client) Delete(ctx context.Context, path string, reqOptions RequestOptions) (*HttpResponse, error) { + reqOptions.path = path + reqOptions.method = fasthttp.MethodDelete + return h.request(ctx, reqOptions) +} diff --git a/pkg/leb128/leb128.go b/pkg/leb128/leb128.go new file mode 100644 index 0000000..bbd11df --- /dev/null +++ b/pkg/leb128/leb128.go @@ -0,0 +1,48 @@ +package leb128 + +import ( + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/uint128" +) + +var ( + ErrEmpty = errors.New("leb128: empty byte sequence") + ErrUnterminated = errors.New("leb128: unterminated byte sequence") +) + +func EncodeUint128(input uint128.Uint128) []byte { + bytes := make([]byte, 0) + // for n >> 7 > 0 + for !input.Rsh(7).IsZero() { + last_7_bits := input.And64(0b0111_1111).Uint8() + bytes = append(bytes, last_7_bits|0b1000_0000) + input = input.Rsh(7) + } + last_byte := input.Uint8() + bytes = append(bytes, last_byte) + return bytes +} + +func DecodeUint128(data []byte) (n 
uint128.Uint128, length int, err error) { + if len(data) == 0 { + return uint128.Uint128{}, 0, ErrEmpty + } + n = uint128.From64(0) + + for i, b := range data { + if i > 18 { + return uint128.Uint128{}, 0, errs.OverflowUint128 + } + value := uint128.New(uint64(b&0b0111_1111), 0) + if i == 18 && !value.And64(0b0111_1100).IsZero() { + return uint128.Uint128{}, 0, errs.OverflowUint128 + } + n = n.Or(value.Lsh(uint(7 * i))) + // if the high bit is not set, then this is the last byte + if b&0b1000_0000 == 0 { + return n, i + 1, nil + } + } + return uint128.Uint128{}, 0, ErrUnterminated +} diff --git a/pkg/leb128/leb128_test.go b/pkg/leb128/leb128_test.go new file mode 100644 index 0000000..eb654c2 --- /dev/null +++ b/pkg/leb128/leb128_test.go @@ -0,0 +1,83 @@ +package leb128 + +import ( + "testing" + + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/uint128" + "github.com/stretchr/testify/assert" +) + +func TestRoundTrip(t *testing.T) { + test := func(n uint128.Uint128) { + t.Run(n.String(), func(t *testing.T) { + t.Parallel() + encoded := EncodeUint128(n) + decoded, length, err := DecodeUint128(encoded) + assert.NoError(t, err) + assert.Equal(t, n, decoded) + assert.Equal(t, len(encoded), length) + }) + } + + test(uint128.Zero) + // powers of two + for i := 0; i < 128; i++ { + n := uint128.From64(1) + n = n.Lsh(uint(i)) + test(n) + } + + // alternating bits + n := uint128.Zero + for i := 0; i < 128; i++ { + n = n.Lsh(1).Or(uint128.From64(uint64(i % 2))) + test(n) + } +} + +func TestDecodeError(t *testing.T) { + testError := func(name string, bytes []byte, expectedError error) { + t.Run(name, func(t *testing.T) { + t.Parallel() + _, _, err := DecodeUint128(bytes) + if expectedError == nil { + assert.NoError(t, err) + } else { + assert.ErrorIs(t, err, expectedError) + } + }) + } + + testError("empty", []byte{}, ErrEmpty) + testError("unterminated", []byte{0b1000_0000}, ErrUnterminated) + + // may not be longer than 19 bytes + 
testError("valid 18 bytes", []byte{ + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, + }, nil) + testError("overflow 19 bytes", []byte{ + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 0, + }, errs.OverflowUint128) + + // may not overflow uint128 + testError("overflow 1", []byte{ + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 64, + }, errs.OverflowUint128) + testError("overflow 2", []byte{ + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 32, + }, errs.OverflowUint128) + testError("overflow 3", []byte{ + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 16, + }, errs.OverflowUint128) + testError("overflow 4", []byte{ + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 8, + }, errs.OverflowUint128) + testError("overflow 5", []byte{ + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 4, + }, errs.OverflowUint128) + testError("not overflow", []byte{ + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 2, + }, nil) +} diff --git a/pkg/logger/context.go b/pkg/logger/context.go new file mode 100644 index 0000000..8e7b12a --- /dev/null +++ b/pkg/logger/context.go @@ -0,0 +1,78 @@ +package logger + +import ( + "context" + "log/slog" + "os" +) + +type loggerKey struct{} + +// FromContext returns the logger from the context. If no logger is found, a new +func FromContext(ctx context.Context) *slog.Logger { + if ctx == nil { + return logger.With() + } + + if log, ok := ctx.Value(loggerKey{}).(*slog.Logger); ok { + return log + } + + return logger.With() +} + +// NewContext returns a new context with logger attached. 
+func NewContext(ctx context.Context, log *slog.Logger) context.Context { + if ctx == nil { + ctx = context.Background() + } + + return context.WithValue(ctx, loggerKey{}, log) +} + +// WithContext returns a new context with given logger attributes. +func WithContext(ctx context.Context, args ...any) context.Context { + return NewContext(ctx, FromContext(ctx).With(args...)) +} + +// WithGroupContext returns a new context with given group. +func WithGroupContext(ctx context.Context, group string) context.Context { + return NewContext(ctx, FromContext(ctx).WithGroup(group)) +} + +// DebugContext logs at [LevelDebug] from logger in the given context. +func DebugContext(ctx context.Context, msg string, args ...any) { + log(ctx, FromContext(ctx), slog.LevelDebug, msg, args...) +} + +// InfoContext logs at [LevelInfo] from logger in the given context. +func InfoContext(ctx context.Context, msg string, args ...any) { + log(ctx, FromContext(ctx), slog.LevelInfo, msg, args...) +} + +// WarnContext logs at [LevelWarn] from logger in the given context. +func WarnContext(ctx context.Context, msg string, args ...any) { + log(ctx, FromContext(ctx), slog.LevelWarn, msg, args...) +} + +// ErrorContext logs at [LevelError] from logger in the given context. +func ErrorContext(ctx context.Context, msg string, args ...any) { + log(ctx, FromContext(ctx), slog.LevelError, msg, args...) +} + +// PanicContext logs at [LevelPanic] and then panics from logger in the given context. +func PanicContext(ctx context.Context, msg string, args ...any) { + log(ctx, FromContext(ctx), LevelPanic, msg, args...) + panic(msg) +} + +// FatalContext logs at [LevelFatal] and then [os.Exit](1) from logger in the given context. +func FatalContext(ctx context.Context, msg string, args ...any) { + log(ctx, FromContext(ctx), LevelFatal, msg, args...) + os.Exit(1) +} + +// LogContext logs at the given level from logger in the given context. 
+func LogContext(ctx context.Context, level slog.Level, msg string, args ...any) { + log(ctx, FromContext(ctx), level, msg, args...) +} diff --git a/pkg/logger/duration.go b/pkg/logger/duration.go new file mode 100644 index 0000000..af28f7b --- /dev/null +++ b/pkg/logger/duration.go @@ -0,0 +1,15 @@ +package logger + +import ( + "log/slog" +) + +func durationToMsAttrReplacer(groups []string, attr slog.Attr) slog.Attr { + if attr.Value.Kind() == slog.KindDuration { + return slog.Attr{ + Key: attr.Key, + Value: slog.Int64Value(attr.Value.Duration().Milliseconds()), + } + } + return attr +} diff --git a/pkg/logger/error.go b/pkg/logger/error.go new file mode 100644 index 0000000..a653b44 --- /dev/null +++ b/pkg/logger/error.go @@ -0,0 +1,57 @@ +package logger + +import ( + "context" + "log/slog" + + "github.com/gaze-network/indexer-network/pkg/logger/slogx" + "github.com/gaze-network/indexer-network/pkg/stacktrace" +) + +func middlewareErrorStackTrace() middleware { + return func(next handleFunc) handleFunc { + return func(ctx context.Context, rec slog.Record) error { + rec.Attrs(func(attr slog.Attr) bool { + if attr.Key == slogx.ErrorKey || attr.Key == "err" { + err := attr.Value.Any() + if err, ok := err.(error); ok && err != nil { + // rec.AddAttrs(slog.String(slogx.ErrorVerboseKey, fmt.Sprintf("%+v", err))) + rec.AddAttrs(slog.Any(slogx.ErrorStackTraceKey, stacktrace.ExtractErrorStackTraces(err))) + } + } + return false + }) + return next(ctx, rec) + } + } +} + +func errorAttrReplacer(groups []string, attr slog.Attr) slog.Attr { + if len(groups) == 0 { + switch attr.Key { + case slogx.ErrorKey, "err": + if err, ok := attr.Value.Any().(error); ok { + if err != nil { + return slog.Attr{Key: slogx.ErrorKey, Value: slog.StringValue(err.Error())} + } + return slog.Attr{Key: slogx.ErrorKey, Value: slog.StringValue("null")} + } + case slogx.ErrorStackTraceKey: + type stackDetails struct { + Error string `json:"error"` + Stacks []string `json:"stacks"` + } + if st, ok := 
attr.Value.Any().(stacktrace.ErrorStackTraces); ok { + errsStacks := make([]stackDetails, 0) + for _, errStack := range st { + errsStacks = append(errsStacks, stackDetails{ + Error: errStack.Error(), + Stacks: errStack.StackTrace.FramesStrings(), + }) + } + return slog.Attr{Key: slogx.ErrorStackTraceKey, Value: slog.AnyValue(errsStacks)} + } + } + } + return attr +} diff --git a/pkg/logger/level.go b/pkg/logger/level.go new file mode 100644 index 0000000..e6e689d --- /dev/null +++ b/pkg/logger/level.go @@ -0,0 +1,39 @@ +package logger + +import ( + "fmt" + "log/slog" + + "github.com/gaze-network/indexer-network/pkg/logger/slogx" +) + +const ( + LevelCritical = slog.Level(12) + LevelPanic = slog.Level(14) + LevelFatal = slog.Level(16) +) + +func levelAttrReplacer(groups []string, attr slog.Attr) slog.Attr { + if len(groups) == 0 && attr.Key == slogx.LevelKey { + str := func(base string, val slog.Level) string { + if val == 0 { + return base + } + return fmt.Sprintf("%s%+d", base, val) + } + + if l, ok := attr.Value.Any().(slog.Level); ok { + switch { + case l < LevelCritical: + return attr + case l < LevelPanic: + return slog.Attr{Key: attr.Key, Value: slog.StringValue(str("CRITICAL", l-LevelCritical))} + case l < LevelFatal: + return slog.Attr{Key: attr.Key, Value: slog.StringValue(str("PANIC", l-LevelPanic))} + default: + return slog.Attr{Key: attr.Key, Value: slog.StringValue(str("FATAL", l-LevelFatal))} + } + } + } + return attr +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go new file mode 100644 index 0000000..4de1aaa --- /dev/null +++ b/pkg/logger/logger.go @@ -0,0 +1,225 @@ +// nolint: sloglint +package logger + +import ( + "context" + "log/slog" + "os" + "runtime" + "strings" + "time" +) + +const ( + // DefaultLevel is the default minimum reporting level for the logger + DefaultLevel = slog.LevelDebug + + // logLevel set `log` output level to `DEBUG`. + // `log` is allowed for debugging purposes only. 
+ // + // NOTE: Please use `slog` for logging instead of `log`, and + // do not use `log` for production code. + logLevel = slog.LevelDebug +) + +var ( + // minimum reporting level for the logger + lvl = new(slog.LevelVar) + + // top-level logger + logger *slog.Logger = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: lvl, + ReplaceAttr: levelAttrReplacer, + })) +) + +// Set default slog logger +func init() { + lvl.Set(DefaultLevel) + slog.SetDefault(logger) +} + +// Set `log` output level +func init() { + slog.SetLogLoggerLevel(logLevel) +} + +// SetLevel sets the minimum reporting level for the logger +func SetLevel(level slog.Level) (old slog.Level) { + old = lvl.Level() + lvl.Set(level) + return old +} + +// With returns a Logger that includes the given attributes +// in each output operation. Arguments are converted to +// attributes as if by [Logger.Log]. +func With(args ...any) *slog.Logger { + return logger.With(args...) +} + +// WithGroup returns a Logger that starts a group, if name is non-empty. +// The keys of all attributes added to the Logger will be qualified by the given +// name. (How that qualification happens depends on the [Handler.WithGroup] +// method of the Logger's Handler.) +// +// If name is empty, WithGroup returns the receiver. +func WithGroup(group string) *slog.Logger { + return logger.WithGroup(group) +} + +// Debug logs at [LevelDebug]. +func Debug(msg string, args ...any) { + log(context.Background(), logger, slog.LevelDebug, msg, args...) +} + +// Info logs at [LevelInfo]. +func Info(msg string, args ...any) { + log(context.Background(), logger, slog.LevelInfo, msg, args...) +} + +// Warn logs at [LevelWarn]. +func Warn(msg string, args ...any) { + log(context.Background(), logger, slog.LevelWarn, msg, args...) +} + +// Error logs at [LevelError] with an error. +func Error(msg string, args ...any) { + log(context.Background(), logger, slog.LevelError, msg, args...) 
+} + +// Panic logs at [LevelPanic] and then panics. +func Panic(msg string, args ...any) { + log(context.Background(), logger, LevelPanic, msg, args...) + panic(msg) +} + +// Fatal logs at [LevelFatal] followed by a call to [os.Exit](1). +func Fatal(msg string, args ...any) { + log(context.Background(), logger, LevelFatal, msg, args...) + os.Exit(1) +} + +// Log emits a log record with the current time and the given level and message. +// The Record's Attrs consist of the Logger's attributes followed by +// the Attrs specified by args. +func Log(level slog.Level, msg string, args ...any) { + log(context.Background(), logger, level, msg, args...) +} + +// LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs. +func LogAttrs(ctx context.Context, level slog.Level, msg string, attrs ...slog.Attr) { + logAttrs(ctx, FromContext(ctx), level, msg, attrs...) +} + +// Config is the logger configuration. +type Config struct { + // Output is the logger output format. + // Possible values: + // - Text (default) + // - JSON + // - GCP: Output format for Stackdriver Logging/Cloud Logging or others GCP services. + Output string `mapstructure:"output"` + + // Debug is enabled logger level debug. (default: false) + Debug bool `mapstructure:"debug"` +} + +var ( + // Default Attribute Replacers + defaultAttrReplacers = []func([]string, slog.Attr) slog.Attr{ + levelAttrReplacer, + errorAttrReplacer, + } + + // Default Middlewares + defaultMiddleware = []middleware{} +) + +// Init initializes global logger and slog logger with given configuration. +func Init(cfg Config) error { + var ( + handler slog.Handler + options = &slog.HandlerOptions{ + AddSource: false, + Level: lvl, + ReplaceAttr: attrReplacerChain(defaultAttrReplacers...), + } + middlewares = append([]middleware{}, defaultMiddleware...) 
+ ) + + lvl.Set(slog.LevelInfo) + if cfg.Debug { + lvl.Set(slog.LevelDebug) + options.AddSource = true + middlewares = append(middlewares, middlewareErrorStackTrace()) + } + + switch strings.ToLower(cfg.Output) { + case "json": + lvl.Set(slog.LevelInfo) + handler = slog.NewJSONHandler(os.Stdout, options) + case "gcp": + handler = NewGCPHandler(options) + default: + handler = slog.NewTextHandler(os.Stdout, options) + } + + logger = slog.New(newChainHandlers(handler, middlewares...)) + slog.SetDefault(logger) + return nil +} + +// attrReplacerChain returns a function that applies a chain of replacers to an attribute. +func attrReplacerChain(replacers ...func([]string, slog.Attr) slog.Attr) func([]string, slog.Attr) slog.Attr { + return func(groups []string, attr slog.Attr) slog.Attr { + for _, replacer := range replacers { + attr = replacer(groups, attr) + } + return attr + } +} + +// log is the low-level logging method for methods that take ...any. +// It must always be called directly by an exported logging method +// or function, because it uses a fixed call depth to obtain the pc. +func log(ctx context.Context, l *slog.Logger, level slog.Level, msg string, args ...any) { + if ctx == nil { + ctx = context.Background() + } + + if !l.Enabled(ctx, level) { + return + } + + var pc uintptr + var pcs [1]uintptr + // skip [runtime.Callers, this function, this function's caller] + runtime.Callers(3, pcs[:]) + pc = pcs[0] + + r := slog.NewRecord(time.Now(), level, msg, pc) + r.Add(args...) + _ = l.Handler().Handle(ctx, r) +} + +// logAttrs is like [Logger.log], but for methods that take ...Attr. 
+func logAttrs(ctx context.Context, l *slog.Logger, level slog.Level, msg string, attrs ...slog.Attr) { + if ctx == nil { + ctx = context.Background() + } + + if !l.Enabled(ctx, level) { + return + } + + var pc uintptr + var pcs [1]uintptr + // skip [runtime.Callers, this function, this function's caller] + runtime.Callers(3, pcs[:]) + pc = pcs[0] + + r := slog.NewRecord(time.Now(), level, msg, pc) + r.AddAttrs(attrs...) + _ = l.Handler().Handle(ctx, r) +} diff --git a/pkg/logger/logger_gcp.go b/pkg/logger/logger_gcp.go new file mode 100644 index 0000000..7f44a8b --- /dev/null +++ b/pkg/logger/logger_gcp.go @@ -0,0 +1,62 @@ +package logger + +import ( + "log/slog" + "os" + + "github.com/gaze-network/indexer-network/pkg/logger/slogx" +) + +// NewGCPHandler returns a new GCP handler. +// The handler writes logs to the os.Stdout and +// replaces the default attribute keys/values with the GCP logging attribute keys/values +// +// https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry +func NewGCPHandler(opts *slog.HandlerOptions) slog.Handler { + return slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ + AddSource: true, + Level: opts.Level, + ReplaceAttr: attrReplacerChain( + GCPAttrReplacer, + durationToMsAttrReplacer, + opts.ReplaceAttr, + ), + }) +} + +// GCPAttrReplacer replaces the default attribute keys with the GCP logging attribute keys. 
+func GCPAttrReplacer(groups []string, attr slog.Attr) slog.Attr { + switch attr.Key { + case slogx.MessageKey: + attr.Key = "message" + case slogx.SourceKey: + attr.Key = "logging.googleapis.com/sourceLocation" + case slogx.LevelKey: + attr.Key = "severity" + lvl, ok := attr.Value.Any().(slog.Level) + if ok { + attr.Value = slog.StringValue(gcpSeverityMapping(lvl)) + } + } + return attr +} + +// https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#logseverity +func gcpSeverityMapping(lvl slog.Level) string { + switch { + case lvl < slog.LevelInfo: + return "DEBUG" + case lvl < slog.LevelWarn: + return "INFO" + case lvl < slog.LevelError: + return "WARNING" + case lvl < LevelCritical: + return "ERROR" + case lvl < LevelPanic: + return "CRITICAL" + case lvl < LevelFatal: + return "ALERT" + default: + return "EMERGENCY" + } +} diff --git a/pkg/logger/multi_handlers.go b/pkg/logger/multi_handlers.go new file mode 100644 index 0000000..cb38ffb --- /dev/null +++ b/pkg/logger/multi_handlers.go @@ -0,0 +1,49 @@ +package logger + +import ( + "context" + "log/slog" +) + +type ( + handleFunc func(context.Context, slog.Record) error + middleware func(handleFunc) handleFunc +) + +type multiHandlers struct { + h slog.Handler + middlewares []middleware +} + +func newChainHandlers(handler slog.Handler, middlewares ...middleware) *multiHandlers { + return &multiHandlers{ + h: handler, + middlewares: middlewares, + } +} + +func (c *multiHandlers) Enabled(ctx context.Context, lvl slog.Level) bool { + return c.h.Enabled(ctx, lvl) +} + +func (c *multiHandlers) Handle(ctx context.Context, rec slog.Record) error { + h := c.h.Handle + for i := len(c.middlewares) - 1; i >= 0; i-- { + h = c.middlewares[i](h) + } + return h(ctx, rec) +} + +func (c *multiHandlers) WithGroup(group string) slog.Handler { + return &multiHandlers{ + middlewares: c.middlewares, + h: c.h.WithGroup(group), + } +} + +func (c *multiHandlers) WithAttrs(attrs []slog.Attr) slog.Handler { + return 
&multiHandlers{ + middlewares: c.middlewares, + h: c.h.WithAttrs(attrs), + } +} diff --git a/pkg/logger/slogx/attr.go b/pkg/logger/slogx/attr.go new file mode 100644 index 0000000..c40a42f --- /dev/null +++ b/pkg/logger/slogx/attr.go @@ -0,0 +1,185 @@ +package slogx + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "log/slog" + "time" + + "github.com/gaze-network/indexer-network/pkg/bufferpool" + "github.com/gaze-network/indexer-network/pkg/stacktrace" +) + +// Any returns an slog.Attr for the supplied value. +// See [AnyValue] for how values are treated. +func Any(key string, value any) slog.Attr { + return slog.Any(key, value) +} + +// Group returns an slog.Attr for a Group [Value]. +// The first argument is the key; the remaining arguments +// are converted to Attrs as in [Logger.Log]. +// +// Use Group to collect several key-value pairs under a single +// key on a log line, or as the result of LogValue +// in order to log a single value as multiple Attrs. +func Group(key string, args ...any) slog.Attr { + return slog.Group(key, args...) +} + +// Error returns an slog.Attr for an error value. +func Error(err error) slog.Attr { + if err == nil { + return slog.Attr{} + } + return slog.Any(ErrorKey, err) +} + +// NamedError returns an slog.Attr for an error value with a key. +func NamedError(key string, err error) slog.Attr { + if err == nil { + return slog.Attr{} + } + return slog.Any(key, err) +} + +// String returns an slog.Attr for a string value. +func String(key, value string) slog.Attr { + return slog.String(key, value) +} + +// func Stringp(key string, value *string) slog.Attr {} + +// Stringer returns an slog.Attr for a fmt.Stringer value. +func Stringer(key string, value fmt.Stringer) slog.Attr { + return slog.String(key, value.String()) +} + +// Int64 returns an slog.Attr for an int64. 
+func Int64(key string, value int64) slog.Attr { + return slog.Int64(key, value) +} + +// Int32 converts an int32 to an int64 and returns +func Int32(key string, value int32) slog.Attr { + return Int64(key, int64(value)) +} + +// Int16 converts an int16 to an int64 and returns +func Int16(key string, value int16) slog.Attr { + return Int64(key, int64(value)) +} + +// Int8 converts an int8 to an int64 and returns +func Int8(key string, value int8) slog.Attr { + return Int64(key, int64(value)) +} + +// Int converts an int to an int64 and returns +// an slog.Attr with that value. +func Int(key string, value int) slog.Attr { + return Int64(key, int64(value)) +} + +// Uint64 returns an slog.Attr for a uint64. +func Uint64(key string, v uint64) slog.Attr { + return slog.Uint64(key, v) +} + +// Uint32 converts a uint32 to a uint64 and returns +func Uint32(key string, v uint32) slog.Attr { + return Uint64(key, uint64(v)) +} + +// Uint16 converts a uint16 to a uint64 and returns +func Uint16(key string, v uint16) slog.Attr { + return Uint64(key, uint64(v)) +} + +// Uint8 converts a uint8 to a uint64 and returns +func Uint8(key string, v uint8) slog.Attr { + return Uint64(key, uint64(v)) +} + +// Uint converts a uint to a uint64 and returns +func Uint(key string, v uint) slog.Attr { + return Uint64(key, uint64(v)) +} + +// Uintptr returns an slog.Attr for a uintptr. +func Uintptr(key string, v uintptr) slog.Attr { + return Uint64(key, uint64(v)) +} + +// Float64 returns an slog.Attr for a floating-point number. +func Float64(key string, v float64) slog.Attr { + return slog.Float64(key, v) +} + +// Float32 converts a float32 to a float64 and returns +// an slog.Attr with that value. +func Float32(key string, v float32) slog.Attr { + return Float64(key, float64(v)) +} + +// Bool returns an slog.Attr for a bool. +func Bool(key string, v bool) slog.Attr { + return slog.Bool(key, v) +} + +// Time returns an slog.Attr for a [time.Time]. +// It discards the monotonic portion. 
+func Time(key string, v time.Time) slog.Attr { + return slog.Time(key, v) +} + +// Duration returns an slog.Attr for a [time.Duration]. +func Duration(key string, v time.Duration) slog.Attr { + return slog.Duration(key, v) +} + +// Binary returns an slog.Attr for a binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, v []byte) slog.Attr { + return slog.String(key, base64.StdEncoding.EncodeToString(v)) +} + +// ByteString returns an slog.Attr for a UTF-8 encoded byte string. +// +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, v []byte) slog.Attr { + return slog.String(key, string(v)) +} + +// Reflect returns an slog.Attr for an arbitrary object. +// It uses an json encoding, reflection-based function to lazily serialize nearly +// any object into an slog.Attr, but it's relatively slow and +// allocation-heavy. Any is always a better choice. +func Reflect(key string, v interface{}) slog.Attr { + buff := bufferpool.Get() + defer buff.Free() + enc := json.NewEncoder(buff) + enc.SetEscapeHTML(false) + _ = enc.Encode(v) + buff.TrimNewline() + return slog.String(key, buff.String()) +} + +// Stack returns an slog.Attr for the current stack trace. +// Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) slog.Attr { + return StackSkip(key, 1) +} + +// StackSkip returns an slog.Attr for the stack trace similarly to Stack, +// but also skips the given number of frames from the top of the stacktrace. 
+func StackSkip(key string, skip int) slog.Attr { + return slog.Any(key, stacktrace.Capture(skip+1).FramesStrings()) +} diff --git a/pkg/logger/slogx/attr_keys.go b/pkg/logger/slogx/attr_keys.go new file mode 100644 index 0000000..da90e42 --- /dev/null +++ b/pkg/logger/slogx/attr_keys.go @@ -0,0 +1,14 @@ +package slogx + +import "log/slog" + +// Keys for log attributes. +const ( + TimeKey = slog.TimeKey + LevelKey = slog.LevelKey + MessageKey = slog.MessageKey + SourceKey = slog.SourceKey + ErrorKey = "error" + ErrorVerboseKey = "error_verbose" + ErrorStackTraceKey = "error_stacktrace" +) diff --git a/pkg/logger/slogx/slogx.go b/pkg/logger/slogx/slogx.go new file mode 100644 index 0000000..434e406 --- /dev/null +++ b/pkg/logger/slogx/slogx.go @@ -0,0 +1,5 @@ +/* +slogx is extension of slog package and logger package. +It provides additional attributes and helper functions for logging. +*/ +package slogx diff --git a/pkg/reportingclient/reportingclient.go b/pkg/reportingclient/reportingclient.go new file mode 100644 index 0000000..99cf02f --- /dev/null +++ b/pkg/reportingclient/reportingclient.go @@ -0,0 +1,115 @@ +package reportingclient + +import ( + "context" + "encoding/json" + "log/slog" + + "github.com/Cleverse/go-utilities/utils" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/cockroachdb/errors" + "github.com/gaze-network/indexer-network/common" + "github.com/gaze-network/indexer-network/common/errs" + "github.com/gaze-network/indexer-network/pkg/httpclient" + "github.com/gaze-network/indexer-network/pkg/logger" +) + +type Config struct { + Disabled bool `mapstructure:"disabled"` + BaseURL string `mapstructure:"base_url"` + Name string `mapstructure:"name"` + WebsiteURL string `mapstructure:"website_url"` + IndexerAPIURL string `mapstructure:"indexer_api_url"` +} + +type ReportingClient struct { + httpClient *httpclient.Client + config Config +} + +const defaultBaseURL = "https://indexer.api.gaze.network" + +func New(config Config) 
(*ReportingClient, error) { + baseURL := utils.Default(config.BaseURL, defaultBaseURL) + httpClient, err := httpclient.New(baseURL) + if err != nil { + return nil, errors.Wrap(err, "can't create http client") + } + if config.Name == "" { + return nil, errors.Wrap(errs.InvalidArgument, "reporting.name config is required if reporting is enabled") + } + return &ReportingClient{ + httpClient: httpClient, + config: config, + }, nil +} + +type SubmitBlockReportPayload struct { + Type string `json:"type"` + ClientVersion string `json:"clientVersion"` + DBVersion int `json:"dbVersion"` + EventHashVersion int `json:"eventHashVersion"` + Network common.Network `json:"network"` + BlockHeight uint64 `json:"blockHeight"` + BlockHash chainhash.Hash `json:"blockHash"` + EventHash chainhash.Hash `json:"eventHash"` + CumulativeEventHash chainhash.Hash `json:"cumulativeEventHash"` +} + +func (r *ReportingClient) SubmitBlockReport(ctx context.Context, payload SubmitBlockReportPayload) error { + ctx = logger.WithContext(ctx, slog.String("package", "reporting_client"), slog.Any("payload", payload)) + + body, err := json.Marshal(payload) + if err != nil { + return errors.Wrap(err, "can't marshal payload") + } + resp, err := r.httpClient.Post(ctx, "/v1/report/block", httpclient.RequestOptions{ + Body: body, + }) + if err != nil { + return errors.Wrap(err, "can't send request") + } + if resp.StatusCode() >= 400 { + // TODO: unmashal response body and log it + logger.WarnContext(ctx, "Reporting block event failed", slog.Any("resp_body", resp.Body())) + return nil + } + logger.DebugContext(ctx, "Reported block event") + return nil +} + +type SubmitNodeReportPayload struct { + Name string `json:"name"` + Type string `json:"type"` + Network common.Network `json:"network"` + WebsiteURL string `json:"websiteURL,omitempty"` + IndexerAPIURL string `json:"indexerAPIURL,omitempty"` +} + +func (r *ReportingClient) SubmitNodeReport(ctx context.Context, module string, network common.Network) error { + 
payload := SubmitNodeReportPayload{ + Name: r.config.Name, + Type: module, + Network: network, + WebsiteURL: r.config.WebsiteURL, + IndexerAPIURL: r.config.IndexerAPIURL, + } + + ctx = logger.WithContext(ctx, slog.String("package", "reporting_client"), slog.Any("payload", payload)) + + body, err := json.Marshal(payload) + if err != nil { + return errors.Wrap(err, "can't marshal payload") + } + resp, err := r.httpClient.Post(ctx, "/v1/report/node", httpclient.RequestOptions{ + Body: body, + }) + if err != nil { + return errors.Wrap(err, "can't send request") + } + if resp.StatusCode() >= 400 { + logger.WarnContext(ctx, "Reporting node info failed", slog.Any("resp_body", resp.Body())) + } + logger.DebugContext(ctx, "Reported node info") + return nil +} diff --git a/pkg/stacktrace/errors.go b/pkg/stacktrace/errors.go new file mode 100644 index 0000000..35d9281 --- /dev/null +++ b/pkg/stacktrace/errors.go @@ -0,0 +1,89 @@ +package stacktrace + +import ( + "fmt" + "io" + "strings" + + "github.com/cockroachdb/errors/errbase" + "github.com/samber/lo" +) + +// ErrorStackTrace is a pair of an error and its stack trace. +type ErrorStackTrace struct { + Cause error + StackTrace *StackTrace +} + +func (s ErrorStackTrace) String() string { + return fmt.Sprintf("%s %v", s.Cause.Error(), s.StackTrace.FramesStrings()) +} + +func (s ErrorStackTrace) Error() string { + return s.Cause.Error() +} + +// nolint: errcheck +func (s ErrorStackTrace) Format(f fmt.State, verb rune) { + fmt.Fprintf(f, "%s %v", s.Cause.Error(), s.StackTrace.FramesStrings()) +} + +// ErrorStackTraces is a list of error stack traces. 
+type ErrorStackTraces []ErrorStackTrace + +func (s ErrorStackTraces) String() string { + var sb strings.Builder + for i, errSt := range s { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("[%d] ", i+1)) + sb.WriteString(errSt.String()) + } + return sb.String() +} + +// nolint: errcheck +func (s ErrorStackTraces) Format(f fmt.State, verb rune) { + for i, errSt := range s { + if i > 0 { + io.WriteString(f, "\n") + } + io.WriteString(f, fmt.Sprintf("[%d] %s", i+1, errSt.String())) + } +} + +// ExtractErrorStackTraces extracts the stack traces from the provided error and its causes. +// Sorted from oldest to newest. +func ExtractErrorStackTraces(err error) ErrorStackTraces { + result := ErrorStackTraces{} + + for err != nil { + causeErr := errbase.UnwrapOnce(err) + if errStack, ok := err.(errbase.StackTraceProvider); ok { + pcs := pkgErrStackTaceToPCs(errStack.StackTrace()) + if len(pcs) > 0 { + stacktraces := ParsePCS(pcs) + result = append(result, ErrorStackTrace{ + Cause: err, + StackTrace: stacktraces, + }) + } + } + err = causeErr + } + + // reverse the order (oldest first) + result = lo.Reverse(result) + + return result +} + +// convert type of [github.com/cockroachdb/errors/errbase.StackTrace] to a slice of PCs. 
+func pkgErrStackTaceToPCs(stacktrace errbase.StackTrace) []uintptr { + pcs := make([]uintptr, len(stacktrace)) + for i, frame := range stacktrace { + pcs[i] = uintptr(frame) + } + return pcs +} diff --git a/pkg/stacktrace/frame.go b/pkg/stacktrace/frame.go new file mode 100644 index 0000000..e2bf594 --- /dev/null +++ b/pkg/stacktrace/frame.go @@ -0,0 +1,24 @@ +package stacktrace + +import ( + "fmt" + "io" + "runtime" +) + +type Frame struct { + runtime.Frame +} + +func (f Frame) String() string { + return fmt.Sprintf("%s %s:%d", f.Function, f.File, f.Line) +} + +// nolint: errcheck +func (f Frame) Format(fs fmt.State, verb rune) { + io.WriteString(fs, f.Function) + io.WriteString(fs, "\n\t") + io.WriteString(fs, f.File) + io.WriteString(fs, ":") + io.WriteString(fs, fmt.Sprint(f.Line)) +} diff --git a/pkg/stacktrace/stacktrace.go b/pkg/stacktrace/stacktrace.go new file mode 100644 index 0000000..3efeb6c --- /dev/null +++ b/pkg/stacktrace/stacktrace.go @@ -0,0 +1,92 @@ +package stacktrace + +import ( + "fmt" + "io" + "runtime" + "strings" +) + +// StackTrace is the type of the data for a call stack. +// This mirrors the type of the same name in [github.com/cockroachdb/errors/errbase.StackTrace]. +type StackTrace struct { + PCS []uintptr + Frames []Frame +} + +// Caller captures a stack trace of the specified depth, skipping +// the provided number of frames. skip=0 identifies the caller of Caller. +// +// Alias of [Capture] +func Caller(skip int) *StackTrace { + return Capture(1 + skip) +} + +// Capture captures a stack trace of the specified depth, skipping +// the provided number of frames. skip=0 identifies the caller of Capture. +func Capture(skip int) *StackTrace { + pcs := make([]uintptr, 64) + n := runtime.Callers(2+skip, pcs[:]) + + // Expand the pcs slice if there wasn't enough room. + for n == len(pcs) { + pcs = make([]uintptr, 2*len(pcs)) + n = runtime.Callers(2+skip, pcs[:]) + } + + // Deallocate the unused space in the slice. 
+ pcs = pcs[:n:n] + + return ParsePCS(pcs) +} + +func ParsePCS(pcs []uintptr) *StackTrace { + frames := make([]Frame, 0, len(pcs)) + callerFrames := runtime.CallersFrames(pcs) + for frame, more := callerFrames.Next(); more; frame, more = callerFrames.Next() { + if !strings.HasPrefix(frame.Function, "runtime.") { + frames = append(frames, Frame{frame}) + } + } + return &StackTrace{ + PCS: pcs, + Frames: frames[:len(frames):len(frames)], + } +} + +// FramesStrings returns the frames of this stacktrace as slice of strings. +func (s *StackTrace) FramesStrings() []string { + str := make([]string, len(s.Frames)) + for i, frame := range s.Frames { + str[i] = frame.String() + } + return str +} + +// Count reports the total number of frames in this stacktrace. +func (s *StackTrace) Count() int { + return len(s.Frames) +} + +// Format formats the stack of Frames according to the fmt.Formatter interface. +func (s *StackTrace) Format(fs fmt.State, verb rune) { + for i, frame := range s.Frames { + if i > 0 { + _, _ = io.WriteString(fs, "\n") + } + frame.Format(fs, verb) + } +} + +// String returns a string representation of the stack trace. +func (s *StackTrace) String() string { + var sb strings.Builder + for i, frame := range s.Frames { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("[%d] ", len(s.Frames)-i)) + sb.WriteString(frame.String()) + } + return sb.String() +} diff --git a/sqlc.yaml b/sqlc.yaml new file mode 100644 index 0000000..958c355 --- /dev/null +++ b/sqlc.yaml @@ -0,0 +1,29 @@ +# sqlc configuration file +# https://docs.sqlc.dev/en/stable/reference/config.html +# +# run `sqlc generate` to generate Go code from SQL queries and schema definitions. 
+# +# use `golang-migrate` to manage your database schema +# https://docs.sqlc.dev/en/stable/howto/ddl.html#golang-migrate +version: "2" +sql: + - schema: "./modules/bitcoin/database/postgresql/migrations" + queries: "./modules/bitcoin/database/postgresql/queries" + engine: "postgresql" + gen: + go: + package: "gen" + out: "./modules/bitcoin/repository/postgres/gen" + sql_package: "pgx/v5" + rename: + id: "Id" + - schema: "./modules/runes/database/postgresql/migrations" + queries: "./modules/runes/database/postgresql/queries" + engine: "postgresql" + gen: + go: + package: "gen" + out: "./modules/runes/repository/postgres/gen" + sql_package: "pgx/v5" + rename: + id: "Id"